author     Jeff Garzik <jgarzik@pobox.com>    2006-01-26 22:19:57 -0500
committer  Jeff Garzik <jgarzik@pobox.com>    2006-01-26 22:19:57 -0500
commit     97309d1a0bbdcb0813ea08574b4473d8e5416012 (patch)
tree       0dcbcf5aa5146147e0ac1d0c4b73b868a67d333b /drivers
parent     b4ea75b649417606fd6b38710a2962ec9770e772 (diff)
parent     efd51b5c6798d103e3aa683464aebb2019b62119 (diff)
Merge branch 'upstream-fixes'
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig                  |    2
-rw-r--r--  drivers/Makefile                 |    1
-rw-r--r--  drivers/char/synclink_gt.c       |    2
-rw-r--r--  drivers/char/tlclk.c             |   93
-rw-r--r--  drivers/edac/Kconfig             |  102
-rw-r--r--  drivers/edac/Makefile            |   18
-rw-r--r--  drivers/edac/amd76x_edac.c       |  356
-rw-r--r--  drivers/edac/e752x_edac.c        | 1071
-rw-r--r--  drivers/edac/e7xxx_edac.c        |  558
-rw-r--r--  drivers/edac/edac_mc.c           | 2209
-rw-r--r--  drivers/edac/edac_mc.h           |  448
-rw-r--r--  drivers/edac/i82860_edac.c       |  299
-rw-r--r--  drivers/edac/i82875p_edac.c      |  532
-rw-r--r--  drivers/edac/r82600_edac.c       |  407
-rw-r--r--  drivers/md/kcopyd.c              |    1
-rw-r--r--  drivers/net/Kconfig              |    1
-rw-r--r--  drivers/net/acenic.c             |    4
-rw-r--r--  drivers/net/b44.c                |    5
-rw-r--r--  drivers/net/bonding/bond_main.c  |    2
-rw-r--r--  drivers/net/cassini.c            |   36
-rw-r--r--  drivers/net/s2io.c               |    2
-rw-r--r--  drivers/serial/8250.c            |   13
-rw-r--r--  drivers/serial/8250_pci.c        |   10
-rw-r--r--  drivers/serial/Kconfig           |    2
-rw-r--r--  drivers/serial/at91_serial.c     |    2
-rw-r--r--  drivers/serial/sn_console.c      |  129
-rw-r--r--  drivers/serial/suncore.c         |   34
-rw-r--r--  drivers/serial/sunsab.c          |    7
28 files changed, 6215 insertions(+), 131 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 283c089537b..bddf431bbb7 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -68,4 +68,6 @@ source "drivers/infiniband/Kconfig"
source "drivers/sn/Kconfig"
+source "drivers/edac/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 7c45050ecd0..619dd964c51 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_PHONE) += telephony/
obj-$(CONFIG_MD) += md/
obj-$(CONFIG_BT) += bluetooth/
obj-$(CONFIG_ISDN) += isdn/
+obj-$(CONFIG_EDAC) += edac/
obj-$(CONFIG_MCA) += mca/
obj-$(CONFIG_EISA) += eisa/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 07c9be6a6bb..a85a60a93de 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -2630,7 +2630,7 @@ static int get_interface(struct slgt_info *info, int __user *if_mode)
static int set_interface(struct slgt_info *info, int if_mode)
{
unsigned long flags;
- unsigned char val;
+ unsigned short val;
DBGINFO(("%s set_interface=%x)\n", info->device_name, if_mode));
spin_lock_irqsave(&info->lock,flags);
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index bc56df8a347..4c272189cd4 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -34,7 +34,6 @@
#include <linux/kernel.h> /* printk() */
#include <linux/fs.h> /* everything... */
#include <linux/errno.h> /* error codes */
-#include <linux/delay.h> /* udelay */
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
@@ -156,6 +155,8 @@ This directory exports the following interfaces. There operation is
documented in the MCPBL0010 TPS under the Telecom Clock API section, 11.4.
alarms :
current_ref :
+received_ref_clk3a :
+received_ref_clk3b :
enable_clk3a_output :
enable_clk3b_output :
enable_clka0_output :
@@ -165,7 +166,7 @@ enable_clkb1_output :
filter_select :
hardware_switching :
hardware_switching_mode :
-interrupt_switch :
+telclock_version :
mode_select :
refalign :
reset :
@@ -173,7 +174,6 @@ select_amcb1_transmit_clock :
select_amcb2_transmit_clock :
select_redundant_clock :
select_ref_frequency :
-test_mode :
All sysfs interfaces are integers in hex format, i.e echo 99 > refalign
has the same effect as echo 0x99 > refalign.
@@ -226,7 +226,7 @@ static int tlclk_release(struct inode *inode, struct file *filp)
return 0;
}
-ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
+static ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos)
{
if (count < sizeof(struct tlclk_alarms))
@@ -242,7 +242,7 @@ ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
return sizeof(struct tlclk_alarms);
}
-ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count,
+static ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count,
loff_t *f_pos)
{
return 0;
@@ -278,21 +278,21 @@ static ssize_t show_current_ref(struct device *d,
static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL);
-static ssize_t show_interrupt_switch(struct device *d,
+static ssize_t show_telclock_version(struct device *d,
struct device_attribute *attr, char *buf)
{
unsigned long ret_val;
unsigned long flags;
spin_lock_irqsave(&event_lock, flags);
- ret_val = inb(TLCLK_REG6);
+ ret_val = inb(TLCLK_REG5);
spin_unlock_irqrestore(&event_lock, flags);
return sprintf(buf, "0x%lX\n", ret_val);
}
-static DEVICE_ATTR(interrupt_switch, S_IRUGO,
- show_interrupt_switch, NULL);
+static DEVICE_ATTR(telclock_version, S_IRUGO,
+ show_telclock_version, NULL);
static ssize_t show_alarms(struct device *d,
struct device_attribute *attr, char *buf)
@@ -309,6 +309,50 @@ static ssize_t show_alarms(struct device *d,
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static ssize_t store_received_ref_clk3a(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG1, 0xef, val);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3a, S_IWUGO, NULL,
+ store_received_ref_clk3a);
+
+
+static ssize_t store_received_ref_clk3b(struct device *d,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long tmp;
+ unsigned char val;
+ unsigned long flags;
+
+ sscanf(buf, "%lX", &tmp);
+ dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+ val = (unsigned char)tmp;
+ spin_lock_irqsave(&event_lock, flags);
+ SET_PORT_BITS(TLCLK_REG1, 0xef, val << 1);
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3b, S_IWUGO, NULL,
+ store_received_ref_clk3b);
+
+
static ssize_t store_enable_clk3b_output(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -436,26 +480,6 @@ static ssize_t store_enable_clka0_output(struct device *d,
static DEVICE_ATTR(enable_clka0_output, S_IWUGO, NULL,
store_enable_clka0_output);
-static ssize_t store_test_mode(struct device *d,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- unsigned long flags;
- unsigned long tmp;
- unsigned char val;
-
- sscanf(buf, "%lX", &tmp);
- dev_dbg(d, "tmp = 0x%lX\n", tmp);
-
- val = (unsigned char)tmp;
- spin_lock_irqsave(&event_lock, flags);
- SET_PORT_BITS(TLCLK_REG4, 0xfd, 2);
- spin_unlock_irqrestore(&event_lock, flags);
-
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(test_mode, S_IWUGO, NULL, store_test_mode);
-
static ssize_t store_select_amcb2_transmit_clock(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -475,7 +499,7 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d,
SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38);
switch (val) {
case CLK_8_592MHz:
- SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
break;
case CLK_11_184MHz:
SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
@@ -484,7 +508,7 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d,
SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
break;
case CLK_44_736MHz:
- SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
+ SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
break;
}
} else
@@ -653,9 +677,7 @@ static ssize_t store_refalign (struct device *d,
dev_dbg(d, "tmp = 0x%lX\n", tmp);
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
- udelay(2);
SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08);
- udelay(2);
SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
spin_unlock_irqrestore(&event_lock, flags);
@@ -706,15 +728,16 @@ static DEVICE_ATTR(reset, S_IWUGO, NULL, store_reset);
static struct attribute *tlclk_sysfs_entries[] = {
&dev_attr_current_ref.attr,
- &dev_attr_interrupt_switch.attr,
+ &dev_attr_telclock_version.attr,
&dev_attr_alarms.attr,
+ &dev_attr_received_ref_clk3a.attr,
+ &dev_attr_received_ref_clk3b.attr,
&dev_attr_enable_clk3a_output.attr,
&dev_attr_enable_clk3b_output.attr,
&dev_attr_enable_clkb1_output.attr,
&dev_attr_enable_clka1_output.attr,
&dev_attr_enable_clkb0_output.attr,
&dev_attr_enable_clka0_output.attr,
- &dev_attr_test_mode.attr,
&dev_attr_select_amcb1_transmit_clock.attr,
&dev_attr_select_amcb2_transmit_clock.attr,
&dev_attr_select_redundant_clock.attr,
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
new file mode 100644
index 00000000000..4819e7fc00d
--- /dev/null
+++ b/drivers/edac/Kconfig
@@ -0,0 +1,102 @@
+#
+# EDAC Kconfig
+# Copyright (c) 2003 Linux Networx
+# Licensed and distributed under the GPL
+#
+# $Id: Kconfig,v 1.4.2.7 2005/07/08 22:05:38 dsp_llnl Exp $
+#
+
+menu 'EDAC - error detection and reporting (RAS)'
+
+config EDAC
+ tristate "EDAC core system error reporting"
+ depends on X86
+ default y
+ help
+ EDAC is designed to report errors in the core system.
+ These are low-level errors that are reported in the CPU or
+ supporting chipset: memory errors, cache errors, PCI errors,
+ thermal throttling, etc.. If unsure, select 'Y'.
+
+
+comment "Reporting subsystems"
+ depends on EDAC
+
+config EDAC_DEBUG
+ bool "Debugging"
+ depends on EDAC
+ help
+ This turns on debugging information for the entire EDAC
+ sub-system. You can insert module with "debug_level=x", current
+ there're four debug levels (x=0,1,2,3 from low to high).
+ Usually you should select 'N'.
+
+config EDAC_MM_EDAC
+ tristate "Main Memory EDAC (Error Detection And Correction) reporting"
+ depends on EDAC
+ default y
+ help
+ Some systems are able to detect and correct errors in main
+ memory. EDAC can report statistics on memory error
+ detection and correction (EDAC - or commonly referred to ECC
+ errors). EDAC will also try to decode where these errors
+ occurred so that a particular failing memory module can be
+ replaced. If unsure, select 'Y'.
+
+
+config EDAC_AMD76X
+ tristate "AMD 76x (760, 762, 768)"
+ depends on EDAC_MM_EDAC && PCI
+ help
+ Support for error detection and correction on the AMD 76x
+ series of chipsets used with the Athlon processor.
+
+config EDAC_E7XXX
+ tristate "Intel e7xxx (e7205, e7500, e7501, e7505)"
+ depends on EDAC_MM_EDAC && PCI
+ help
+ Support for error detection and correction on the Intel
+ E7205, E7500, E7501 and E7505 server chipsets.
+
+config EDAC_E752X
+ tristate "Intel e752x (e7520, e7525, e7320)"
+ depends on EDAC_MM_EDAC && PCI
+ help
+ Support for error detection and correction on the Intel
+ E7520, E7525, E7320 server chipsets.
+
+config EDAC_I82875P
+ tristate "Intel 82875p (D82875P, E7210)"
+ depends on EDAC_MM_EDAC && PCI
+ help
+ Support for error detection and correction on the Intel
+ DP82785P and E7210 server chipsets.
+
+config EDAC_I82860
+ tristate "Intel 82860"
+ depends on EDAC_MM_EDAC && PCI
+ help
+ Support for error detection and correction on the Intel
+ 82860 chipset.
+
+config EDAC_R82600
+ tristate "Radisys 82600 embedded chipset"
+ depends on EDAC_MM_EDAC
+ help
+ Support for error detection and correction on the Radisys
+ 82600 embedded chipset.
+
+choice
+ prompt "Error detecting method"
+ depends on EDAC
+ default EDAC_POLL
+
+config EDAC_POLL
+ bool "Poll for errors"
+ depends on EDAC
+ help
+ Poll the chipset periodically to detect errors.
+
+endchoice
+
+endmenu
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
new file mode 100644
index 00000000000..93137fdab4b
--- /dev/null
+++ b/drivers/edac/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the Linux kernel EDAC drivers.
+#
+# Copyright 02 Jul 2003, Linux Networx (http://lnxi.com)
+# This file may be distributed under the terms of the
+# GNU General Public License.
+#
+# $Id: Makefile,v 1.4.2.3 2005/07/08 22:05:38 dsp_llnl Exp $
+
+
+obj-$(CONFIG_EDAC_MM_EDAC) += edac_mc.o
+obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
+obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
+obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
+obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o
+obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
+obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
+
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
new file mode 100644
index 00000000000..2fcc8120b53
--- /dev/null
+++ b/drivers/edac/amd76x_edac.c
@@ -0,0 +1,356 @@
+/*
+ * AMD 76x Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_mc.h"
+
+
+#define AMD76X_NR_CSROWS 8
+#define AMD76X_NR_CHANS 1
+#define AMD76X_NR_DIMMS 4
+
+
+/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
+#define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b)
+ *
+ * 31:16 reserved
+ * 15:14 SERR enabled: x1=ue 1x=ce
+ * 13 reserved
+ * 12 diag: disabled, enabled
+ * 11:10 mode: dis, EC, ECC, ECC+scrub
+ * 9:8 status: x1=ue 1x=ce
+ * 7:4 UE cs row
+ * 3:0 CE cs row
+ */
+#define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b)
+ *
+ * 31:26 clock disable 5 - 0
+ * 25 SDRAM init
+ * 24 reserved
+ * 23 mode register service
+ * 22:21 suspend to RAM
+ * 20 burst refresh enable
+ * 19 refresh disable
+ * 18 reserved
+ * 17:16 cycles-per-refresh
+ * 15:8 reserved
+ * 7:0 x4 mode enable 7 - 0
+ */
+#define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b)
+ *
+ * 31:23 chip-select base
+ * 22:16 reserved
+ * 15:7 chip-select mask
+ * 6:3 reserved
+ * 2:1 address mode
+ * 0 chip-select enable
+ */
+
+
+struct amd76x_error_info {
+ u32 ecc_mode_status;
+};
+
+
+enum amd76x_chips {
+ AMD761 = 0,
+ AMD762
+};
+
+
+struct amd76x_dev_info {
+ const char *ctl_name;
+};
+
+
+static const struct amd76x_dev_info amd76x_devs[] = {
+ [AMD761] = {.ctl_name = "AMD761"},
+ [AMD762] = {.ctl_name = "AMD762"},
+};
+
+
+/**
+ * amd76x_get_error_info - fetch error information
+ * @mci: Memory controller
+ * @info: Info to fill in
+ *
+ * Fetch and store the AMD76x ECC status. Clear pending status
+ * on the chip so that further errors will be reported
+ */
+
+static void amd76x_get_error_info (struct mem_ctl_info *mci,
+ struct amd76x_error_info *info)
+{
+ pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS,
+ &info->ecc_mode_status);
+
+ if (info->ecc_mode_status & BIT(8))
+ pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
+ (u32) BIT(8), (u32) BIT(8));
+
+ if (info->ecc_mode_status & BIT(9))
+ pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
+ (u32) BIT(9), (u32) BIT(9));
+}
+
+
+/**
+ * amd76x_process_error_info - Error check
+ * @mci: Memory controller
+ * @info: Previously fetched information from chip
+ * @handle_errors: 1 if we should do recovery
+ *
+ * Process the chip state and decide if an error has occurred.
+ * A return of 1 indicates an error. Also if handle_errors is true
+ * then attempt to handle and clean up after the error
+ */
+
+static int amd76x_process_error_info (struct mem_ctl_info *mci,
+ struct amd76x_error_info *info, int handle_errors)
+{
+ int error_found;
+ u32 row;
+
+ error_found = 0;
+
+ /*
+ * Check for an uncorrectable error
+ */
+ if (info->ecc_mode_status & BIT(8)) {
+ error_found = 1;
+
+ if (handle_errors) {
+ row = (info->ecc_mode_status >> 4) & 0xf;
+ edac_mc_handle_ue(mci,
+ mci->csrows[row].first_page, 0, row,
+ mci->ctl_name);
+ }
+ }
+
+ /*
+ * Check for a correctable error
+ */
+ if (info->ecc_mode_status & BIT(9)) {
+ error_found = 1;
+
+ if (handle_errors) {
+ row = info->ecc_mode_status & 0xf;
+ edac_mc_handle_ce(mci,
+ mci->csrows[row].first_page, 0, 0, row, 0,
+ mci->ctl_name);
+ }
+ }
+ return error_found;
+}
+
+/**
+ * amd76x_check - Poll the controller
+ * @mci: Memory controller
+ *
+ * Called by the poll handlers this function reads the status
+ * from the controller and checks for errors.
+ */
+
+static void amd76x_check(struct mem_ctl_info *mci)
+{
+ struct amd76x_error_info info;
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ amd76x_get_error_info(mci, &info);
+ amd76x_process_error_info(mci, &info, 1);
+}
+
+
+/**
+ * amd76x_probe1 - Perform set up for detected device
+ * @pdev; PCI device detected
+ * @dev_idx: Device type index
+ *
+ * We have found an AMD76x and now need to set up the memory
+ * controller status reporting. We configure and set up the
+ * memory controller reporting and claim the device.
+ */
+
+static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ int index;
+ struct mem_ctl_info *mci = NULL;
+ enum edac_type ems_modes[] = {
+ EDAC_NONE,
+ EDAC_EC,
+ EDAC_SECDED,
+ EDAC_SECDED
+ };
+ u32 ems;
+ u32 ems_mode;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
+ ems_mode = (ems >> 10) & 0x3;
+
+ mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
+
+ if (mci == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+ mci->pdev = pci_dev_get(pdev);
+ mci->mtype_cap = MEM_FLAG_RDDR;
+
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ mci->edac_cap = ems_mode ?
+ (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
+
+ mci->mod_name = BS_MOD_STR;
+ mci->mod_ver = "$Revision: 1.4.2.5 $";
+ mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
+ mci->edac_check = amd76x_check;
+ mci->ctl_page_to_phys = NULL;
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ struct csrow_info *csrow = &mci->csrows[index];
+ u32 mba;
+ u32 mba_base;
+ u32 mba_mask;
+ u32 dms;
+
+ /* find the DRAM Chip Select Base address and mask */
+ pci_read_config_dword(mci->pdev,
+ AMD76X_MEM_BASE_ADDR + (index * 4),
+ &mba);
+
+ if (!(mba & BIT(0)))
+ continue;
+
+ mba_base = mba & 0xff800000UL;
+ mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
+
+ pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS,
+ &dms);
+
+ csrow->first_page = mba_base >> PAGE_SHIFT;
+ csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ csrow->page_mask = mba_mask >> PAGE_SHIFT;
+ csrow->grain = csrow->nr_pages << PAGE_SHIFT;
+ csrow->mtype = MEM_RDDR;
+ csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+ csrow->edac_mode = ems_modes[ems_mode];
+ }
+
+ /* clear counters */
+ pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, (u32) (0x3 << 8),
+ (u32) (0x3 << 8));
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ return 0;
+
+fail:
+ if (mci) {
+ if(mci->pdev)
+ pci_dev_put(mci->pdev);
+ edac_mc_free(mci);
+ }
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit amd76x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* don't need to call pci_device_enable() */
+ return amd76x_probe1(pdev, ent->driver_data);
+}
+
+
+/**
+ * amd76x_remove_one - driver shutdown
+ * @pdev: PCI device being handed back
+ *
+ * Called when the driver is unloaded. Find the matching mci
+ * structure for the device then delete the mci and free the
+ * resources.
+ */
+
+static void __devexit amd76x_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
+ return;
+ if (edac_mc_del_mc(mci))
+ return;
+ pci_dev_put(mci->pdev);
+ edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
+ {PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ AMD762},
+ {PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ AMD761},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
+
+
+static struct pci_driver amd76x_driver = {
+ .name = BS_MOD_STR,
+ .probe = amd76x_init_one,
+ .remove = __devexit_p(amd76x_remove_one),
+ .id_table = amd76x_pci_tbl,
+};
+
+static int __init amd76x_init(void)
+{
+ return pci_register_driver(&amd76x_driver);
+}
+
+static void __exit amd76x_exit(void)
+{
+ pci_unregister_driver(&amd76x_driver);
+}
+
+module_init(amd76x_init);
+module_exit(amd76x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
new file mode 100644
index 00000000000..770a5a63307
--- /dev/null
+++ b/drivers/edac/e752x_edac.c
@@ -0,0 +1,1071 @@
+/*
+ * Intel e752x Memory Controller kernel module
+ * (C) 2004 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * See "enum e752x_chips" below for supported chipsets
+ *
+ * Written by Tom Zimmerman
+ *
+ * Contributors:
+ * Thayne Harbaugh at realmsys.com (?)
+ * Wang Zhenyu at intel.com
+ * Dave Jiang at mvista.com
+ *
+ * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_mc.h"
+
+
+#ifndef PCI_DEVICE_ID_INTEL_7520_0
+#define PCI_DEVICE_ID_INTEL_7520_0 0x3590
+#endif /* PCI_DEVICE_ID_INTEL_7520_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
+#define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
+#endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7525_0
+#define PCI_DEVICE_ID_INTEL_7525_0 0x359E
+#endif /* PCI_DEVICE_ID_INTEL_7525_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
+#define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
+#endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7320_0
+#define PCI_DEVICE_ID_INTEL_7320_0 0x3592
+#endif /* PCI_DEVICE_ID_INTEL_7320_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
+#define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
+#endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
+
+#define E752X_NR_CSROWS 8 /* number of csrows */
+
+
+/* E752X register addresses - device 0 function 0 */
+#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
+#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
+ /*
+ * 31:30 Device width row 7
+ * 01=x8 10=x4 11=x8 DDR2
+ * 27:26 Device width row 6
+ * 23:22 Device width row 5
+ * 19:20 Device width row 4
+ * 15:14 Device width row 3
+ * 11:10 Device width row 2
+ * 7:6 Device width row 1
+ * 3:2 Device width row 0
+ */
+#define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
+ /* FIXME:IS THIS RIGHT? */
+ /*
+ * 22 Number channels 0=1,1=2
+ * 19:18 DRB Granularity 32/64MB
+ */
+#define E752X_DRM 0x80 /* Dimm mapping register */
+#define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
+ /*
+ * 14:12 1 single A, 2 single B, 3 dual
+ */
+#define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
+#define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
+#define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
+#define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
+
+/* E752X register addresses - device 0 function 1 */
+#define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
+#define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
+#define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
+#define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
+#define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
+#define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
+#define E752X_SYSBUS_FERR 0x60 /* System buss first error reg (16b) */
+#define E752X_SYSBUS_NERR 0x62 /* System buss next error reg (16b) */
+#define E752X_SYSBUS_ERRMASK 0x64 /* System buss error mask reg (16b) */
+#define E752X_SYSBUS_SMICMD 0x6A /* System buss SMI command reg (16b) */
+#define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
+#define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
+#define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
+#define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI command reg (8b) */
+#define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
+#define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
+#define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
+#define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
+#define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
+#define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
+ /* error address register (32b) */
+ /*
+ * 31 Reserved
+ * 30:2 CE address (64 byte block 34:6)
+ * 1 Reserved
+ * 0 HiLoCS
+ */
+#define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */
+ /* error address register (32b) */
+ /*
+ * 31 Reserved
+ * 30:2 CE address (64 byte block 34:6)
+ * 1 Reserved
+ * 0 HiLoCS
+ */
+#define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
+ /* error address register (32b) */
+ /*
+ * 31 Reserved
+ * 30:2 CE address (64 byte block 34:6)
+ * 1 Reserved
+ * 0 HiLoCS
+ */
+#define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM first uncorrectable scrub memory */
+ /* error address register (32b) */
+ /*
+ * 31 Reserved
+ * 30:2 CE address (64 byte block 34:6)
+ * 1 Reserved
+ * 0 HiLoCS
+ */
+#define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
+ /* error syndrome register (16b) */
+#define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
+ /* error syndrome register (16b) */
+#define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
+
+/* ICH5R register addresses - device 30 function 0 */
+#define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
+#define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
+#define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
+
+enum e752x_chips {
+ E7520 = 0,
+ E7525 = 1,
+ E7320 = 2
+};
+
+
+struct e752x_pvt {
+ struct pci_dev *bridge_ck;
+ struct pci_dev *dev_d0f0;
+ struct pci_dev *dev_d0f1;
+ u32 tolm;
+ u32 remapbase;
+ u32 remaplimit;
+ int mc_symmetric;
+ u8 map[8];
+ int map_type;
+ const struct e752x_dev_info *dev_info;
+};
+
+
+struct e752x_dev_info {
+ u16 err_dev;
+ const char *ctl_name;
+};
+
+struct e752x_error_info {
+ u32 ferr_global;
+ u32 nerr_global;
+ u8 hi_ferr;
+ u8 hi_nerr;
+ u16 sysbus_ferr;
+ u16 sysbus_nerr;
+ u8 buf_ferr;
+ u8 buf_nerr;
+ u16 dram_ferr;
+ u16 dram_nerr;
+ u32 dram_sec1_add;
+ u32 dram_sec2_add;
+ u16 dram_sec1_syndrome;
+ u16 dram_sec2_syndrome;
+ u32 dram_ded_add;
+ u32 dram_scrb_add;
+ u32 dram_retr_add;
+};
+
+static const struct e752x_dev_info e752x_devs[] = {
+ [E7520] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
+ .ctl_name = "E7520"},
+ [E7525] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
+ .ctl_name = "E7525"},
+ [E7320] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
+ .ctl_name = "E7320"},
+};
+
+
+static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
+ unsigned long page)
+{
+ u32 remap;
+ struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ if (page < pvt->tolm)
+ return page;
+ if ((page >= 0x100000) && (page < pvt->remapbase))
+ return page;
+ remap = (page - pvt->tolm) + pvt->remapbase;
+ if (remap < pvt->remaplimit)
+ return remap;
+ printk(KERN_ERR "Invalid page %lx - out of range\n", page);
+ return pvt->tolm - 1;
+}
+
+static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
+ u32 sec1_add, u16 sec1_syndrome)
+{
+ u32 page;
+ int row;
+ int channel;
+ int i;
+ struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* convert the addr to 4k page */
+ page = sec1_add >> (PAGE_SHIFT - 4);
+
+ /* FIXME - check for -1 */
+ if (pvt->mc_symmetric) {
+ /* chip select are bits 14 & 13 */
+ row = ((page >> 1) & 3);
+ printk(KERN_WARNING
+ "Test row %d Table %d %d %d %d %d %d %d %d\n",
+ row, pvt->map[0], pvt->map[1], pvt->map[2],
+ pvt->map[3], pvt->map[4], pvt->map[5],
+ pvt->map[6], pvt->map[7]);
+
+ /* test for channel remapping */
+ for (i = 0; i < 8; i++) {
+ if (pvt->map[i] == row)
+ break;
+ }
+ printk(KERN_WARNING "Test computed row %d\n", i);
+ if (i < 8)
+ row = i;
+ else
+ printk(KERN_WARNING
+ "MC%d: row %d not found in remap table\n",
+ mci->mc_idx, row);
+ } else
+ row = edac_mc_find_csrow_by_page(mci, page);
+ /* 0 = channel A, 1 = channel B */
+ channel = !(error_one & 1);
+
+ if (!pvt->map_type)
+ row = 7 - row;
+ edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel,
+ "e752x CE");
+}
+
+
+static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
+ u32 sec1_add, u16 sec1_syndrome, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
+}
+
+static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, u32 ded_add,
+ u32 scrb_add)
+{
+ u32 error_2b, block_page;
+ int row;
+ struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ if (error_one & 0x0202) {
+ error_2b = ded_add;
+ /* convert to 4k address */
+ block_page = error_2b >> (PAGE_SHIFT - 4);
+ row = pvt->mc_symmetric ?
+ /* chip select are bits 14 & 13 */
+ ((block_page >> 1) & 3) :
+ edac_mc_find_csrow_by_page(mci, block_page);
+ edac_mc_handle_ue(mci, block_page, 0, row,
+ "e752x UE from Read");
+ }
+ if (error_one & 0x0404) {
+ error_2b = scrb_add;
+ /* convert to 4k address */
+ block_page = error_2b >> (PAGE_SHIFT - 4);
+ row = pvt->mc_symmetric ?
+ /* chip select are bits 14 & 13 */
+ ((block_page >> 1) & 3) :
+ edac_mc_find_csrow_by_page(mci, block_page);
+ edac_mc_handle_ue(mci, block_page, 0, row,
+ "e752x UE from Scruber");
+ }
+}
+
+static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
+ u32 ded_add, u32 scrb_add, int *error_found, int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_process_ue(mci, error_one, ded_add, scrb_add);
+}
+
+static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
+ int *error_found, int handle_error)
+{
+ *error_found = 1;
+
+ if (!handle_error)
+ return;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
+}
+
+static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
+ u32 retry_add)
+{
+ u32 error_1b, page;
+ int row;
+ struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+
+ error_1b = retry_add;
+ page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
+ row = pvt->mc_symmetric ?
+ ((page >> 1) & 3) : /* chip select are bits 14 & 13 */
+ edac_mc_find_csrow_by_page(mci, page);
+ printk(KERN_WARNING
+ "MC%d: CE page 0x%lx, row %d : Memory read retry\n",
+ mci->mc_idx, (long unsigned int) page, row);
+}
+
+static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
+ u32 retry_add, int *error_found, int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_process_ded_retry(mci, error, retry_add);
+}
+
+static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
+ int *error_found, int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ printk(KERN_WARNING "MC%d: Memory threshold CE\n",
+ mci->mc_idx);
+}
+
+static char *global_message[11] = {
+ "PCI Express C1", "PCI Express C", "PCI Express B1",
+ "PCI Express B", "PCI Express A1", "PCI Express A",
+ "DMA Controler", "HUB Interface", "System Bus",
+ "DRAM Controler", "Internal Buffer"
+};
+
+static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
+
+static void do_global_error(int fatal, u32 errors)
+{
+ int i;
+
+ for (i = 0; i < 11; i++) {
+ if (errors & (1 << i))
+ printk(KERN_WARNING "%sError %s\n",
+ fatal_message[fatal], global_message[i]);
+ }
+}
+
+static inline void global_error(int fatal, u32 errors, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_global_error(fatal, errors);
+}
+
+static char *hub_message[7] = {
+ "HI Address or Command Parity", "HI Illegal Access",
+ "HI Internal Parity", "Out of Range Access",
+ "HI Data Parity", "Enhanced Config Access",
+ "Hub Interface Target Abort"
+};
+
+static void do_hub_error(int fatal, u8 errors)
+{
+ int i;
+
+ for (i = 0; i < 7; i++) {
+ if (errors & (1 << i))
+ printk(KERN_WARNING "%sError %s\n",
+ fatal_message[fatal], hub_message[i]);
+ }
+}
+
+static inline void hub_error(int fatal, u8 errors, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_hub_error(fatal, errors);
+}
+
+static char *membuf_message[4] = {
+ "Internal PMWB to DRAM parity",
+ "Internal PMWB to System Bus Parity",
+ "Internal System Bus or IO to PMWB Parity",
+ "Internal DRAM to PMWB Parity"
+};
+
+static void do_membuf_error(u8 errors)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (errors & (1 << i))
+ printk(KERN_WARNING "Non-Fatal Error %s\n",
+ membuf_message[i]);
+ }
+}
+
+static inline void membuf_error(u8 errors, int *error_found, int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_membuf_error(errors);
+}
+
+#if 0
+char *sysbus_message[10] = {
+ "Addr or Request Parity",
+ "Data Strobe Glitch",
+ "Addr Strobe Glitch",
+ "Data Parity",
+ "Addr Above TOM",
+ "Non DRAM Lock Error",
+ "MCERR", "BINIT",
+ "Memory Parity",
+ "IO Subsystem Parity"
+};
+#endif /* 0 */
+
+static void do_sysbus_error(int fatal, u32 errors)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (errors & (1 << i))
+ printk(KERN_WARNING "%sError System Bus %s\n",
+ fatal_message[fatal], global_message[i]);
+ }
+}
+
+static inline void sysbus_error(int fatal, u32 errors, int *error_found,
+ int handle_error)
+{
+ *error_found = 1;
+
+ if (handle_error)
+ do_sysbus_error(fatal, errors);
+}
+
+static void e752x_check_hub_interface (struct e752x_error_info *info,
+ int *error_found, int handle_error)
+{
+ u8 stat8;
+
+ //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
+ stat8 = info->hi_ferr;
+ if(stat8 & 0x7f) { /* Error, so process */
+ stat8 &= 0x7f;
+ if(stat8 & 0x2b)
+ hub_error(1, stat8 & 0x2b, error_found, handle_error);
+ if(stat8 & 0x54)
+ hub_error(0, stat8 & 0x54, error_found, handle_error);
+ }
+ //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
+ stat8 = info->hi_nerr;
+ if(stat8 & 0x7f) { /* Error, so process */
+ stat8 &= 0x7f;
+ if (stat8 & 0x2b)
+ hub_error(1, stat8 & 0x2b, error_found, handle_error);
+ if(stat8 & 0x54)
+ hub_error(0, stat8 & 0x54, error_found, handle_error);
+ }
+}
+
+static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found,
+ int handle_error)
+{
+ u32 stat32, error32;
+
+ //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
+ stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
+
+ if (stat32 == 0)
+ return; /* no errors */
+
+ error32 = (stat32 >> 16) & 0x3ff;
+ stat32 = stat32 & 0x3ff;
+ if(stat32 & 0x083)
+ sysbus_error(1, stat32 & 0x083, error_found, handle_error);
+ if(stat32 & 0x37c)
+ sysbus_error(0, stat32 & 0x37c, error_found, handle_error);
+ if(error32 & 0x083)
+ sysbus_error(1, error32 & 0x083, error_found, handle_error);
+ if(error32 & 0x37c)
+ sysbus_error(0, error32 & 0x37c, error_found, handle_error);
+}
+
+static void e752x_check_membuf (struct e752x_error_info *info, int *error_found,
+ int handle_error)
+{
+ u8 stat8;
+
+ stat8 = info->buf_ferr;
+ if (stat8 & 0x0f) { /* Error, so process */
+ stat8 &= 0x0f;
+ membuf_error(stat8, error_found, handle_error);
+ }
+ stat8 = info->buf_nerr;
+ if (stat8 & 0x0f) { /* Error, so process */
+ stat8 &= 0x0f;
+ membuf_error(stat8, error_found, handle_error);
+ }
+}
+
+static void e752x_check_dram (struct mem_ctl_info *mci,
+ struct e752x_error_info *info, int *error_found, int handle_error)
+{
+ u16 error_one, error_next;
+
+ error_one = info->dram_ferr;
+ error_next = info->dram_nerr;
+
+ /* decode and report errors */
+ if(error_one & 0x0101) /* check first error correctable */
+ process_ce(mci, error_one, info->dram_sec1_add,
+ info->dram_sec1_syndrome, error_found,
+ handle_error);
+
+ if(error_next & 0x0101) /* check next error correctable */
+ process_ce(mci, error_next, info->dram_sec2_add,
+ info->dram_sec2_syndrome, error_found,
+ handle_error);
+
+ if(error_one & 0x4040)
+ process_ue_no_info_wr(mci, error_found, handle_error);
+
+ if(error_next & 0x4040)
+ process_ue_no_info_wr(mci, error_found, handle_error);
+
+ if(error_one & 0x2020)
+ process_ded_retry(mci, error_one, info->dram_retr_add,
+ error_found, handle_error);
+
+ if(error_next & 0x2020)
+ process_ded_retry(mci, error_next, info->dram_retr_add,
+ error_found, handle_error);
+
+ if(error_one & 0x0808)
+ process_threshold_ce(mci, error_one, error_found,
+ handle_error);
+
+ if(error_next & 0x0808)
+ process_threshold_ce(mci, error_next, error_found,
+ handle_error);
+
+ if(error_one & 0x0606)
+ process_ue(mci, error_one, info->dram_ded_add,
+ info->dram_scrb_add, error_found, handle_error);
+
+ if(error_next & 0x0606)
+ process_ue(mci, error_next, info->dram_ded_add,
+ info->dram_scrb_add, error_found, handle_error);
+}
+
+static void e752x_get_error_info (struct mem_ctl_info *mci,
+ struct e752x_error_info *info)
+{
+ struct pci_dev *dev;
+ struct e752x_pvt *pvt;
+
+ memset(info, 0, sizeof(*info));
+ pvt = (struct e752x_pvt *) mci->pvt_info;
+ dev = pvt->dev_d0f1;
+
+ pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
+
+ if (info->ferr_global) {
+ pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr);
+ pci_read_config_word(dev, E752X_SYSBUS_FERR,
+ &info->sysbus_ferr);
+ pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
+ pci_read_config_word(dev, E752X_DRAM_FERR,
+ &info->dram_ferr);
+ pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
+ &info->dram_sec1_add);
+ pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
+ &info->dram_sec1_syndrome);
+ pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
+ &info->dram_ded_add);
+ pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
+ &info->dram_scrb_add);
+ pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
+ &info->dram_retr_add);
+
+ if (info->hi_ferr & 0x7f)
+ pci_write_config_byte(dev, E752X_HI_FERR,
+ info->hi_ferr);
+
+ if (info->sysbus_ferr)
+ pci_write_config_word(dev, E752X_SYSBUS_FERR,
+ info->sysbus_ferr);
+
+ if (info->buf_ferr & 0x0f)
+ pci_write_config_byte(dev, E752X_BUF_FERR,
+ info->buf_ferr);
+
+ if (info->dram_ferr)
+ pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
+ info->dram_ferr, info->dram_ferr);
+
+ pci_write_config_dword(dev, E752X_FERR_GLOBAL,
+ info->ferr_global);
+ }
+
+ pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
+
+ if (info->nerr_global) {
+ pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr);
+ pci_read_config_word(dev, E752X_SYSBUS_NERR,
+ &info->sysbus_nerr);
+ pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
+ pci_read_config_word(dev, E752X_DRAM_NERR,
+ &info->dram_nerr);
+ pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
+ &info->dram_sec2_add);
+ pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
+ &info->dram_sec2_syndrome);
+
+ if (info->hi_nerr & 0x7f)
+ pci_write_config_byte(dev, E752X_HI_NERR,
+ info->hi_nerr);
+
+ if (info->sysbus_nerr)
+ pci_write_config_word(dev, E752X_SYSBUS_NERR,
+ info->sysbus_nerr);
+
+ if (info->buf_nerr & 0x0f)
+ pci_write_config_byte(dev, E752X_BUF_NERR,
+ info->buf_nerr);
+
+ if (info->dram_nerr)
+ pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
+ info->dram_nerr, info->dram_nerr);
+
+ pci_write_config_dword(dev, E752X_NERR_GLOBAL,
+ info->nerr_global);
+ }
+}
+
+static int e752x_process_error_info (struct mem_ctl_info *mci,
+ struct e752x_error_info *info, int handle_errors)
+{
+ u32 error32, stat32;
+ int error_found;
+
+ error_found = 0;
+ error32 = (info->ferr_global >> 18) & 0x3ff;
+ stat32 = (info->ferr_global >> 4) & 0x7ff;
+
+ if (error32)
+ global_error(1, error32, &error_found, handle_errors);
+
+ if (stat32)
+ global_error(0, stat32, &error_found, handle_errors);
+
+ error32 = (info->nerr_global >> 18) & 0x3ff;
+ stat32 = (info->nerr_global >> 4) & 0x7ff;
+
+ if (error32)
+ global_error(1, error32, &error_found, handle_errors);
+
+ if (stat32)
+ global_error(0, stat32, &error_found, handle_errors);
+
+ e752x_check_hub_interface(info, &error_found, handle_errors);
+ e752x_check_sysbus(info, &error_found, handle_errors);
+ e752x_check_membuf(info, &error_found, handle_errors);
+ e752x_check_dram(mci, info, &error_found, handle_errors);
+ return error_found;
+}
+
+static void e752x_check(struct mem_ctl_info *mci)
+{
+ struct e752x_error_info info;
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ e752x_get_error_info(mci, &info);
+ e752x_process_error_info(mci, &info, 1);
+}
+
+static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ int index;
+ u16 pci_data, stat;
+ u32 stat32;
+ u16 stat16;
+ u8 stat8;
+ struct mem_ctl_info *mci = NULL;
+ struct e752x_pvt *pvt = NULL;
+ u16 ddrcsr;
+ u32 drc;
+ int drc_chan; /* Number of channels 0=1chan,1=2chan */
+ int drc_drbg; /* DRB granularity 0=64mb,1=128mb */
+ int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
+ u32 dra;
+ unsigned long last_cumul_size;
+ struct pci_dev *pres_dev;
+ struct pci_dev *dev = NULL;
+
+ debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
+ debugf0("Starting Probe1\n");
+
+ /* enable device 0 function 1 */
+ pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
+ stat8 |= (1 << 5);
+ pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
+
+ /* need to find out the number of channels */
+ pci_read_config_dword(pdev, E752X_DRC, &drc);
+ pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
+ /* FIXME: should check >>12 or 0xf, true for all? */
+ /* Dual channel = 1, Single channel = 0 */
+ drc_chan = (((ddrcsr >> 12) & 3) == 3);
+ drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
+ drc_ddim = (drc >> 20) & 0x3;
+
+ mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1);
+
+ if (mci == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
+
+ mci->mtype_cap = MEM_FLAG_RDDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
+ EDAC_FLAG_S4ECD4ED;
+ /* FIXME - what if different memory types are in different csrows? */
+ mci->mod_name = BS_MOD_STR;
+ mci->mod_ver = "$Revision: 1.5.2.11 $";
+ mci->pdev = pdev;
+
+ debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
+ pvt = (struct e752x_pvt *) mci->pvt_info;
+ pvt->dev_info = &e752x_devs[dev_idx];
+ pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
+ pvt->dev_info->err_dev,
+ pvt->bridge_ck);
+ if (pvt->bridge_ck == NULL)
+ pvt->bridge_ck = pci_scan_single_device(pdev->bus,
+ PCI_DEVFN(0, 1));
+ if (pvt->bridge_ck == NULL) {
+ printk(KERN_ERR "MC: error reporting device not found:"
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
+ goto fail;
+ }
+ pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
+
+ debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
+ mci->ctl_name = pvt->dev_info->ctl_name;
+ mci->edac_check = e752x_check;
+ mci->ctl_page_to_phys = ctl_page_to_phys;
+
+ /* find out the device types */
+ pci_read_config_dword(pdev, E752X_DRA, &dra);
+
+ /*
+ * The dram row boundary (DRB) reg values are boundary address for
+ * each DRAM row with a granularity of 64 or 128MB (single/dual
+ * channel operation). DRB regs are cumulative; therefore DRB7 will
+ * contain the total memory contained in all eight rows.
+ */
+ for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+ u8 value;
+ u32 cumul_size;
+ /* mem_dev 0=x8, 1=x4 */
+ int mem_dev = (dra >> (index * 4 + 2)) & 0x3;
+ struct csrow_info *csrow = &mci->csrows[index];
+
+ mem_dev = (mem_dev == 2);
+ pci_read_config_byte(mci->pdev, E752X_DRB + index, &value);
+ /* convert a 128 or 64 MiB DRB to a page size. */
+ cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
+ debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
+ __func__, index, cumul_size);
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+ csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ csrow->mtype = MEM_RDDR; /* only one type supported */
+ csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ csrow->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ csrow->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ csrow->edac_mode = EDAC_NONE;
+ }
+
+ /* Fill in the memory map table */
+ {
+ u8 value;
+ u8 last = 0;
+ u8 row = 0;
+ for (index = 0; index < 8; index += 2) {
+
+ pci_read_config_byte(mci->pdev, E752X_DRB + index,
+ &value);
+ /* test if there is a dimm in this slot */
+ if (value == last) {
+ /* no dimm in the slot, so flag it as empty */
+ pvt->map[index] = 0xff;
+ pvt->map[index + 1] = 0xff;
+ } else { /* there is a dimm in the slot */
+ pvt->map[index] = row;
+ row++;
+ last = value;
+ /* test the next value to see if the dimm is
+ double sided */
+ pci_read_config_byte(mci->pdev,
+ E752X_DRB + index + 1,
+ &value);
+ pvt->map[index + 1] = (value == last) ?
+ 0xff : /* the dimm is single sided,
+ so flag as empty */
+ row; /* this is a double sided dimm
+ to save the next row # */
+ row++;
+ last = value;
+ }
+ }
+ }
+
+ /* set the map type. 1 = normal, 0 = reversed */
+ pci_read_config_byte(mci->pdev, E752X_DRM, &stat8);
+ pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
+
+ mci->edac_cap |= EDAC_FLAG_NONE;
+
+ debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
+ __func__);
+ /* load the top of low memory, remap base, and remap limit vars */
+ pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data);
+ pvt->tolm = ((u32) pci_data) << 4;
+ pci_read_config_word(mci->pdev, E752X_REMAPBASE, &pci_data);
+ pvt->remapbase = ((u32) pci_data) << 14;
+ pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data);
+ pvt->remaplimit = ((u32) pci_data) << 14;
+ printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
+ pvt->remapbase, pvt->remaplimit);
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n",
+ __func__);
+ goto fail;
+ }
+
+ /* Walk through the PCI table and clear errors */
+ switch (dev_idx) {
+ case E7520:
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_7520_0, NULL);
+ break;
+ case E7525:
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_7525_0, NULL);
+ break;
+ case E7320:
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_7320_0, NULL);
+ break;
+ }
+
+
+ pvt->dev_d0f0 = dev;
+ for (pres_dev = dev;
+ ((struct pci_dev *) pres_dev->global_list.next != dev);
+ pres_dev = (struct pci_dev *) pres_dev->global_list.next) {
+ pci_read_config_dword(pres_dev, PCI_COMMAND, &stat32);
+ stat = (u16) (stat32 >> 16);
+ /* clear any error bits */
+ if (stat32 & ((1 << 6) + (1 << 8)))
+ pci_write_config_word(pres_dev, PCI_STATUS, stat);
+ }
+ /* find the error reporting device and clear errors */
+ dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
+ /* Turn off error disable & SMI in case the BIOS turned it on */
+ pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
+ pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
+ pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
+ pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
+ pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
+ pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
+ pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
+ pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
+ /* clear other MCH errors */
+ pci_read_config_dword(dev, E752X_FERR_GLOBAL, &stat32);
+ pci_write_config_dword(dev, E752X_FERR_GLOBAL, stat32);
+ pci_read_config_dword(dev, E752X_NERR_GLOBAL, &stat32);
+ pci_write_config_dword(dev, E752X_NERR_GLOBAL, stat32);
+ pci_read_config_byte(dev, E752X_HI_FERR, &stat8);
+ pci_write_config_byte(dev, E752X_HI_FERR, stat8);
+ pci_read_config_byte(dev, E752X_HI_NERR, &stat8);
+ pci_write_config_byte(dev, E752X_HI_NERR, stat8);
+ pci_read_config_dword(dev, E752X_SYSBUS_FERR, &stat32);
+ pci_write_config_dword(dev, E752X_SYSBUS_FERR, stat32);
+ pci_read_config_byte(dev, E752X_BUF_FERR, &stat8);
+ pci_write_config_byte(dev, E752X_BUF_FERR, stat8);
+ pci_read_config_byte(dev, E752X_BUF_NERR, &stat8);
+ pci_write_config_byte(dev, E752X_BUF_NERR, stat8);
+ pci_read_config_word(dev, E752X_DRAM_FERR, &stat16);
+ pci_write_config_word(dev, E752X_DRAM_FERR, stat16);
+ pci_read_config_word(dev, E752X_DRAM_NERR, &stat16);
+ pci_write_config_word(dev, E752X_DRAM_NERR, stat16);
+
+ /* get this far and it's successful */
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ return 0;
+
+fail:
+ if (mci) {
+ if (pvt->dev_d0f0)
+ pci_dev_put(pvt->dev_d0f0);
+ if (pvt->dev_d0f1)
+ pci_dev_put(pvt->dev_d0f1);
+ if (pvt->bridge_ck)
+ pci_dev_put(pvt->bridge_ck);
+ edac_mc_free(mci);
+ }
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit e752x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* wake up and enable device */
+ if(pci_enable_device(pdev) < 0)
+ return -EIO;
+ return e752x_probe1(pdev, ent->driver_data);
+}
+
+
+static void __devexit e752x_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct e752x_pvt *pvt;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
+ return;
+
+ if (edac_mc_del_mc(mci))
+ return;
+
+ pvt = (struct e752x_pvt *) mci->pvt_info;
+ pci_dev_put(pvt->dev_d0f0);
+ pci_dev_put(pvt->dev_d0f1);
+ pci_dev_put(pvt->bridge_ck);
+ edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
+ {PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7520},
+ {PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7525},
+ {PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7320},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
+
+
+static struct pci_driver e752x_driver = {
+ name: BS_MOD_STR,
+ probe: e752x_init_one,
+ remove: __devexit_p(e752x_remove_one),
+ id_table: e752x_pci_tbl,
+};
+
+
+static int __init e752x_init(void)
+{
+ int pci_rc;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ pci_rc = pci_register_driver(&e752x_driver);
+ return (pci_rc < 0) ? pci_rc : 0;
+}
+
+
+static void __exit e752x_exit(void)
+{
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ pci_unregister_driver(&e752x_driver);
+}
+
+
+module_init(e752x_init);
+module_exit(e752x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
+MODULE_DESCRIPTION("MC support for Intel e752x memory controllers");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
new file mode 100644
index 00000000000..d5e320dfc66
--- /dev/null
+++ b/drivers/edac/e7xxx_edac.c
@@ -0,0 +1,558 @@
+/*
+ * Intel e7xxx Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * See "enum e7xxx_chips" below for supported chipsets
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * Contributors:
+ * Eric Biederman (Linux Networx)
+ * Tom Zimmerman (Linux Networx)
+ * Jim Garlick (Lawrence Livermore National Labs)
+ * Dave Peterson (Lawrence Livermore National Labs)
+ * That One Guy (Some other place)
+ * Wang Zhenyu (intel.com)
+ *
+ * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include "edac_mc.h"
+
+
+#ifndef PCI_DEVICE_ID_INTEL_7205_0
+#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
+#endif /* PCI_DEVICE_ID_INTEL_7205_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR
+#define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551
+#endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7500_0
+#define PCI_DEVICE_ID_INTEL_7500_0 0x2540
+#endif /* PCI_DEVICE_ID_INTEL_7500_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR
+#define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541
+#endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7501_0
+#define PCI_DEVICE_ID_INTEL_7501_0 0x254c
+#endif /* PCI_DEVICE_ID_INTEL_7501_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR
+#define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541
+#endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7505_0
+#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
+#endif /* PCI_DEVICE_ID_INTEL_7505_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR
+#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551
+#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
+
+
+#define E7XXX_NR_CSROWS 8 /* number of csrows */
+#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
+
+
+/* E7XXX register addresses - device 0 function 0 */
+#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
+#define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */
+ /*
+ * 31 Device width row 7 0=x8 1=x4
+ * 27 Device width row 6
+ * 23 Device width row 5
+ * 19 Device width row 4
+ * 15 Device width row 3
+ * 11 Device width row 2
+ * 7 Device width row 1
+ * 3 Device width row 0
+ */
+#define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */
+ /*
+ * 22 Number channels 0=1,1=2
+ * 19:18 DRB Granularity 32/64MB
+ */
+#define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
+#define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
+#define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
+
+/* E7XXX register addresses - device 0 function 1 */
+#define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */
+#define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */
+#define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */
+ /* error address register (32b) */
+ /*
+ * 31:28 Reserved
+ * 27:6 CE address (4k block 33:12)
+ * 5:0 Reserved
+ */
+#define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */
+ /* error address register (32b) */
+ /*
+ * 31:28 Reserved
+ * 27:6 CE address (4k block 33:12)
+ * 5:0 Reserved
+ */
+#define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */
+ /* error syndrome register (16b) */
+
+enum e7xxx_chips {
+ E7500 = 0,
+ E7501,
+ E7505,
+ E7205,
+};
+
+
+struct e7xxx_pvt {
+ struct pci_dev *bridge_ck;
+ u32 tolm;
+ u32 remapbase;
+ u32 remaplimit;
+ const struct e7xxx_dev_info *dev_info;
+};
+
+
+struct e7xxx_dev_info {
+ u16 err_dev;
+ const char *ctl_name;
+};
+
+
+struct e7xxx_error_info {
+ u8 dram_ferr;
+ u8 dram_nerr;
+ u32 dram_celog_add;
+ u16 dram_celog_syndrome;
+ u32 dram_uelog_add;
+};
+
+static const struct e7xxx_dev_info e7xxx_devs[] = {
+ [E7500] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
+ .ctl_name = "E7500"},
+ [E7501] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
+ .ctl_name = "E7501"},
+ [E7505] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
+ .ctl_name = "E7505"},
+ [E7205] = {
+ .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
+ .ctl_name = "E7205"},
+};
+
+
+/* FIXME - is this valid for both SECDED and S4ECD4ED? */
+static inline int e7xxx_find_channel(u16 syndrome)
+{
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ if ((syndrome & 0xff00) == 0)
+ return 0;
+ if ((syndrome & 0x00ff) == 0)
+ return 1;
+ if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
+ return 0;
+ return 1;
+}
+
+
+static unsigned long
+ctl_page_to_phys(struct mem_ctl_info *mci, unsigned long page)
+{
+ u32 remap;
+ struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ if ((page < pvt->tolm) ||
+ ((page >= 0x100000) && (page < pvt->remapbase)))
+ return page;
+ remap = (page - pvt->tolm) + pvt->remapbase;
+ if (remap < pvt->remaplimit)
+ return remap;
+ printk(KERN_ERR "Invalid page %lx - out of range\n", page);
+ return pvt->tolm - 1;
+}
+
+
+static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+{
+ u32 error_1b, page;
+ u16 syndrome;
+ int row;
+ int channel;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* read the error address */
+ error_1b = info->dram_celog_add;
+ /* FIXME - should use PAGE_SHIFT */
+ page = error_1b >> 6; /* convert the address to 4k page */
+ /* read the syndrome */
+ syndrome = info->dram_celog_syndrome;
+ /* FIXME - check for -1 */
+ row = edac_mc_find_csrow_by_page(mci, page);
+ /* convert syndrome to channel */
+ channel = e7xxx_find_channel(syndrome);
+ edac_mc_handle_ce(mci, page, 0, syndrome, row, channel,
+ "e7xxx CE");
+}
+
+
+static void process_ce_no_info(struct mem_ctl_info *mci)
+{
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
+}
+
+
+static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+{
+ u32 error_2b, block_page;
+ int row;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* read the error address */
+ error_2b = info->dram_uelog_add;
+ /* FIXME - should use PAGE_SHIFT */
+ block_page = error_2b >> 6; /* convert to 4k address */
+ row = edac_mc_find_csrow_by_page(mci, block_page);
+ edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
+}
+
+
+static void process_ue_no_info(struct mem_ctl_info *mci)
+{
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
+}
+
+
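+/*
+ * In DRAM_FERR/DRAM_NERR, bit 0 flags a correctable error and bit 1 an
+ * uncorrectable error; the driver clears both by writing the bits back
+ * via pci_write_bits8() with a 0x03 mask, as done below.
+ */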
+static void e7xxx_get_error_info (struct mem_ctl_info *mci,
+ struct e7xxx_error_info *info)
+{
+ struct e7xxx_pvt *pvt;
+
+ pvt = (struct e7xxx_pvt *) mci->pvt_info;
+ pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR,
+ &info->dram_ferr);
+ pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR,
+ &info->dram_nerr);
+
+ if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
+ pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
+ &info->dram_celog_add);
+ pci_read_config_word(pvt->bridge_ck,
+ E7XXX_DRAM_CELOG_SYNDROME, &info->dram_celog_syndrome);
+ }
+
+ if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
+ pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
+ &info->dram_uelog_add);
+
+ if (info->dram_ferr & 3)
+ pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03,
+ 0x03);
+
+ if (info->dram_nerr & 3)
+ pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03,
+ 0x03);
+}
+
+
+static int e7xxx_process_error_info (struct mem_ctl_info *mci,
+ struct e7xxx_error_info *info, int handle_errors)
+{
+ int error_found;
+
+ error_found = 0;
+
+ /* decode and report errors */
+ if (info->dram_ferr & 1) { /* check first error correctable */
+ error_found = 1;
+
+ if (handle_errors)
+ process_ce(mci, info);
+ }
+
+ if (info->dram_ferr & 2) { /* check first error uncorrectable */
+ error_found = 1;
+
+ if (handle_errors)
+ process_ue(mci, info);
+ }
+
+ if (info->dram_nerr & 1) { /* check next error correctable */
+ error_found = 1;
+
+ if (handle_errors) {
+ if (info->dram_ferr & 1)
+ process_ce_no_info(mci);
+ else
+ process_ce(mci, info);
+ }
+ }
+
+ if (info->dram_nerr & 2) { /* check next error uncorrectable */
+ error_found = 1;
+
+ if (handle_errors) {
+ if (info->dram_ferr & 2)
+ process_ue_no_info(mci);
+ else
+ process_ue(mci, info);
+ }
+ }
+
+ return error_found;
+}
+
+
+static void e7xxx_check(struct mem_ctl_info *mci)
+{
+ struct e7xxx_error_info info;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ e7xxx_get_error_info(mci, &info);
+ e7xxx_process_error_info(mci, &info, 1);
+}
+
+
+static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ int index;
+ u16 pci_data;
+ struct mem_ctl_info *mci = NULL;
+ struct e7xxx_pvt *pvt = NULL;
+ u32 drc;
+ int drc_chan = 1; /* Number of channels 0=1chan,1=2chan */
+ int drc_drbg = 1; /* DRB granularity 0=32mb,1=64mb */
+ int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
+ u32 dra;
+ unsigned long last_cumul_size;
+
+
+ debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
+
+ /* need to find out the number of channels */
+ pci_read_config_dword(pdev, E7XXX_DRC, &drc);
+ /* only e7501 can be single channel */
+ if (dev_idx == E7501) {
+ drc_chan = ((drc >> 22) & 0x1);
+ drc_drbg = (drc >> 18) & 0x3;
+ }
+ drc_ddim = (drc >> 20) & 0x3;
+
+ mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
+
+ if (mci == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
+
+ mci->mtype_cap = MEM_FLAG_RDDR;
+ mci->edac_ctl_cap =
+ EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED;
+ /* FIXME - what if different memory types are in different csrows? */
+ mci->mod_name = BS_MOD_STR;
+ mci->mod_ver = "$Revision: 1.5.2.9 $";
+ mci->pdev = pdev;
+
+ debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
+ pvt = (struct e7xxx_pvt *) mci->pvt_info;
+ pvt->dev_info = &e7xxx_devs[dev_idx];
+ pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
+ pvt->dev_info->err_dev,
+ pvt->bridge_ck);
+ if (!pvt->bridge_ck) {
+ printk(KERN_ERR
+		       "MC: error reporting device not found: "
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
+ goto fail;
+ }
+
+ debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
+ mci->ctl_name = pvt->dev_info->ctl_name;
+
+ mci->edac_check = e7xxx_check;
+ mci->ctl_page_to_phys = ctl_page_to_phys;
+
+ /* find out the device types */
+ pci_read_config_dword(pdev, E7XXX_DRA, &dra);
+
+ /*
+ * The dram row boundary (DRB) reg values are boundary address
+ * for each DRAM row with a granularity of 32 or 64MB (single/dual
+ * channel operation). DRB regs are cumulative; therefore DRB7 will
+ * contain the total memory contained in all eight rows.
+ */
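+	/*
+	 * For example, with 64MiB granularity (drc_drbg = 1) and 4KiB
+	 * pages, a DRB value of 0x08 yields 0x08 << (25 + 1 - 12) =
+	 * 0x20000 pages, i.e. 512MiB of cumulative memory up to that row.
+	 */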
+ for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+ u8 value;
+ u32 cumul_size;
+ /* mem_dev 0=x8, 1=x4 */
+ int mem_dev = (dra >> (index * 4 + 3)) & 0x1;
+ struct csrow_info *csrow = &mci->csrows[index];
+
+ pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value);
+ /* convert a 64 or 32 MiB DRB to a page size. */
+ cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
+ debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
+ __func__, index, cumul_size);
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+ csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ csrow->mtype = MEM_RDDR; /* only one type supported */
+ csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ csrow->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ csrow->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ csrow->edac_mode = EDAC_NONE;
+ }
+
+ mci->edac_cap |= EDAC_FLAG_NONE;
+
+ debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
+ __func__);
+ /* load the top of low memory, remap base, and remap limit vars */
+ pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data);
+ pvt->tolm = ((u32) pci_data) << 4;
+ pci_read_config_word(mci->pdev, E7XXX_REMAPBASE, &pci_data);
+ pvt->remapbase = ((u32) pci_data) << 14;
+ pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data);
+ pvt->remaplimit = ((u32) pci_data) << 14;
+ printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
+ pvt->remapbase, pvt->remaplimit);
+
+ /* clear any pending errors, or initial state bits */
+ pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
+ pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
+
+ if (edac_mc_add_mc(mci) != 0) {
+ debugf3("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n",
+ __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ return 0;
+
+fail:
+ if (mci != NULL) {
+ if(pvt != NULL && pvt->bridge_ck)
+ pci_dev_put(pvt->bridge_ck);
+ edac_mc_free(mci);
+ }
+
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit
+e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* wake up and enable device */
+ return pci_enable_device(pdev) ?
+ -EIO : e7xxx_probe1(pdev, ent->driver_data);
+}
+
+
+static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct e7xxx_pvt *pvt;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if (((mci = edac_mc_find_mci_by_pdev(pdev)) != 0) &&
+ edac_mc_del_mc(mci)) {
+ pvt = (struct e7xxx_pvt *) mci->pvt_info;
+ pci_dev_put(pvt->bridge_ck);
+ edac_mc_free(mci);
+ }
+}
+
+
+static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
+ {PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7205},
+ {PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7500},
+ {PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7501},
+ {PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7505},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
+
+
+static struct pci_driver e7xxx_driver = {
+ .name = BS_MOD_STR,
+ .probe = e7xxx_init_one,
+ .remove = __devexit_p(e7xxx_remove_one),
+ .id_table = e7xxx_pci_tbl,
+};
+
+
+static int __init e7xxx_init(void)
+{
+ return pci_register_driver(&e7xxx_driver);
+}
+
+
+static void __exit e7xxx_exit(void)
+{
+ pci_unregister_driver(&e7xxx_driver);
+}
+
+module_init(e7xxx_init);
+module_exit(e7xxx_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
+	      "Based on work by Dan Hollis et al");
+MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
new file mode 100644
index 00000000000..4be9bd0a126
--- /dev/null
+++ b/drivers/edac/edac_mc.c
@@ -0,0 +1,2209 @@
+/*
+ * edac_mc kernel module
+ * (C) 2005 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * Modified by Dave Peterson and Doug Thompson
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/edac.h>
+
+#include "edac_mc.h"
+
+#define EDAC_MC_VERSION "edac_mc Ver: 2.0.0 " __DATE__
+
+#ifdef CONFIG_EDAC_DEBUG
+/* Values of 0 to 4 will generate output */
+int edac_debug_level = 1;
+EXPORT_SYMBOL(edac_debug_level);
+#endif
+
+/* EDAC Controls, setable by module parameter, and sysfs */
+static int log_ue = 1;
+static int log_ce = 1;
+static int panic_on_ue = 1;
+static int poll_msec = 1000;
+
+static int check_pci_parity = 0;	/* default: do not check PCI parity */
+static int panic_on_pci_parity; /* default no panic on PCI Parity */
+static atomic_t pci_parity_count = ATOMIC_INIT(0);
+
+/* lock to memory controller's control array */
+static DECLARE_MUTEX(mem_ctls_mutex);
+static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
+
+/* Structure of the whitelist and blacklist arrays */
+struct edac_pci_device_list {
+ unsigned int vendor; /* Vendor ID */
+	unsigned int device;	/* Device ID */
+};
+
+
+#define MAX_LISTED_PCI_DEVICES 32
+
+/* List of PCI devices (vendor-id:device-id) that should be skipped */
+static struct edac_pci_device_list pci_blacklist[MAX_LISTED_PCI_DEVICES];
+static int pci_blacklist_count;
+
+/* List of PCI devices (vendor-id:device-id) that should be scanned */
+static struct edac_pci_device_list pci_whitelist[MAX_LISTED_PCI_DEVICES];
+static int pci_whitelist_count;
+
+/* START sysfs data and methods */
+
+static const char *mem_types[] = {
+ [MEM_EMPTY] = "Empty",
+ [MEM_RESERVED] = "Reserved",
+ [MEM_UNKNOWN] = "Unknown",
+ [MEM_FPM] = "FPM",
+ [MEM_EDO] = "EDO",
+ [MEM_BEDO] = "BEDO",
+ [MEM_SDR] = "Unbuffered-SDR",
+ [MEM_RDR] = "Registered-SDR",
+ [MEM_DDR] = "Unbuffered-DDR",
+ [MEM_RDDR] = "Registered-DDR",
+ [MEM_RMBS] = "RMBS"
+};
+
+static const char *dev_types[] = {
+ [DEV_UNKNOWN] = "Unknown",
+ [DEV_X1] = "x1",
+ [DEV_X2] = "x2",
+ [DEV_X4] = "x4",
+ [DEV_X8] = "x8",
+ [DEV_X16] = "x16",
+ [DEV_X32] = "x32",
+ [DEV_X64] = "x64"
+};
+
+static const char *edac_caps[] = {
+ [EDAC_UNKNOWN] = "Unknown",
+ [EDAC_NONE] = "None",
+ [EDAC_RESERVED] = "Reserved",
+ [EDAC_PARITY] = "PARITY",
+ [EDAC_EC] = "EC",
+ [EDAC_SECDED] = "SECDED",
+ [EDAC_S2ECD2ED] = "S2ECD2ED",
+ [EDAC_S4ECD4ED] = "S4ECD4ED",
+ [EDAC_S8ECD8ED] = "S8ECD8ED",
+ [EDAC_S16ECD16ED] = "S16ECD16ED"
+};
+
+
+/* sysfs object: /sys/devices/system/edac */
+static struct sysdev_class edac_class = {
+ set_kset_name("edac"),
+};
+
+/* sysfs objects:
+ * /sys/devices/system/edac/mc
+ * /sys/devices/system/edac/pci
+ */
+static struct kobject edac_memctrl_kobj;
+static struct kobject edac_pci_kobj;
+
+/*
+ * /sys/devices/system/edac/mc;
+ * data structures and methods
+ */
+static ssize_t memctrl_string_show(void *ptr, char *buffer)
+{
+ char *value = (char*) ptr;
+ return sprintf(buffer, "%s\n", value);
+}
+
+static ssize_t memctrl_int_show(void *ptr, char *buffer)
+{
+ int *value = (int*) ptr;
+ return sprintf(buffer, "%d\n", *value);
+}
+
+static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
+{
+ int *value = (int*) ptr;
+
+ if (isdigit(*buffer))
+ *value = simple_strtoul(buffer, NULL, 0);
+
+ return count;
+}
+
+struct memctrl_dev_attribute {
+ struct attribute attr;
+ void *value;
+ ssize_t (*show)(void *,char *);
+ ssize_t (*store)(void *, const char *, size_t);
+};
+
+/* Set of show/store abstract level functions for memory control object */
+static ssize_t
+memctrl_dev_show(struct kobject *kobj, struct attribute *attr, char *buffer)
+{
+ struct memctrl_dev_attribute *memctrl_dev;
+ memctrl_dev = (struct memctrl_dev_attribute*)attr;
+
+ if (memctrl_dev->show)
+ return memctrl_dev->show(memctrl_dev->value, buffer);
+ return -EIO;
+}
+
+static ssize_t
+memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct memctrl_dev_attribute *memctrl_dev;
+ memctrl_dev = (struct memctrl_dev_attribute*)attr;
+
+ if (memctrl_dev->store)
+ return memctrl_dev->store(memctrl_dev->value, buffer, count);
+ return -EIO;
+}
+
+static struct sysfs_ops memctrlfs_ops = {
+ .show = memctrl_dev_show,
+ .store = memctrl_dev_store
+};
+
+#define MEMCTRL_ATTR(_name,_mode,_show,_store) \
+struct memctrl_dev_attribute attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = &_name, \
+ .show = _show, \
+ .store = _store, \
+};
+
+#define MEMCTRL_STRING_ATTR(_name,_data,_mode,_show,_store) \
+struct memctrl_dev_attribute attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = _data, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/* mc_version attribute file */
+MEMCTRL_STRING_ATTR(mc_version,EDAC_MC_VERSION,S_IRUGO,memctrl_string_show,NULL);
+
+/* memory controller control files */
+MEMCTRL_ATTR(panic_on_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
+MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
+MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
+MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
+
+
+/* Base Attributes of the memory ECC object */
+static struct memctrl_dev_attribute *memctrl_attr[] = {
+ &attr_panic_on_ue,
+ &attr_log_ue,
+ &attr_log_ce,
+ &attr_poll_msec,
+ &attr_mc_version,
+ NULL,
+};
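+/*
+ * Once the 'mc' kobject below is registered, these attributes appear as
+ * /sys/devices/system/edac/mc/{mc_version,panic_on_ue,log_ue,log_ce,
+ * poll_msec}.
+ */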
+
+/* Main MC kobject release() function */
+static void edac_memctrl_master_release(struct kobject *kobj)
+{
+ debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
+}
+
+static struct kobj_type ktype_memctrl = {
+ .release = edac_memctrl_master_release,
+ .sysfs_ops = &memctrlfs_ops,
+ .default_attrs = (struct attribute **) memctrl_attr,
+};
+
+
+/* Initialize the main sysfs entries for edac:
+ * /sys/devices/system/edac
+ *
+ * and children
+ *
+ * Return: 0 SUCCESS
+ * !0 FAILURE
+ */
+static int edac_sysfs_memctrl_setup(void)
+{
+ int err=0;
+
+ debugf1("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* create the /sys/devices/system/edac directory */
+ err = sysdev_class_register(&edac_class);
+ if (!err) {
+ /* Init the MC's kobject */
+ memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj));
+ kobject_init(&edac_memctrl_kobj);
+
+ edac_memctrl_kobj.parent = &edac_class.kset.kobj;
+ edac_memctrl_kobj.ktype = &ktype_memctrl;
+
+ /* generate sysfs "..../edac/mc" */
+ err = kobject_set_name(&edac_memctrl_kobj,"mc");
+ if (!err) {
+ /* FIXME: maybe new sysdev_create_subdir() */
+ err = kobject_register(&edac_memctrl_kobj);
+ if (err) {
+ debugf1("Failed to register '.../edac/mc'\n");
+ } else {
+ debugf1("Registered '.../edac/mc' kobject\n");
+ }
+ }
+ } else {
+		debugf1(KERN_WARNING __FILE__ ": %s() error=%d\n", __func__, err);
+ }
+
+ return err;
+}
+
+/*
+ * MC teardown:
+ * the '..../edac/mc' kobject followed by '..../edac' itself
+ */
+static void edac_sysfs_memctrl_teardown(void)
+{
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* Unregister the MC's kobject */
+ kobject_unregister(&edac_memctrl_kobj);
+
+ /* release the master edac mc kobject */
+ kobject_put(&edac_memctrl_kobj);
+
+ /* Unregister the 'edac' object */
+ sysdev_class_unregister(&edac_class);
+}
+
+/*
+ * /sys/devices/system/edac/pci;
+ * data structures and methods
+ */
+
+struct list_control {
+ struct edac_pci_device_list *list;
+ int *count;
+};
+
+/* Output the list as: vendor_id:device_id<,vendor_id:device_id> */
+static ssize_t edac_pci_list_string_show(void *ptr, char *buffer)
+{
+ struct list_control *listctl;
+ struct edac_pci_device_list *list;
+ char *p = buffer;
+ int len=0;
+ int i;
+
+ listctl = ptr;
+ list = listctl->list;
+
+ for (i = 0; i < *(listctl->count); i++, list++ ) {
+ if (len > 0)
+ len += snprintf(p + len, (PAGE_SIZE-len), ",");
+
+ len += snprintf(p + len,
+ (PAGE_SIZE-len),
+ "%x:%x",
+ list->vendor,list->device);
+ }
+
+ len += snprintf(p + len,(PAGE_SIZE-len), "\n");
+
+ return (ssize_t) len;
+}
+
+/*
+ * parse_one_device()
+ *
+ * Scan string from **s to **e looking for one 'vendor:device' tuple
+ * where each field is a hex value
+ *
+ * return 0 if an entry is NOT found
+ * return 1 if an entry is found
+ * fill in *vendor_id and *device_id with values found
+ *
+ * In both cases, make sure *s has been moved forward toward *e
+ */
+static int parse_one_device(const char **s,const char **e,
+ unsigned int *vendor_id, unsigned int *device_id)
+{
+ const char *runner, *p;
+
+ /* if null byte, we are done */
+ if (!**s) {
+ (*s)++; /* keep *s moving */
+ return 0;
+ }
+
+ /* skip over newlines & whitespace */
+ if ((**s == '\n') || isspace(**s)) {
+ (*s)++;
+ return 0;
+ }
+
+ if (!isxdigit(**s)) {
+ (*s)++;
+ return 0;
+ }
+
+ /* parse vendor_id */
+ runner = *s;
+ while (runner < *e) {
+ /* scan for vendor:device delimiter */
+ if (*runner == ':') {
+ *vendor_id = simple_strtol((char*) *s, (char**) &p, 16);
+ runner = p + 1;
+ break;
+ }
+ runner++;
+ }
+
+ if (!isxdigit(*runner)) {
+ *s = ++runner;
+ return 0;
+ }
+
+ /* parse device_id */
+ if (runner < *e) {
+ *device_id = simple_strtol((char*)runner, (char**)&p, 16);
+ runner = p;
+ }
+
+ *s = runner;
+
+ return 1;
+}
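+/*
+ * For example, repeatedly calling parse_one_device() on the buffer
+ * "8086:2541 8086:254c" yields the tuples (0x8086, 0x2541) and
+ * (0x8086, 0x254c), with the intervening whitespace consumed by the
+ * calls that return 0.
+ */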
+
+static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer,
+ size_t count)
+{
+ struct list_control *listctl;
+ struct edac_pci_device_list *list;
+ unsigned int vendor_id, device_id;
+ const char *s, *e;
+ int *index;
+
+ s = (char*)buffer;
+ e = s + count;
+
+ listctl = ptr;
+ list = listctl->list;
+ index = listctl->count;
+
+ *index = 0;
+ while (*index < MAX_LISTED_PCI_DEVICES) {
+
+ if (parse_one_device(&s,&e,&vendor_id,&device_id)) {
+ list[ *index ].vendor = vendor_id;
+ list[ *index ].device = device_id;
+ (*index)++;
+ }
+
+		/* check whether all data has been consumed */
+ if (s >= e)
+ break;
+ }
+
+ return count;
+}
+
+static ssize_t edac_pci_int_show(void *ptr, char *buffer)
+{
+ int *value = ptr;
+ return sprintf(buffer,"%d\n",*value);
+}
+
+static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
+{
+ int *value = ptr;
+
+ if (isdigit(*buffer))
+ *value = simple_strtoul(buffer,NULL,0);
+
+ return count;
+}
+
+struct edac_pci_dev_attribute {
+ struct attribute attr;
+ void *value;
+ ssize_t (*show)(void *,char *);
+ ssize_t (*store)(void *, const char *,size_t);
+};
+
+/* Set of show/store abstract level functions for PCI Parity object */
+static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct edac_pci_dev_attribute *edac_pci_dev;
+ edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
+
+ if (edac_pci_dev->show)
+ return edac_pci_dev->show(edac_pci_dev->value, buffer);
+ return -EIO;
+}
+
+static ssize_t edac_pci_dev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct edac_pci_dev_attribute *edac_pci_dev;
+ edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
+
+	if (edac_pci_dev->store)
+ return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
+ return -EIO;
+}
+
+static struct sysfs_ops edac_pci_sysfs_ops = {
+ .show = edac_pci_dev_show,
+ .store = edac_pci_dev_store
+};
+
+
+#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \
+struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = &_name, \
+ .show = _show, \
+ .store = _store, \
+};
+
+#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store) \
+struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = _data, \
+ .show = _show, \
+ .store = _store, \
+};
+
+static struct list_control pci_whitelist_control = {
+ .list = pci_whitelist,
+ .count = &pci_whitelist_count
+};
+
+static struct list_control pci_blacklist_control = {
+ .list = pci_blacklist,
+ .count = &pci_blacklist_count
+};
+
+/* whitelist attribute */
+EDAC_PCI_STRING_ATTR(pci_parity_whitelist,
+ &pci_whitelist_control,
+ S_IRUGO|S_IWUSR,
+ edac_pci_list_string_show,
+ edac_pci_list_string_store);
+
+EDAC_PCI_STRING_ATTR(pci_parity_blacklist,
+ &pci_blacklist_control,
+ S_IRUGO|S_IWUSR,
+ edac_pci_list_string_show,
+ edac_pci_list_string_store);
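+/*
+ * Illustrative usage: writing "8086:2541" to
+ * /sys/devices/system/edac/pci/pci_parity_whitelist limits parity
+ * scanning to that vendor:device pair; reading the file returns the
+ * list in the same vendor:device[,vendor:device] format.
+ */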
+
+/* PCI Parity control files */
+EDAC_PCI_ATTR(check_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
+EDAC_PCI_ATTR(panic_on_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
+EDAC_PCI_ATTR(pci_parity_count,S_IRUGO,edac_pci_int_show,NULL);
+
+/* Base Attributes of the memory ECC object */
+static struct edac_pci_dev_attribute *edac_pci_attr[] = {
+ &edac_pci_attr_check_pci_parity,
+ &edac_pci_attr_panic_on_pci_parity,
+ &edac_pci_attr_pci_parity_count,
+ &edac_pci_attr_pci_parity_whitelist,
+ &edac_pci_attr_pci_parity_blacklist,
+ NULL,
+};
+
+/* No memory to release */
+static void edac_pci_release(struct kobject *kobj)
+{
+ debugf1("EDAC PCI: " __FILE__ ": %s()\n", __func__);
+}
+
+static struct kobj_type ktype_edac_pci = {
+ .release = edac_pci_release,
+ .sysfs_ops = &edac_pci_sysfs_ops,
+ .default_attrs = (struct attribute **) edac_pci_attr,
+};
+
+/**
+ * edac_sysfs_pci_setup()
+ *
+ */
+static int edac_sysfs_pci_setup(void)
+{
+ int err;
+
+ debugf1("MC: " __FILE__ ": %s()\n", __func__);
+
+ memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj));
+
+ kobject_init(&edac_pci_kobj);
+ edac_pci_kobj.parent = &edac_class.kset.kobj;
+ edac_pci_kobj.ktype = &ktype_edac_pci;
+
+ err = kobject_set_name(&edac_pci_kobj, "pci");
+ if (!err) {
+		/* Instantiate the 'pci' kobject */
+ /* FIXME: maybe new sysdev_create_subdir() */
+ err = kobject_register(&edac_pci_kobj);
+ if (err)
+ debugf1("Failed to register '.../edac/pci'\n");
+ else
+ debugf1("Registered '.../edac/pci' kobject\n");
+ }
+ return err;
+}
+
+
+static void edac_sysfs_pci_teardown(void)
+{
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ kobject_unregister(&edac_pci_kobj);
+ kobject_put(&edac_pci_kobj);
+}
+
+/* EDAC sysfs CSROW data structures and methods */
+
+/* Set of more detailed csrow<id> attribute show/store functions */
+static ssize_t csrow_ch0_dimm_label_show(struct csrow_info *csrow, char *data)
+{
+ ssize_t size = 0;
+
+ if (csrow->nr_channels > 0) {
+ size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n",
+ csrow->channels[0].label);
+ }
+ return size;
+}
+
+static ssize_t csrow_ch1_dimm_label_show(struct csrow_info *csrow, char *data)
+{
+ ssize_t size = 0;
+
+	if (csrow->nr_channels > 1) {
+ size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
+ csrow->channels[1].label);
+ }
+ return size;
+}
+
+static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow,
+ const char *data, size_t size)
+{
+ ssize_t max_size = 0;
+
+ if (csrow->nr_channels > 0) {
+ max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
+ strncpy(csrow->channels[0].label, data, max_size);
+ csrow->channels[0].label[max_size] = '\0';
+ }
+ return size;
+}
+
+static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow,
+ const char *data, size_t size)
+{
+ ssize_t max_size = 0;
+
+ if (csrow->nr_channels > 1) {
+ max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
+ strncpy(csrow->channels[1].label, data, max_size);
+ csrow->channels[1].label[max_size] = '\0';
+ }
+ return max_size;
+}
+
+static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data)
+{
+ return sprintf(data,"%u\n", csrow->ue_count);
+}
+
+static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data)
+{
+ return sprintf(data,"%u\n", csrow->ce_count);
+}
+
+static ssize_t csrow_ch0_ce_count_show(struct csrow_info *csrow, char *data)
+{
+ ssize_t size = 0;
+
+ if (csrow->nr_channels > 0) {
+ size = sprintf(data,"%u\n", csrow->channels[0].ce_count);
+ }
+ return size;
+}
+
+static ssize_t csrow_ch1_ce_count_show(struct csrow_info *csrow, char *data)
+{
+ ssize_t size = 0;
+
+ if (csrow->nr_channels > 1) {
+ size = sprintf(data,"%u\n", csrow->channels[1].ce_count);
+ }
+ return size;
+}
+
+static ssize_t csrow_size_show(struct csrow_info *csrow, char *data)
+{
+ return sprintf(data,"%u\n", PAGES_TO_MiB(csrow->nr_pages));
+}
+
+static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data)
+{
+ return sprintf(data,"%s\n", mem_types[csrow->mtype]);
+}
+
+static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data)
+{
+ return sprintf(data,"%s\n", dev_types[csrow->dtype]);
+}
+
+static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data)
+{
+ return sprintf(data,"%s\n", edac_caps[csrow->edac_mode]);
+}
+
+struct csrowdev_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct csrow_info *,char *);
+ ssize_t (*store)(struct csrow_info *, const char *,size_t);
+};
+
+#define to_csrow(k) container_of(k, struct csrow_info, kobj)
+#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
+
+/* Set of show/store higher level functions for csrow objects */
+static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct csrow_info *csrow = to_csrow(kobj);
+ struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
+
+ if (csrowdev_attr->show)
+ return csrowdev_attr->show(csrow, buffer);
+ return -EIO;
+}
+
+static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct csrow_info *csrow = to_csrow(kobj);
+ struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr);
+
+ if (csrowdev_attr->store)
+ return csrowdev_attr->store(csrow, buffer, count);
+ return -EIO;
+}
+
+static struct sysfs_ops csrowfs_ops = {
+ .show = csrowdev_show,
+ .store = csrowdev_store
+};
+
+#define CSROWDEV_ATTR(_name,_mode,_show,_store) \
+struct csrowdev_attribute attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/* csrow<id>/attribute files */
+CSROWDEV_ATTR(size_mb,S_IRUGO,csrow_size_show,NULL);
+CSROWDEV_ATTR(dev_type,S_IRUGO,csrow_dev_type_show,NULL);
+CSROWDEV_ATTR(mem_type,S_IRUGO,csrow_mem_type_show,NULL);
+CSROWDEV_ATTR(edac_mode,S_IRUGO,csrow_edac_mode_show,NULL);
+CSROWDEV_ATTR(ue_count,S_IRUGO,csrow_ue_count_show,NULL);
+CSROWDEV_ATTR(ce_count,S_IRUGO,csrow_ce_count_show,NULL);
+CSROWDEV_ATTR(ch0_ce_count,S_IRUGO,csrow_ch0_ce_count_show,NULL);
+CSROWDEV_ATTR(ch1_ce_count,S_IRUGO,csrow_ch1_ce_count_show,NULL);
+
+/* control/attribute files */
+CSROWDEV_ATTR(ch0_dimm_label,S_IRUGO|S_IWUSR,
+ csrow_ch0_dimm_label_show,
+ csrow_ch0_dimm_label_store);
+CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR,
+ csrow_ch1_dimm_label_show,
+ csrow_ch1_dimm_label_store);
+
+
+/* Attributes of the CSROW<id> object */
+static struct csrowdev_attribute *csrow_attr[] = {
+ &attr_dev_type,
+ &attr_mem_type,
+ &attr_edac_mode,
+ &attr_size_mb,
+ &attr_ue_count,
+ &attr_ce_count,
+ &attr_ch0_ce_count,
+ &attr_ch1_ce_count,
+ &attr_ch0_dimm_label,
+ &attr_ch1_dimm_label,
+ NULL,
+};
+
+
+/* No memory to release */
+static void edac_csrow_instance_release(struct kobject *kobj)
+{
+ debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
+}
+
+static struct kobj_type ktype_csrow = {
+ .release = edac_csrow_instance_release,
+ .sysfs_ops = &csrowfs_ops,
+ .default_attrs = (struct attribute **) csrow_attr,
+};
+
+/* Create a CSROW object under the specified edac_mc device */
+static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
+ struct csrow_info *csrow, int index )
+{
+ int err = 0;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ memset(&csrow->kobj, 0, sizeof(csrow->kobj));
+
+ /* generate ..../edac/mc/mc<id>/csrow<index> */
+
+ kobject_init(&csrow->kobj);
+ csrow->kobj.parent = edac_mci_kobj;
+ csrow->kobj.ktype = &ktype_csrow;
+
+ /* name this instance of csrow<id> */
+ err = kobject_set_name(&csrow->kobj,"csrow%d",index);
+ if (!err) {
+		/* Instantiate the csrow object */
+ err = kobject_register(&csrow->kobj);
+ if (err)
+ debugf0("Failed to register CSROW%d\n",index);
+ else
+ debugf0("Registered CSROW%d\n",index);
+ }
+
+ return err;
+}
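+/*
+ * The resulting hierarchy for a populated row is (reconstructed from
+ * the kobject parents set up above):
+ * /sys/devices/system/edac/mc/mc<id>/csrow<index>/{size_mb,ue_count,...}
+ */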
+
+/* sysfs data structures and methods for the MCI kobjects */
+
+static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
+ const char *data, size_t count )
+{
+ int row, chan;
+
+ mci->ue_noinfo_count = 0;
+ mci->ce_noinfo_count = 0;
+ mci->ue_count = 0;
+ mci->ce_count = 0;
+ for (row = 0; row < mci->nr_csrows; row++) {
+ struct csrow_info *ri = &mci->csrows[row];
+
+ ri->ue_count = 0;
+ ri->ce_count = 0;
+ for (chan = 0; chan < ri->nr_channels; chan++)
+ ri->channels[chan].ce_count = 0;
+ }
+ mci->start_time = jiffies;
+
+ return count;
+}
+
+static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%d\n", mci->ue_count);
+}
+
+static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%d\n", mci->ce_count);
+}
+
+static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%d\n", mci->ce_noinfo_count);
+}
+
+static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%d\n", mci->ue_noinfo_count);
+}
+
+static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%ld\n", (jiffies - mci->start_time) / HZ);
+}
+
+static ssize_t mci_mod_name_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%s %s\n", mci->mod_name, mci->mod_ver);
+}
+
+static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data,"%s\n", mci->ctl_name);
+}
+
+static int mci_output_edac_cap(char *buf, unsigned long edac_cap)
+{
+ char *p = buf;
+ int bit_idx;
+
+ for (bit_idx = 0; bit_idx < 8 * sizeof(edac_cap); bit_idx++) {
+ if ((edac_cap >> bit_idx) & 0x1)
+ p += sprintf(p, "%s ", edac_caps[bit_idx]);
+ }
+
+ return p - buf;
+}
+
+static ssize_t mci_edac_capability_show(struct mem_ctl_info *mci, char *data)
+{
+ char *p = data;
+
+ p += mci_output_edac_cap(p,mci->edac_ctl_cap);
+ p += sprintf(p, "\n");
+
+ return p - data;
+}
+
+static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ char *p = data;
+
+ p += mci_output_edac_cap(p,mci->edac_cap);
+ p += sprintf(p, "\n");
+
+ return p - data;
+}
+
+static int mci_output_mtype_cap(char *buf, unsigned long mtype_cap)
+{
+ char *p = buf;
+ int bit_idx;
+
+ for (bit_idx = 0; bit_idx < 8 * sizeof(mtype_cap); bit_idx++) {
+ if ((mtype_cap >> bit_idx) & 0x1)
+ p += sprintf(p, "%s ", mem_types[bit_idx]);
+ }
+
+ return p - buf;
+}
+
+static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, char *data)
+{
+ char *p = data;
+
+ p += mci_output_mtype_cap(p,mci->mtype_cap);
+ p += sprintf(p, "\n");
+
+ return p - data;
+}
+
+static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
+{
+ int total_pages, csrow_idx;
+
+ for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
+ csrow_idx++) {
+ struct csrow_info *csrow = &mci->csrows[csrow_idx];
+
+ if (!csrow->nr_pages)
+ continue;
+ total_pages += csrow->nr_pages;
+ }
+
+ return sprintf(data,"%u\n", PAGES_TO_MiB(total_pages));
+}
+
+struct mcidev_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct mem_ctl_info *,char *);
+ ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
+};
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
+#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr)
+
+static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+ struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
+
+ if (mcidev_attr->show)
+ return mcidev_attr->show(mem_ctl_info, buffer);
+ return -EIO;
+}
+
+static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+ struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
+
+ if (mcidev_attr->store)
+ return mcidev_attr->store(mem_ctl_info, buffer, count);
+ return -EIO;
+}
+
+static struct sysfs_ops mci_ops = {
+ .show = mcidev_show,
+ .store = mcidev_store
+};
+
+#define MCIDEV_ATTR(_name,_mode,_show,_store) \
+struct mcidev_attribute mci_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/* Control file */
+MCIDEV_ATTR(reset_counters,S_IWUSR,NULL,mci_reset_counters_store);
+
+/* Attribute files */
+MCIDEV_ATTR(mc_name,S_IRUGO,mci_ctl_name_show,NULL);
+MCIDEV_ATTR(module_name,S_IRUGO,mci_mod_name_show,NULL);
+MCIDEV_ATTR(edac_capability,S_IRUGO,mci_edac_capability_show,NULL);
+MCIDEV_ATTR(size_mb,S_IRUGO,mci_size_mb_show,NULL);
+MCIDEV_ATTR(seconds_since_reset,S_IRUGO,mci_seconds_show,NULL);
+MCIDEV_ATTR(ue_noinfo_count,S_IRUGO,mci_ue_noinfo_show,NULL);
+MCIDEV_ATTR(ce_noinfo_count,S_IRUGO,mci_ce_noinfo_show,NULL);
+MCIDEV_ATTR(ue_count,S_IRUGO,mci_ue_count_show,NULL);
+MCIDEV_ATTR(ce_count,S_IRUGO,mci_ce_count_show,NULL);
+MCIDEV_ATTR(edac_current_capability,S_IRUGO,
+ mci_edac_current_capability_show,NULL);
+MCIDEV_ATTR(supported_mem_type,S_IRUGO,
+ mci_supported_mem_type_show,NULL);
+
+
+static struct mcidev_attribute *mci_attr[] = {
+ &mci_attr_reset_counters,
+ &mci_attr_module_name,
+ &mci_attr_mc_name,
+ &mci_attr_edac_capability,
+ &mci_attr_edac_current_capability,
+ &mci_attr_supported_mem_type,
+ &mci_attr_size_mb,
+ &mci_attr_seconds_since_reset,
+ &mci_attr_ue_noinfo_count,
+ &mci_attr_ce_noinfo_count,
+ &mci_attr_ue_count,
+ &mci_attr_ce_count,
+ NULL
+};
+
+
+/*
+ * Release of a MC controlling instance
+ */
+static void edac_mci_instance_release(struct kobject *kobj)
+{
+ struct mem_ctl_info *mci;
+ mci = container_of(kobj,struct mem_ctl_info,edac_mci_kobj);
+
+ debugf0("MC: " __FILE__ ": %s() idx=%d calling kfree\n",
+ __func__, mci->mc_idx);
+
+ kfree(mci);
+}
+
+static struct kobj_type ktype_mci = {
+ .release = edac_mci_instance_release,
+ .sysfs_ops = &mci_ops,
+ .default_attrs = (struct attribute **) mci_attr,
+};
+
+#define EDAC_DEVICE_SYMLINK "device"
+
+/*
+ * Create a new Memory Controller kobject instance,
+ * mc<id> under the 'mc' directory
+ *
+ * Return:
+ * 0 Success
+ * !0 Failure
+ */
+static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+ int i;
+ int err;
+ struct csrow_info *csrow;
+ struct kobject *edac_mci_kobj=&mci->edac_mci_kobj;
+
+ debugf0("MC: " __FILE__ ": %s() idx=%d\n", __func__, mci->mc_idx);
+
+ memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj));
+ kobject_init(edac_mci_kobj);
+
+ /* set the name of the mc<id> object */
+ err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx);
+ if (err)
+ return err;
+
+ /* link to our parent the '..../edac/mc' object */
+ edac_mci_kobj->parent = &edac_memctrl_kobj;
+ edac_mci_kobj->ktype = &ktype_mci;
+
+ /* register the mc<id> kobject */
+ err = kobject_register(edac_mci_kobj);
+ if (err)
+ return err;
+
+ /* create a symlink for the device */
+ err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj,
+ EDAC_DEVICE_SYMLINK);
+ if (err) {
+ kobject_unregister(edac_mci_kobj);
+ return err;
+ }
+
+ /* Make directories for each CSROW object
+ * under the mc<id> kobject
+ */
+ for (i = 0; i < mci->nr_csrows; i++) {
+
+ csrow = &mci->csrows[i];
+
+ /* Only expose populated CSROWs */
+ if (csrow->nr_pages > 0) {
+ err = edac_create_csrow_object(edac_mci_kobj,csrow,i);
+ if (err)
+ goto fail;
+ }
+ }
+
+ /* Mark this MCI instance as having sysfs entries */
+ mci->sysfs_active = MCI_SYSFS_ACTIVE;
+
+ return 0;
+
+
+	/* CSROW error: back out what has already been registered */
+fail:
+ for ( i--; i >= 0; i--) {
+		if (mci->csrows[i].nr_pages > 0) {
+ kobject_unregister(&mci->csrows[i].kobj);
+ kobject_put(&mci->csrows[i].kobj);
+ }
+ }
+
+ kobject_unregister(edac_mci_kobj);
+ kobject_put(edac_mci_kobj);
+
+ return err;
+}
+
+/*
+ * remove a Memory Controller instance
+ */
+static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+ int i;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* remove all csrow kobjects */
+ for (i = 0; i < mci->nr_csrows; i++) {
+ if (mci->csrows[i].nr_pages > 0) {
+ kobject_unregister(&mci->csrows[i].kobj);
+ kobject_put(&mci->csrows[i].kobj);
+ }
+ }
+
+ sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
+
+ kobject_unregister(&mci->edac_mci_kobj);
+ kobject_put(&mci->edac_mci_kobj);
+}
+
+/* END OF sysfs data and methods */
+
+#ifdef CONFIG_EDAC_DEBUG
+
+EXPORT_SYMBOL(edac_mc_dump_channel);
+
+void edac_mc_dump_channel(struct channel_info *chan)
+{
+ debugf4("\tchannel = %p\n", chan);
+ debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
+ debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
+ debugf4("\tchannel->label = '%s'\n", chan->label);
+ debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
+}
+
+
+EXPORT_SYMBOL(edac_mc_dump_csrow);
+
+void edac_mc_dump_csrow(struct csrow_info *csrow)
+{
+ debugf4("\tcsrow = %p\n", csrow);
+ debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
+ debugf4("\tcsrow->first_page = 0x%lx\n",
+ csrow->first_page);
+ debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
+ debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
+ debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
+ debugf4("\tcsrow->nr_channels = %d\n",
+ csrow->nr_channels);
+ debugf4("\tcsrow->channels = %p\n", csrow->channels);
+ debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
+}
+
+
+EXPORT_SYMBOL(edac_mc_dump_mci);
+
+void edac_mc_dump_mci(struct mem_ctl_info *mci)
+{
+ debugf3("\tmci = %p\n", mci);
+ debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
+ debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
+ debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
+ debugf4("\tmci->edac_check = %p\n", mci->edac_check);
+ debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
+ mci->nr_csrows, mci->csrows);
+ debugf3("\tpdev = %p\n", mci->pdev);
+ debugf3("\tmod_name:ctl_name = %s:%s\n",
+ mci->mod_name, mci->ctl_name);
+ debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
+}
+
+
+#endif /* CONFIG_EDAC_DEBUG */
+
+/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
+ * Adjust 'ptr' so that its alignment is at least as stringent as what the
+ * compiler would provide for X and return the aligned result.
+ *
+ * If 'size' is a constant, the compiler will optimize this whole function
+ * down to either a no-op or the addition of a constant to the value of 'ptr'.
+ */
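+/*
+ * For example, align_ptr(ptr, 12) selects an alignment of 8 (long
+ * long), computes r = 12 % 8 = 4 and returns ptr + 4.
+ */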
+static inline char * align_ptr (void *ptr, unsigned size)
+{
+ unsigned align, r;
+
+ /* Here we assume that the alignment of a "long long" is the most
+ * stringent alignment that the compiler will ever provide by default.
+ * As far as I know, this is a reasonable assumption.
+ */
+ if (size > sizeof(long))
+ align = sizeof(long long);
+ else if (size > sizeof(int))
+ align = sizeof(long);
+ else if (size > sizeof(short))
+ align = sizeof(int);
+ else if (size > sizeof(char))
+ align = sizeof(short);
+ else
+ return (char *) ptr;
+
+ r = size % align;
+
+ if (r == 0)
+ return (char *) ptr;
+
+ return (char *) (((unsigned long) ptr) + align - r);
+}
+
+
+EXPORT_SYMBOL(edac_mc_alloc);
+
+/**
+ * edac_mc_alloc: Allocate a struct mem_ctl_info structure
+ * @size_pvt: size of private storage needed
+ * @nr_csrows: Number of CWROWS needed for this MC
+ * @nr_chans: Number of channels for the MC
+ *
+ * Everything is kmalloc'ed as one big chunk - more efficient.
+ * Only can be used if all structures have the same lifetime - otherwise
+ * you have to allocate and initialize your own structures.
+ *
+ * Use edac_mc_free() to free mc structures allocated by this function.
+ *
+ * Returns:
+ * NULL allocation failed
+ * struct mem_ctl_info pointer
+ */
+struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
+ unsigned nr_chans)
+{
+ struct mem_ctl_info *mci;
+ struct csrow_info *csi, *csrow;
+ struct channel_info *chi, *chp, *chan;
+ void *pvt;
+ unsigned size;
+ int row, chn;
+
+ /* Figure out the offsets of the various items from the start of an mc
+ * structure. We want the alignment of each item to be at least as
+ * stringent as what the compiler would provide if we could simply
+ * hardcode everything into a single struct.
+ */
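+	/*
+	 * The single allocation is laid out as (illustrative):
+	 *   [ mem_ctl_info | csrow_info[nr_csrows] |
+	 *     channel_info[nr_csrows * nr_chans] | private data ]
+	 */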
+ mci = (struct mem_ctl_info *) 0;
+ csi = (struct csrow_info *)align_ptr(&mci[1], sizeof(*csi));
+ chi = (struct channel_info *)
+ align_ptr(&csi[nr_csrows], sizeof(*chi));
+ pvt = align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
+ size = ((unsigned long) pvt) + sz_pvt;
+
+ if ((mci = kmalloc(size, GFP_KERNEL)) == NULL)
+ return NULL;
+
+ /* Adjust pointers so they point within the memory we just allocated
+ * rather than an imaginary chunk of memory located at address 0.
+ */
+ csi = (struct csrow_info *) (((char *) mci) + ((unsigned long) csi));
+ chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
+ pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;
+
+ memset(mci, 0, size); /* clear all fields */
+
+ mci->csrows = csi;
+ mci->pvt_info = pvt;
+ mci->nr_csrows = nr_csrows;
+
+ for (row = 0; row < nr_csrows; row++) {
+ csrow = &csi[row];
+ csrow->csrow_idx = row;
+ csrow->mci = mci;
+ csrow->nr_channels = nr_chans;
+ chp = &chi[row * nr_chans];
+ csrow->channels = chp;
+
+ for (chn = 0; chn < nr_chans; chn++) {
+ chan = &chp[chn];
+ chan->chan_idx = chn;
+ chan->csrow = csrow;
+ }
+ }
+
+ return mci;
+}
+
+
+EXPORT_SYMBOL(edac_mc_free);
+
+/**
+ * edac_mc_free: Free a previously allocated 'mci' structure
+ * @mci: pointer to a struct mem_ctl_info structure
+ *
+ * Free up a previously allocated mci structure
+ * A MCI structure can be in 2 states after being allocated
+ * by edac_mc_alloc().
+ * 1) Allocated in a MC driver's probe, but not yet committed
+ * 2) Allocated and committed, by a call to edac_mc_add_mc()
+ * edac_mc_add_mc() is the function that adds the sysfs entries
+ * thus, this free function must determine which state the 'mci'
+ * structure is in, then either free it directly or
+ * perform kobject cleanup by calling edac_remove_sysfs_mci_device().
+ *
+ * VOID Return
+ */
+void edac_mc_free(struct mem_ctl_info *mci)
+{
+ /* only if sysfs entries for this mci instance exist
+ * do we remove them and defer the actual kfree via
+ * the kobject 'release()' callback.
+ *
+ * Otherwise, do a straight kfree now.
+ */
+ if (mci->sysfs_active == MCI_SYSFS_ACTIVE)
+ edac_remove_sysfs_mci_device(mci);
+ else
+ kfree(mci);
+}
+
+
+
+EXPORT_SYMBOL(edac_mc_find_mci_by_pdev);
+
+struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct list_head *item;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+
+ if (mci->pdev == pdev)
+ return mci;
+ }
+
+ return NULL;
+}
+
+static int add_mc_to_global_list (struct mem_ctl_info *mci)
+{
+ struct list_head *item, *insert_before;
+ struct mem_ctl_info *p;
+ int i;
+
+ if (list_empty(&mc_devices)) {
+ mci->mc_idx = 0;
+ insert_before = &mc_devices;
+ } else {
+ if (edac_mc_find_mci_by_pdev(mci->pdev)) {
+ printk(KERN_WARNING
+ "EDAC MC: %s (%s) %s %s already assigned %d\n",
+ mci->pdev->dev.bus_id, pci_name(mci->pdev),
+ mci->mod_name, mci->ctl_name, mci->mc_idx);
+ return 1;
+ }
+
+ insert_before = NULL;
+ i = 0;
+
+ list_for_each(item, &mc_devices) {
+ p = list_entry(item, struct mem_ctl_info, link);
+
+ if (p->mc_idx != i) {
+ insert_before = item;
+ break;
+ }
+
+ i++;
+ }
+
+ mci->mc_idx = i;
+
+ if (insert_before == NULL)
+ insert_before = &mc_devices;
+ }
+
+ list_add_tail_rcu(&mci->link, insert_before);
+ return 0;
+}
+
+
+
+EXPORT_SYMBOL(edac_mc_add_mc);
+
+/**
+ * edac_mc_add_mc: Insert the 'mci' structure into the mci global list
+ * @mci: pointer to the mci structure to be added to the list
+ *
+ * Return:
+ * 0 Success
+ * !0 Failure
+ */
+
+/* FIXME - should a warning be printed if no error detection? correction? */
+int edac_mc_add_mc(struct mem_ctl_info *mci)
+{
+ int rc = 1;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+#ifdef CONFIG_EDAC_DEBUG
+ if (edac_debug_level >= 3)
+ edac_mc_dump_mci(mci);
+ if (edac_debug_level >= 4) {
+ int i;
+
+ for (i = 0; i < mci->nr_csrows; i++) {
+ int j;
+ edac_mc_dump_csrow(&mci->csrows[i]);
+ for (j = 0; j < mci->csrows[i].nr_channels; j++)
+ edac_mc_dump_channel(&mci->csrows[i].
+ channels[j]);
+ }
+ }
+#endif
+ down(&mem_ctls_mutex);
+
+ if (add_mc_to_global_list(mci))
+ goto finish;
+
+ /* set load time so that error rate can be tracked */
+ mci->start_time = jiffies;
+
+ if (edac_create_sysfs_mci_device(mci)) {
+ printk(KERN_WARNING
+ "EDAC MC%d: failed to create sysfs device\n",
+ mci->mc_idx);
+ /* FIXME - should there be an error code and unwind? */
+ goto finish;
+ }
+
+ /* Report action taken */
+ printk(KERN_INFO
+ "EDAC MC%d: Giving out device to %s %s: PCI %s\n",
+ mci->mc_idx, mci->mod_name, mci->ctl_name,
+ pci_name(mci->pdev));
+
+
+ rc = 0;
+
+finish:
+ up(&mem_ctls_mutex);
+ return rc;
+}
+
+
+
+static void complete_mc_list_del (struct rcu_head *head)
+{
+ struct mem_ctl_info *mci;
+
+ mci = container_of(head, struct mem_ctl_info, rcu);
+ INIT_LIST_HEAD(&mci->link);
+ complete(&mci->complete);
+}
+
+static void del_mc_from_global_list (struct mem_ctl_info *mci)
+{
+ list_del_rcu(&mci->link);
+ init_completion(&mci->complete);
+ call_rcu(&mci->rcu, complete_mc_list_del);
+ wait_for_completion(&mci->complete);
+}
+
+EXPORT_SYMBOL(edac_mc_del_mc);
+
+/**
+ * edac_mc_del_mc: Remove the specified mci structure from global list
+ * @mci: Pointer to struct mem_ctl_info structure
+ *
+ * Returns:
+ * 0 Success
+ * 1 Failure
+ */
+int edac_mc_del_mc(struct mem_ctl_info *mci)
+{
+ int rc = 1;
+
+ debugf0("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ down(&mem_ctls_mutex);
+ del_mc_from_global_list(mci);
+ printk(KERN_INFO
+ "EDAC MC%d: Removed device %d for %s %s: PCI %s\n",
+ mci->mc_idx, mci->mc_idx, mci->mod_name, mci->ctl_name,
+ pci_name(mci->pdev));
+ rc = 0;
+ up(&mem_ctls_mutex);
+
+ return rc;
+}
+
+
+EXPORT_SYMBOL(edac_mc_scrub_block);
+
+void edac_mc_scrub_block(unsigned long page, unsigned long offset,
+ u32 size)
+{
+ struct page *pg;
+ void *virt_addr;
+ unsigned long flags = 0;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* ECC error page was not in our memory. Ignore it. */
+ if(!pfn_valid(page))
+ return;
+
+ /* Find the actual page structure then map it and fix */
+ pg = pfn_to_page(page);
+
+ if (PageHighMem(pg))
+ local_irq_save(flags);
+
+ virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
+
+ /* Perform architecture specific atomic scrub operation */
+ atomic_scrub(virt_addr + offset, size);
+
+ /* Unmap and complete */
+ kunmap_atomic(virt_addr, KM_BOUNCE_READ);
+
+ if (PageHighMem(pg))
+ local_irq_restore(flags);
+}
+
+
+/* FIXME - should return -1 */
+EXPORT_SYMBOL(edac_mc_find_csrow_by_page);
+
+int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
+ unsigned long page)
+{
+ struct csrow_info *csrows = mci->csrows;
+ int row, i;
+
+ debugf1("MC%d: " __FILE__ ": %s(): 0x%lx\n", mci->mc_idx, __func__,
+ page);
+ row = -1;
+
+ for (i = 0; i < mci->nr_csrows; i++) {
+ struct csrow_info *csrow = &csrows[i];
+
+ if (csrow->nr_pages == 0)
+ continue;
+
+ debugf3("MC%d: " __FILE__
+ ": %s(): first(0x%lx) page(0x%lx)"
+ " last(0x%lx) mask(0x%lx)\n", mci->mc_idx,
+ __func__, csrow->first_page, page,
+ csrow->last_page, csrow->page_mask);
+
+ if ((page >= csrow->first_page) &&
+ (page <= csrow->last_page) &&
+ ((page & csrow->page_mask) ==
+ (csrow->first_page & csrow->page_mask))) {
+ row = i;
+ break;
+ }
+ }
+
+ if (row == -1)
+ printk(KERN_ERR
+ "EDAC MC%d: could not look up page error address %lx\n",
+ mci->mc_idx, (unsigned long) page);
+
+ return row;
+}
+
+
+EXPORT_SYMBOL(edac_mc_handle_ce);
+
+/* FIXME - setable log (warning/emerg) levels */
+/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
+void edac_mc_handle_ce(struct mem_ctl_info *mci,
+ unsigned long page_frame_number,
+ unsigned long offset_in_page,
+ unsigned long syndrome, int row, int channel,
+ const char *msg)
+{
+ unsigned long remapped_page;
+
+ debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+
+ /* FIXME - maybe make panic on INTERNAL ERROR an option */
+ if (row >= mci->nr_csrows || row < 0) {
+ /* something is wrong */
+ printk(KERN_ERR
+ "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
+ mci->mc_idx, row, mci->nr_csrows);
+ edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+ if (channel >= mci->csrows[row].nr_channels || channel < 0) {
+ /* something is wrong */
+ printk(KERN_ERR
+ "EDAC MC%d: INTERNAL ERROR: channel out of range "
+ "(%d >= %d)\n",
+ mci->mc_idx, channel, mci->csrows[row].nr_channels);
+ edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+
+ if (log_ce)
+ /* FIXME - put in DIMM location */
+ printk(KERN_WARNING
+ "EDAC MC%d: CE page 0x%lx, offset 0x%lx,"
+ " grain %d, syndrome 0x%lx, row %d, channel %d,"
+ " label \"%s\": %s\n", mci->mc_idx,
+ page_frame_number, offset_in_page,
+ mci->csrows[row].grain, syndrome, row, channel,
+ mci->csrows[row].channels[channel].label, msg);
+
+ mci->ce_count++;
+ mci->csrows[row].ce_count++;
+ mci->csrows[row].channels[channel].ce_count++;
+
+ if (mci->scrub_mode & SCRUB_SW_SRC) {
+ /*
+ * Some MC's can remap memory so that it is still available
+ * at a different address when PCI devices map into memory.
+ * MC's that can't do this lose the memory where PCI devices
+		 * are mapped. This mapping is MC dependent and so we call
+ * back into the MC driver for it to map the MC page to
+ * a physical (CPU) page which can then be mapped to a virtual
+ * page - which can then be scrubbed.
+ */
+ remapped_page = mci->ctl_page_to_phys ?
+ mci->ctl_page_to_phys(mci, page_frame_number) :
+ page_frame_number;
+
+ edac_mc_scrub_block(remapped_page, offset_in_page,
+ mci->csrows[row].grain);
+ }
+}
+
+
+EXPORT_SYMBOL(edac_mc_handle_ce_no_info);
+
+void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
+ const char *msg)
+{
+ if (log_ce)
+ printk(KERN_WARNING
+ "EDAC MC%d: CE - no information available: %s\n",
+ mci->mc_idx, msg);
+ mci->ce_noinfo_count++;
+ mci->ce_count++;
+}
+
+
+EXPORT_SYMBOL(edac_mc_handle_ue);
+
+void edac_mc_handle_ue(struct mem_ctl_info *mci,
+ unsigned long page_frame_number,
+ unsigned long offset_in_page, int row,
+ const char *msg)
+{
+ int len = EDAC_MC_LABEL_LEN * 4;
+ char labels[len + 1];
+ char *pos = labels;
+ int chan;
+ int chars;
+
+ debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+
+ /* FIXME - maybe make panic on INTERNAL ERROR an option */
+ if (row >= mci->nr_csrows || row < 0) {
+ /* something is wrong */
+ printk(KERN_ERR
+ "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
+ mci->mc_idx, row, mci->nr_csrows);
+ edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+ return;
+ }
+
+ chars = snprintf(pos, len + 1, "%s",
+ mci->csrows[row].channels[0].label);
+ len -= chars;
+ pos += chars;
+ for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
+ chan++) {
+ chars = snprintf(pos, len + 1, ":%s",
+ mci->csrows[row].channels[chan].label);
+ len -= chars;
+ pos += chars;
+ }
+
+ if (log_ue)
+ printk(KERN_EMERG
+ "EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d,"
+ " labels \"%s\": %s\n", mci->mc_idx,
+ page_frame_number, offset_in_page,
+ mci->csrows[row].grain, row, labels, msg);
+
+	if (panic_on_ue)
+		panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d,"
+		      " row %d, labels \"%s\": %s\n", mci->mc_idx,
+		      page_frame_number, offset_in_page,
+		      mci->csrows[row].grain, row, labels, msg);
+
+ mci->ue_count++;
+ mci->csrows[row].ue_count++;
+}
+
+
+EXPORT_SYMBOL(edac_mc_handle_ue_no_info);
+
+void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
+ const char *msg)
+{
+ if (panic_on_ue)
+ panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
+
+ if (log_ue)
+ printk(KERN_WARNING
+ "EDAC MC%d: UE - no information available: %s\n",
+ mci->mc_idx, msg);
+ mci->ue_noinfo_count++;
+ mci->ue_count++;
+}
+
+
+#ifdef CONFIG_PCI
+
+static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
+{
+ int where;
+ u16 status;
+
+ where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
+ pci_read_config_word(dev, where, &status);
+
+	/* If we get back 0xFFFF then we must suspect that the card has been
+	 * pulled but the Linux PCI layer has not yet finished cleaning up.
+	 * We don't want to report on such devices.
+	 */
+
+ if (status == 0xFFFF) {
+ u32 sanity;
+ pci_read_config_dword(dev, 0, &sanity);
+ if (sanity == 0xFFFFFFFF)
+ return 0;
+ }
+ status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_PARITY;
+
+ if (status)
+ /* reset only the bits we are interested in */
+ pci_write_config_word(dev, where, status);
+
+ return status;
+}
+
+typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
+
+/* Clear any PCI parity errors logged by this device. */
+static void edac_pci_dev_parity_clear( struct pci_dev *dev )
+{
+ u8 header_type;
+
+ get_pci_parity_status(dev, 0);
+
+ /* read the device TYPE, looking for bridges */
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
+ get_pci_parity_status(dev, 1);
+}
+
+/*
+ * PCI Parity polling
+ *
+ */
+static void edac_pci_dev_parity_test(struct pci_dev *dev)
+{
+ u16 status;
+ u8 header_type;
+
+ /* read the STATUS register on this device
+ */
+ status = get_pci_parity_status(dev, 0);
+
+ debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id );
+
+ /* check the status reg for errors */
+ if (status) {
+ if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
+ printk(KERN_CRIT
+ "EDAC PCI- "
+ "Signaled System Error on %s\n",
+ pci_name (dev));
+
+ if (status & (PCI_STATUS_PARITY)) {
+ printk(KERN_CRIT
+ "EDAC PCI- "
+ "Master Data Parity Error on %s\n",
+ pci_name (dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+ printk(KERN_CRIT
+ "EDAC PCI- "
+ "Detected Parity Error on %s\n",
+ pci_name (dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+ }
+
+ /* read the device TYPE, looking for bridges */
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+ debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id );
+
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* On bridges, need to examine secondary status register */
+ status = get_pci_parity_status(dev, 1);
+
+ debugf2("PCI SEC_STATUS= 0x%04x %s\n",
+ status, dev->dev.bus_id );
+
+ /* check the secondary status reg for errors */
+ if (status) {
+ if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
+ printk(KERN_CRIT
+ "EDAC PCI-Bridge- "
+ "Signaled System Error on %s\n",
+ pci_name (dev));
+
+ if (status & (PCI_STATUS_PARITY)) {
+ printk(KERN_CRIT
+ "EDAC PCI-Bridge- "
+ "Master Data Parity Error on %s\n",
+ pci_name (dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+ printk(KERN_CRIT
+ "EDAC PCI-Bridge- "
+ "Detected Parity Error on %s\n",
+ pci_name (dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+ }
+ }
+}
+
+/*
+ * check_dev_on_list: Scan for a PCI device on a white/black list
+ * @list: an EDAC &edac_pci_device_list white/black list pointer
+ * @free_index: index of next free entry on the list
+ * @pci_dev: PCI Device pointer
+ *
+ * see if list contains the device.
+ *
+ * Returns: 0 not found
+ * 1 found on list
+ */
+static int check_dev_on_list(struct edac_pci_device_list *list, int free_index,
+ struct pci_dev *dev)
+{
+ int i;
+ int rc = 0; /* Assume not found */
+ unsigned short vendor=dev->vendor;
+ unsigned short device=dev->device;
+
+ /* Scan the list, looking for a vendor/device match
+ */
+ for (i = 0; i < free_index; i++, list++ ) {
+ if ( (list->vendor == vendor ) &&
+ (list->device == device )) {
+ rc = 1;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * pci_dev parity list iterator
+ * Scan the PCI device list for one iteration, looking for SERRORs,
+ * Master Parity ERRORs or Parity ERRORs on primary or secondary devices
+ */
+static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
+{
+ struct pci_dev *dev=NULL;
+
+ /* request for kernel access to the next PCI device, if any,
+ * and while we are looking at it have its reference count
+ * bumped until we are done with it
+ */
+ while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+
+ /* if whitelist exists then it has priority, so only scan those
+ * devices on the whitelist
+ */
+ if (pci_whitelist_count > 0 ) {
+ if (check_dev_on_list(pci_whitelist,
+ pci_whitelist_count, dev))
+ fn(dev);
+ } else {
+ /*
+			 * if no whitelist, then check if this device is
+ * blacklisted
+ */
+ if (!check_dev_on_list(pci_blacklist,
+ pci_blacklist_count, dev))
+ fn(dev);
+ }
+ }
+}
+
+static void do_pci_parity_check(void)
+{
+ unsigned long flags;
+ int before_count;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ if (!check_pci_parity)
+ return;
+
+ before_count = atomic_read(&pci_parity_count);
+
+ /* scan all PCI devices looking for a Parity Error on devices and
+ * bridges
+ */
+ local_irq_save(flags);
+ edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
+ local_irq_restore(flags);
+
+ /* Only if operator has selected panic on PCI Error */
+ if (panic_on_pci_parity) {
+ /* If the count is different 'after' from 'before' */
+ if (before_count != atomic_read(&pci_parity_count))
+ panic("EDAC: PCI Parity Error");
+ }
+}
+
+
+static inline void clear_pci_parity_errors(void)
+{
+ /* Clear any PCI bus parity errors that devices initially have logged
+ * in their registers.
+ */
+ edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
+}
+
+
+#else /* CONFIG_PCI */
+
+
+static inline void do_pci_parity_check(void)
+{
+ /* no-op */
+}
+
+
+static inline void clear_pci_parity_errors(void)
+{
+ /* no-op */
+}
+
+
+#endif /* CONFIG_PCI */
+
+/*
+ * Iterate over all MC instances and check for ECC, et al, errors
+ */
+static inline void check_mc_devices (void)
+{
+ unsigned long flags;
+ struct list_head *item;
+ struct mem_ctl_info *mci;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* during poll, have interrupts off */
+ local_irq_save(flags);
+
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+
+ if (mci->edac_check != NULL)
+ mci->edac_check(mci);
+ }
+
+ local_irq_restore(flags);
+}
+
+
+/*
+ * Check MC status every poll_msec.
+ * Check PCI status every poll_msec as well.
+ *
+ * This is where the work gets done for EDAC.
+ *
+ * SMP safe, doesn't use NMI, and auto-rate-limits.
+ */
+static void do_edac_check(void)
+{
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ check_mc_devices();
+
+ do_pci_parity_check();
+}
+
+
+/*
+ * EDAC thread state information
+ */
+struct bs_thread_info
+{
+ struct task_struct *task;
+ struct completion *event;
+ char *name;
+ void (*run)(void);
+};
+
+static struct bs_thread_info bs_thread;
+
+/*
+ * edac_kernel_thread
+ * This is the kernel thread that processes EDAC operations
+ * in a normal thread environment
+ */
+static int edac_kernel_thread(void *arg)
+{
+ struct bs_thread_info *thread = (struct bs_thread_info *) arg;
+
+ /* detach thread */
+ daemonize(thread->name);
+
+ current->exit_signal = SIGCHLD;
+ allow_signal(SIGKILL);
+ thread->task = current;
+
+ /* indicate to starting task we have started */
+ complete(thread->event);
+
+ /* loop forever, until we are told to stop */
+ while(thread->run != NULL) {
+ void (*run)(void);
+
+ /* call the function to check the memory controllers */
+ run = thread->run;
+ if (run)
+ run();
+
+ if (signal_pending(current))
+ flush_signals(current);
+
+		/* ensure we are interruptible */
+ set_current_state(TASK_INTERRUPTIBLE);
+
+		/* go to sleep for the polling interval */
+ schedule_timeout((HZ * poll_msec) / 1000);
+ try_to_freeze();
+ }
+
+ /* notify waiter that we are exiting */
+ complete(thread->event);
+
+ return 0;
+}
+
+/*
+ * edac_mc_init
+ * module initialization entry point
+ */
+static int __init edac_mc_init(void)
+{
+ int ret;
+ struct completion event;
+
+ printk(KERN_INFO "MC: " __FILE__ " version " EDAC_MC_VERSION "\n");
+
+ /*
+ * Harvest and clear any boot/initialization PCI parity errors
+ *
+ * FIXME: This only clears errors logged by devices present at time of
+ * module initialization. We should also do an initial clear
+ * of each newly hotplugged device.
+ */
+ clear_pci_parity_errors();
+
+ /* perform check for first time to harvest boot leftovers */
+ do_edac_check();
+
+	/* Create the MC sysfs entries */
+ if (edac_sysfs_memctrl_setup()) {
+ printk(KERN_ERR "EDAC MC: Error initializing sysfs code\n");
+ return -ENODEV;
+ }
+
+ /* Create the PCI parity sysfs entries */
+ if (edac_sysfs_pci_setup()) {
+ edac_sysfs_memctrl_teardown();
+ printk(KERN_ERR "EDAC PCI: Error initializing sysfs code\n");
+ return -ENODEV;
+ }
+
+ /* Create our kernel thread */
+ init_completion(&event);
+ bs_thread.event = &event;
+ bs_thread.name = "kedac";
+ bs_thread.run = do_edac_check;
+
+ /* create our kernel thread */
+ ret = kernel_thread(edac_kernel_thread, &bs_thread, CLONE_KERNEL);
+ if (ret < 0) {
+ /* remove the sysfs entries */
+ edac_sysfs_memctrl_teardown();
+ edac_sysfs_pci_teardown();
+ return -ENOMEM;
+ }
+
+	/* wait for our kernel thread to ack that it is up and running */
+ wait_for_completion(&event);
+
+ return 0;
+}
+
+
+/*
+ * edac_mc_exit()
+ * module exit/termination function
+ */
+static void __exit edac_mc_exit(void)
+{
+ struct completion event;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ init_completion(&event);
+ bs_thread.event = &event;
+
+ /* As soon as ->run is set to NULL, the task could disappear,
+ * so we need to hold tasklist_lock until we have sent the signal
+ */
+ read_lock(&tasklist_lock);
+ bs_thread.run = NULL;
+ send_sig(SIGKILL, bs_thread.task, 1);
+ read_unlock(&tasklist_lock);
+ wait_for_completion(&event);
+
+ /* tear down the sysfs device */
+ edac_sysfs_memctrl_teardown();
+ edac_sysfs_pci_teardown();
+}
+
+
+
+
+module_init(edac_mc_init);
+module_exit(edac_mc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
+	      "Based on work by Dan Hollis et al");
+MODULE_DESCRIPTION("Core library routines for MC reporting");
+
+module_param(panic_on_ue, int, 0644);
+MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
+module_param(check_pci_parity, int, 0644);
+MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on");
+module_param(panic_on_pci_parity, int, 0644);
+MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on");
+module_param(log_ue, int, 0644);
+MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on");
+module_param(log_ce, int, 0644);
+MODULE_PARM_DESC(log_ce, "Log correctable error to console: 0=off 1=on");
+module_param(poll_msec, int, 0644);
+MODULE_PARM_DESC(poll_msec, "Polling period in milliseconds");
+#ifdef CONFIG_EDAC_DEBUG
+module_param(edac_debug_level, int, 0644);
+MODULE_PARM_DESC(edac_debug_level, "Debug level");
+#endif
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
new file mode 100644
index 00000000000..75ecf484a43
--- /dev/null
+++ b/drivers/edac/edac_mc.h
@@ -0,0 +1,448 @@
+/*
+ * MC kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ * http://www.anime.net/~goemon/linux-ecc/
+ *
+ * NMI handling support added by
+ * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
+ *
+ * $Id: edac_mc.h,v 1.4.2.10 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+
+#ifndef _EDAC_MC_H_
+#define _EDAC_MC_H_
+
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/nmi.h>
+#include <linux/rcupdate.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+
+
+#define EDAC_MC_LABEL_LEN 31
+#define MC_PROC_NAME_MAX_LEN 7
+
+#if PAGE_SHIFT < 20
+#define PAGES_TO_MiB( pages ) ( ( pages ) >> ( 20 - PAGE_SHIFT ) )
+#else				/* PAGE_SHIFT >= 20 */
+#define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) )
+#endif
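+/* Worked example: with PAGE_SHIFT == 12 (4KiB pages), PAGES_TO_MiB(256) == 1 */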
+
+#ifdef CONFIG_EDAC_DEBUG
+extern int edac_debug_level;
+#define edac_debug_printk(level, fmt, args...) \
+do { if (level <= edac_debug_level) printk(KERN_DEBUG fmt, ##args); } while(0)
+#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
+#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
+#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
+#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
+#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
+#else /* !CONFIG_EDAC_DEBUG */
+#define debugf0( ... )
+#define debugf1( ... )
+#define debugf2( ... )
+#define debugf3( ... )
+#define debugf4( ... )
+#endif /* !CONFIG_EDAC_DEBUG */
+
+
+#define bs_xstr(s) bs_str(s)
+#define bs_str(s) #s
+#define BS_MOD_STR bs_xstr(KBUILD_BASENAME)
+
+#define BIT(x) (1 << (x))
+
+#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, PCI_DEVICE_ID_ ## vend ## _ ## dev
+
+/* memory devices */
+enum dev_type {
+ DEV_UNKNOWN = 0,
+ DEV_X1,
+ DEV_X2,
+ DEV_X4,
+ DEV_X8,
+ DEV_X16,
+ DEV_X32, /* Do these parts exist? */
+ DEV_X64 /* Do these parts exist? */
+};
+
+#define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN)
+#define DEV_FLAG_X1 BIT(DEV_X1)
+#define DEV_FLAG_X2 BIT(DEV_X2)
+#define DEV_FLAG_X4 BIT(DEV_X4)
+#define DEV_FLAG_X8 BIT(DEV_X8)
+#define DEV_FLAG_X16 BIT(DEV_X16)
+#define DEV_FLAG_X32 BIT(DEV_X32)
+#define DEV_FLAG_X64 BIT(DEV_X64)
+
+/* memory types */
+enum mem_type {
+ MEM_EMPTY = 0, /* Empty csrow */
+ MEM_RESERVED, /* Reserved csrow type */
+ MEM_UNKNOWN, /* Unknown csrow type */
+ MEM_FPM, /* Fast page mode */
+ MEM_EDO, /* Extended data out */
+ MEM_BEDO, /* Burst Extended data out */
+ MEM_SDR, /* Single data rate SDRAM */
+ MEM_RDR, /* Registered single data rate SDRAM */
+ MEM_DDR, /* Double data rate SDRAM */
+ MEM_RDDR, /* Registered Double data rate SDRAM */
+ MEM_RMBS /* Rambus DRAM */
+};
+
+#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
+#define MEM_FLAG_RESERVED BIT(MEM_RESERVED)
+#define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN)
+#define MEM_FLAG_FPM BIT(MEM_FPM)
+#define MEM_FLAG_EDO BIT(MEM_EDO)
+#define MEM_FLAG_BEDO BIT(MEM_BEDO)
+#define MEM_FLAG_SDR BIT(MEM_SDR)
+#define MEM_FLAG_RDR BIT(MEM_RDR)
+#define MEM_FLAG_DDR BIT(MEM_DDR)
+#define MEM_FLAG_RDDR BIT(MEM_RDDR)
+#define MEM_FLAG_RMBS BIT(MEM_RMBS)
+
+
+/* chipset Error Detection and Correction capabilities and mode */
+enum edac_type {
+ EDAC_UNKNOWN = 0, /* Unknown if ECC is available */
+	EDAC_NONE,		/* Doesn't support ECC */
+ EDAC_RESERVED, /* Reserved ECC type */
+ EDAC_PARITY, /* Detects parity errors */
+ EDAC_EC, /* Error Checking - no correction */
+ EDAC_SECDED, /* Single bit error correction, Double detection */
+ EDAC_S2ECD2ED, /* Chipkill x2 devices - do these exist? */
+ EDAC_S4ECD4ED, /* Chipkill x4 devices */
+ EDAC_S8ECD8ED, /* Chipkill x8 devices */
+ EDAC_S16ECD16ED, /* Chipkill x16 devices */
+};
+
+#define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN)
+#define EDAC_FLAG_NONE BIT(EDAC_NONE)
+#define EDAC_FLAG_PARITY BIT(EDAC_PARITY)
+#define EDAC_FLAG_EC BIT(EDAC_EC)
+#define EDAC_FLAG_SECDED BIT(EDAC_SECDED)
+#define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED)
+#define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED)
+#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED)
+#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED)
+
+
+/* scrubbing capabilities */
+enum scrub_type {
+ SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */
+ SCRUB_NONE, /* No scrubber */
+ SCRUB_SW_PROG, /* SW progressive (sequential) scrubbing */
+ SCRUB_SW_SRC, /* Software scrub only errors */
+ SCRUB_SW_PROG_SRC, /* Progressive software scrub from an error */
+ SCRUB_SW_TUNABLE, /* Software scrub frequency is tunable */
+ SCRUB_HW_PROG, /* HW progressive (sequential) scrubbing */
+ SCRUB_HW_SRC, /* Hardware scrub only errors */
+ SCRUB_HW_PROG_SRC, /* Progressive hardware scrub from an error */
+ SCRUB_HW_TUNABLE /* Hardware scrub frequency is tunable */
+};
+
+#define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG)
+#define SCRUB_FLAG_SW_SRC	BIT(SCRUB_SW_SRC)
+#define SCRUB_FLAG_SW_PROG_SRC	BIT(SCRUB_SW_PROG_SRC)
+#define SCRUB_FLAG_SW_TUN	BIT(SCRUB_SW_TUNABLE)
+#define SCRUB_FLAG_HW_PROG	BIT(SCRUB_HW_PROG)
+#define SCRUB_FLAG_HW_SRC	BIT(SCRUB_HW_SRC)
+#define SCRUB_FLAG_HW_PROG_SRC	BIT(SCRUB_HW_PROG_SRC)
+#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE)
+
+enum mci_sysfs_status {
+ MCI_SYSFS_INACTIVE = 0, /* sysfs entries NOT registered */
+ MCI_SYSFS_ACTIVE /* sysfs entries ARE registered */
+};
+
+/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
+
+/*
+ * There are several things to be aware of that aren't at all obvious:
+ *
+ *
+ * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
+ *
+ * These are some of the many terms that are thrown about that don't always
+ * mean what people think they mean (Inconceivable!). In the interest of
+ * creating a common ground for discussion, terms and their definitions
+ * will be established.
+ *
+ * Memory devices: The individual chip on a memory stick. These devices
+ * commonly output 4 and 8 bits each. Grouping several
+ * of these in parallel provides 64 bits which is common
+ * for a memory stick.
+ *
+ * Memory Stick:	A printed circuit board that aggregates multiple
+ * memory devices in parallel. This is the atomic
+ *			memory component that is purchasable by Joe consumer
+ * and loaded into a memory socket.
+ *
+ * Socket: A physical connector on the motherboard that accepts
+ * a single memory stick.
+ *
+ * Channel: Set of memory devices on a memory stick that must be
+ * grouped in parallel with one or more additional
+ * channels from other memory sticks. This parallel
+ *			grouping of the output from multiple channels is
+ * necessary for the smallest granularity of memory access.
+ * Some memory controllers are capable of single channel -
+ * which means that memory sticks can be loaded
+ * individually. Other memory controllers are only
+ * capable of dual channel - which means that memory
+ * sticks must be loaded as pairs (see "socket set").
+ *
+ * Chip-select row:	All of the memory devices that are selected together
+ * for a single, minimum grain of memory access.
+ * This selects all of the parallel memory devices across
+ * all of the parallel channels. Common chip-select rows
+ * for single channel are 64 bits, for dual channel 128
+ * bits.
+ *
+ * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memory.
+ * Motherboards commonly drive two chip-select pins to
+ *			a memory stick.  A single-ranked stick will occupy
+ * only one of those rows. The other will be unused.
+ *
+ * Double-Ranked stick: A double-ranked stick has two chip-select rows which
+ * access different sets of memory devices. The two
+ * rows cannot be accessed concurrently.
+ *
+ * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
+ * A double-sided stick has two chip-select rows which
+ * access different sets of memory devices. The two
+ * rows cannot be accessed concurrently. "Double-sided"
+ * is irrespective of the memory devices being mounted
+ * on both sides of the memory stick.
+ *
+ * Socket set:		All of the memory sticks that are required for
+ * a single memory access or all of the memory sticks
+ * spanned by a chip-select row. A single socket set
+ * has two chip-select rows and if double-sided sticks
+ * are used these will occupy those chip-select rows.
+ *
+ * Bank: This term is avoided because it is unclear when
+ * needing to distinguish between chip-select rows and
+ * socket sets.
+ *
+ * Controller pages:
+ *
+ * Physical pages:
+ *
+ * Virtual pages:
+ *
+ *
+ * STRUCTURE ORGANIZATION AND CHOICES
+ *
+ *
+ *
+ * PS - I enjoyed writing all that about as much as you enjoyed reading it.
+ */
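+
+/*
+ * A worked example of the terms above (illustrative only, not tied to any
+ * particular chipset): a dual channel controller loaded with one
+ * single-ranked 1GiB stick in each channel presents a single chip-select
+ * row of 2GiB; the csrow_info below would then have nr_channels == 2 and
+ * one channel_info (and DIMM label) per stick.
+ */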
+
+
+struct channel_info {
+ int chan_idx; /* channel index */
+ u32 ce_count; /* Correctable Errors for this CHANNEL */
+ char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
+ struct csrow_info *csrow; /* the parent */
+};
+
+
+struct csrow_info {
+ unsigned long first_page; /* first page number in dimm */
+ unsigned long last_page; /* last page number in dimm */
+ unsigned long page_mask; /* used for interleaving -
+ 0UL for non intlv */
+ u32 nr_pages; /* number of pages in csrow */
+ u32 grain; /* granularity of reported error in bytes */
+ int csrow_idx; /* the chip-select row */
+ enum dev_type dtype; /* memory device type */
+ u32 ue_count; /* Uncorrectable Errors for this csrow */
+ u32 ce_count; /* Correctable Errors for this csrow */
+ enum mem_type mtype; /* memory csrow type */
+ enum edac_type edac_mode; /* EDAC mode for this csrow */
+ struct mem_ctl_info *mci; /* the parent */
+
+ struct kobject kobj; /* sysfs kobject for this csrow */
+
+ /* FIXME the number of CHANNELs might need to become dynamic */
+ u32 nr_channels;
+ struct channel_info *channels;
+};
+
+
+struct mem_ctl_info {
+ struct list_head link; /* for global list of mem_ctl_info structs */
+ unsigned long mtype_cap; /* memory types supported by mc */
+ unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */
+ unsigned long edac_cap; /* configuration capabilities - this is
+ closely related to edac_ctl_cap. The
+ difference is that the controller
+ may be capable of s4ecd4ed which would
+ be listed in edac_ctl_cap, but if
+ channels aren't capable of s4ecd4ed then the
+ edac_cap would not have that capability. */
+ unsigned long scrub_cap; /* chipset scrub capabilities */
+ enum scrub_type scrub_mode; /* current scrub mode */
+
+ enum mci_sysfs_status sysfs_active; /* status of sysfs */
+
+ /* pointer to edac checking routine */
+ void (*edac_check) (struct mem_ctl_info * mci);
+ /*
+ * Remaps memory pages: controller pages to physical pages.
+ * For most MC's, this will be NULL.
+ */
+ /* FIXME - why not send the phys page to begin with? */
+ unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
+ unsigned long page);
+ int mc_idx;
+ int nr_csrows;
+ struct csrow_info *csrows;
+ /*
+ * FIXME - what about controllers on other busses? - IDs must be
+ * unique. pdev pointer should be sufficiently unique, but
+ * BUS:SLOT.FUNC numbers may not be unique.
+ */
+ struct pci_dev *pdev;
+ const char *mod_name;
+ const char *mod_ver;
+ const char *ctl_name;
+ char proc_name[MC_PROC_NAME_MAX_LEN + 1];
+ void *pvt_info;
+ u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */
+ u32 ce_noinfo_count; /* Correctable Errors w/o info */
+ u32 ue_count; /* Total Uncorrectable Errors for this MC */
+ u32 ce_count; /* Total Correctable Errors for this MC */
+ unsigned long start_time; /* mci load start time (in jiffies) */
+
+ /* this stuff is for safe removal of mc devices from global list while
+ * NMI handlers may be traversing list
+ */
+ struct rcu_head rcu;
+ struct completion complete;
+
+ /* edac sysfs device control */
+ struct kobject edac_mci_kobj;
+};
+
+
+
+/* write all or some bits in a byte-register*/
+static inline void pci_write_bits8(struct pci_dev *pdev, int offset,
+ u8 value, u8 mask)
+{
+ if (mask != 0xff) {
+ u8 buf;
+ pci_read_config_byte(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+ pci_write_config_byte(pdev, offset, value);
+}
+
+
+/* write all or some bits in a word-register*/
+static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
+ u16 value, u16 mask)
+{
+ if (mask != 0xffff) {
+ u16 buf;
+ pci_read_config_word(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+ pci_write_config_word(pdev, offset, value);
+}
+
+
+/* write all or some bits in a dword-register*/
+static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
+ u32 value, u32 mask)
+{
+	if (mask != 0xffffffff) {
+ u32 buf;
+ pci_read_config_dword(pdev, offset, &buf);
+ value &= mask;
+ buf &= ~mask;
+ value |= buf;
+ }
+ pci_write_config_dword(pdev, offset, value);
+}
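+
+/*
+ * Usage sketch (this mirrors how the i82860/i82875p drivers clear their
+ * error-status bits):
+ *
+ *	pci_write_bits16(pdev, offset, 0x0003, 0x0003);
+ *
+ * reads the word at 'offset', forces bits 1:0 to the supplied value and
+ * writes it back with every other bit preserved as read.
+ */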
+
+
+#ifdef CONFIG_EDAC_DEBUG
+void edac_mc_dump_channel(struct channel_info *chan);
+void edac_mc_dump_mci(struct mem_ctl_info *mci);
+void edac_mc_dump_csrow(struct csrow_info *csrow);
+#endif /* CONFIG_EDAC_DEBUG */
+
+extern int edac_mc_add_mc(struct mem_ctl_info *mci);
+extern int edac_mc_del_mc(struct mem_ctl_info *mci);
+
+extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
+ unsigned long page);
+
+extern struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev
+ *pdev);
+
+extern void edac_mc_scrub_block(unsigned long page,
+ unsigned long offset, u32 size);
+
+/*
+ * The no info errors are used when error overflows are reported.
+ * There are a limited number of error logging registers that can
+ * be exhausted.  When all registers are exhausted and an additional
+ * error occurs then an error overflow register records that an
+ * error occurred and the type of error, but doesn't have any
+ * further information. The ce/ue versions make for cleaner
+ * reporting logic and function interface - reduces conditional
+ * statement clutter and extra function arguments.
+ */
+extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
+ unsigned long page_frame_number,
+ unsigned long offset_in_page,
+ unsigned long syndrome,
+ int row, int channel, const char *msg);
+
+extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
+ const char *msg);
+
+extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
+ unsigned long page_frame_number,
+ unsigned long offset_in_page,
+ int row, const char *msg);
+
+extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
+ const char *msg);
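+
+/*
+ * Illustrative reporting sequence for a driver's edac_check routine (a
+ * sketch modelled on the i82860/i82875p drivers; page, syndrome, chan and
+ * the flags are placeholders decoded from chipset registers):
+ *
+ *	if (ue_overwrote_ce)
+ *		edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ *	row = edac_mc_find_csrow_by_page(mci, page);
+ *	if (is_ue)
+ *		edac_mc_handle_ue(mci, page, 0, row, "ctl UE");
+ *	else
+ *		edac_mc_handle_ce(mci, page, 0, syndrome, row, chan, "ctl CE");
+ */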
+
+/*
+ * This kmalloc's and initializes all the structures.
+ * Can't be used if all structures don't have the same lifetime.
+ */
+extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt,
+ unsigned nr_csrows, unsigned nr_chans);
+
+/* Free an mc previously allocated by edac_mc_alloc() */
+extern void edac_mc_free(struct mem_ctl_info *mci);
+
+
+#endif /* _EDAC_MC_H_ */
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
new file mode 100644
index 00000000000..52596e75f9c
--- /dev/null
+++ b/drivers/edac/i82860_edac.c
@@ -0,0 +1,299 @@
+/*
+ * Intel 82860 Memory Controller kernel module
+ * (C) 2005 Red Hat (http://www.redhat.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Ben Woodard <woodard@redhat.com>
+ * shamelessly copied from and based upon the edac_i82875 driver
+ * by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include "edac_mc.h"
+
+
+#ifndef PCI_DEVICE_ID_INTEL_82860_0
+#define PCI_DEVICE_ID_INTEL_82860_0 0x2531
+#endif /* PCI_DEVICE_ID_INTEL_82860_0 */
+
+#define I82860_MCHCFG 0x50
+#define I82860_GBA 0x60
+#define I82860_GBA_MASK 0x7FF
+#define I82860_GBA_SHIFT 24
+#define I82860_ERRSTS 0xC8
+#define I82860_EAP 0xE4
+#define I82860_DERRCTL_STS 0xE2
+
+enum i82860_chips {
+ I82860 = 0,
+};
+
+struct i82860_dev_info {
+ const char *ctl_name;
+};
+
+struct i82860_error_info {
+ u16 errsts;
+ u32 eap;
+ u16 derrsyn;
+ u16 errsts2;
+};
+
+static const struct i82860_dev_info i82860_devs[] = {
+ [I82860] = {
+ .ctl_name = "i82860"},
+};
+
+static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code
+ has already registered driver */
+
+static int i82860_registered = 1;
+
+static void i82860_get_error_info (struct mem_ctl_info *mci,
+ struct i82860_error_info *info)
+{
+ /*
+ * This is a mess because there is no atomic way to read all the
+ * registers at once and the registers can transition from CE being
+ * overwritten by UE.
+ */
+ pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts);
+ pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
+ pci_read_config_word(mci->pdev, I82860_DERRCTL_STS, &info->derrsyn);
+ pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts2);
+
+ pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
+
+ /*
+ * If the error is the same for both reads then the first set of reads
+	 * is valid.  If there is a change then there is a CE with no info and the
+ * second set of reads is valid and should be UE info.
+ */
+ if (!(info->errsts2 & 0x0003))
+ return;
+ if ((info->errsts ^ info->errsts2) & 0x0003) {
+ pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
+ pci_read_config_word(mci->pdev, I82860_DERRCTL_STS,
+ &info->derrsyn);
+ }
+}
+
+static int i82860_process_error_info (struct mem_ctl_info *mci,
+ struct i82860_error_info *info, int handle_errors)
+{
+ int row;
+
+ if (!(info->errsts2 & 0x0003))
+ return 0;
+
+ if (!handle_errors)
+ return 1;
+
+ if ((info->errsts ^ info->errsts2) & 0x0003) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ info->eap >>= PAGE_SHIFT;
+ row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+ if (info->errsts & 0x0002)
+ edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
+ else
+ edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+				0, "i82860 CE");
+
+ return 1;
+}
+
+static void i82860_check(struct mem_ctl_info *mci)
+{
+ struct i82860_error_info info;
+
+ debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ i82860_get_error_info(mci, &info);
+ i82860_process_error_info(mci, &info, 1);
+}
+
+static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ int index;
+ struct mem_ctl_info *mci = NULL;
+ unsigned long last_cumul_size;
+
+ u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
+
+ /* RDRAM has channels but these don't map onto the abstractions that
+ edac uses.
+ The device groups from the GRA registers seem to map reasonably
+ well onto the notion of a chip select row.
+	   There are 16 GRA registers; since the name is associated with
+	   the channel and the GRA registers map to physical devices, we
+	   make one channel per group.
+ */
+ mci = edac_mc_alloc(0, 16, 1);
+ if (!mci)
+ return -ENOMEM;
+
+ debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
+
+ mci->pdev = pdev;
+ mci->mtype_cap = MEM_FLAG_DDR;
+
+
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+	/* I'm not sure about this, but I think that all RDRAM is SECDED */
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ /* adjust FLAGS */
+
+ mci->mod_name = BS_MOD_STR;
+ mci->mod_ver = "$Revision: 1.1.2.6 $";
+ mci->ctl_name = i82860_devs[dev_idx].ctl_name;
+ mci->edac_check = i82860_check;
+ mci->ctl_page_to_phys = NULL;
+
+ pci_read_config_word(mci->pdev, I82860_MCHCFG, &mchcfg_ddim);
+ mchcfg_ddim = mchcfg_ddim & 0x180;
+
+ /*
+ * The group row boundary (GRA) reg values are boundary address
+ * for each DRAM row with a granularity of 16MB. GRA regs are
+ * cumulative; therefore GRA15 will contain the total memory contained
+	 * in all 16 rows.
+ */
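+	/* Worked example: a cumulative GRA value of 0x040 marks a
+	 * 64 * 16MiB = 1GiB boundary, i.e. 1GiB worth of pages once it is
+	 * shifted by (I82860_GBA_SHIFT - PAGE_SHIFT) below. */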
+ for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+ u16 value;
+ u32 cumul_size;
+ struct csrow_info *csrow = &mci->csrows[index];
+
+ pci_read_config_word(mci->pdev, I82860_GBA + index * 2,
+ &value);
+
+ cumul_size = (value & I82860_GBA_MASK) <<
+ (I82860_GBA_SHIFT - PAGE_SHIFT);
+ debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
+ __func__, index, cumul_size);
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+		csrow->grain = 1 << 12;	/* I82860_EAP has 4KiB resolution */
+ csrow->mtype = MEM_RMBS;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
+ }
+
+ /* clear counters */
+ pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n",
+ __func__);
+ edac_mc_free(mci);
+ } else {
+ /* get this far and it's successful */
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ rc = 0;
+ }
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82860_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ printk(KERN_INFO "i82860 init one\n");
+ if(pci_enable_device(pdev) < 0)
+ return -EIO;
+ rc = i82860_probe1(pdev, ent->driver_data);
+ if(rc == 0)
+ mci_pdev = pci_dev_get(pdev);
+ return rc;
+}
+
+static void __devexit i82860_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ mci = edac_mc_find_mci_by_pdev(pdev);
+ if ((mci != NULL) && (edac_mc_del_mc(mci) == 0))
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
+ {PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82860},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
+
+static struct pci_driver i82860_driver = {
+ .name = BS_MOD_STR,
+ .probe = i82860_init_one,
+ .remove = __devexit_p(i82860_remove_one),
+ .id_table = i82860_pci_tbl,
+};
+
+static int __init i82860_init(void)
+{
+ int pci_rc;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
+ return pci_rc;
+
+ if (!mci_pdev) {
+ i82860_registered = 0;
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82860_0, NULL);
+ if (mci_pdev == NULL) {
+ debugf0("860 pci_get_device fail\n");
+ return -ENODEV;
+ }
+ pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
+ if (pci_rc < 0) {
+ debugf0("860 init fail\n");
+ pci_dev_put(mci_pdev);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static void __exit i82860_exit(void)
+{
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ pci_unregister_driver(&i82860_driver);
+ if (!i82860_registered) {
+ i82860_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+ }
+}
+
+module_init(i82860_init);
+module_exit(i82860_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR
+    ("Red Hat Inc. (http://www.redhat.com) Ben Woodard <woodard@redhat.com>");
+MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
new file mode 100644
index 00000000000..009c08fe5d6
--- /dev/null
+++ b/drivers/edac/i82875p_edac.c
@@ -0,0 +1,532 @@
+/*
+ * Intel D82875P Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Contributors:
+ * Wang Zhenyu at intel.com
+ *
+ * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_mc.h"
+
+
+#ifndef PCI_DEVICE_ID_INTEL_82875_0
+#define PCI_DEVICE_ID_INTEL_82875_0 0x2578
+#endif /* PCI_DEVICE_ID_INTEL_82875_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_82875_6
+#define PCI_DEVICE_ID_INTEL_82875_6 0x257e
+#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
+
+
+/* four csrows in dual channel, eight in single channel */
+#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
+
+
+/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
+#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
+ *
+ * 31:12 block address
+ * 11:0 reserved
+ */
+
+#define I82875P_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
+ *
+ * 7:0 DRAM ECC Syndrome
+ */
+
+#define I82875P_DES 0x5d /* DRAM Error Status (8b)
+ *
+ * 7:1 reserved
+ * 0 Error channel 0/1
+ */
+
+#define I82875P_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15:10 reserved
+ * 9 non-DRAM lock error (ndlock)
+ * 8 Sftwr Generated SMI
+ * 7 ECC UE
+ * 6 reserved
+ * 5 MCH detects unimplemented cycle
+ * 4 AGP access outside GA
+ * 3 Invalid AGP access
+ * 2 Invalid GA translation table
+ * 1 Unsupported AGP command
+ * 0 ECC CE
+ */
+
+#define I82875P_ERRCMD 0xca /* Error Command (16b)
+ *
+ * 15:10 reserved
+ * 9 SERR on non-DRAM lock
+ * 8 SERR on ECC UE
+ * 7 SERR on ECC CE
+ * 6 target abort on high exception
+ * 5 detect unimplemented cyc
+ * 4 AGP access outside of GA
+ * 3 SERR on invalid AGP access
+ * 2 invalid translation table
+ * 1 SERR on unsupported AGP command
+ * 0 reserved
+ */
+
+
+/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
+#define I82875P_PCICMD6 0x04 /* PCI Command Register (16b)
+ *
+ * 15:10 reserved
+ * 9 fast back-to-back - ro 0
+ * 8 SERR enable - ro 0
+ * 7 addr/data stepping - ro 0
+ * 6 parity err enable - ro 0
+ * 5 VGA palette snoop - ro 0
+ * 4 mem wr & invalidate - ro 0
+ * 3 special cycle - ro 0
+ * 2 bus master - ro 0
+ * 1 mem access dev6 - 0(dis),1(en)
+ * 0 IO access dev3 - 0(dis),1(en)
+ */
+
+#define I82875P_BAR6 0x10 /* Mem Delays Base ADDR Reg (32b)
+ *
+ * 31:12 mem base addr [31:12]
+ * 11:4 address mask - ro 0
+ * 3 prefetchable - ro 0(non),1(pre)
+ * 2:1 mem type - ro 0
+ * 0 mem space - ro 0
+ */
+
+/* Intel 82875p MMIO register space - device 0 function 0 - MMR space */
+
+#define I82875P_DRB_SHIFT 26 /* 64MiB grain */
+#define I82875P_DRB 0x00 /* DRAM Row Boundary (8b x 8)
+ *
+ * 7 reserved
+ * 6:0 64MiB row boundary addr
+ */
+
+#define I82875P_DRA 0x10 /* DRAM Row Attribute (4b x 8)
+ *
+ * 7 reserved
+ * 6:4 row attr row 1
+ * 3 reserved
+ * 2:0 row attr row 0
+ *
+ * 000 = 4KiB
+ * 001 = 8KiB
+ * 010 = 16KiB
+ * 011 = 32KiB
+ */
+
+#define I82875P_DRC 0x68 /* DRAM Controller Mode (32b)
+ *
+ * 31:30 reserved
+ * 29 init complete
+ * 28:23 reserved
+ * 22:21 nr chan 00=1,01=2
+ * 20 reserved
+ * 19:18 Data Integ Mode 00=none,01=ecc
+ * 17:11 reserved
+ * 10:8 refresh mode
+ * 7 reserved
+ * 6:4 mode select
+ * 3:2 reserved
+ * 1:0 DRAM type 01=DDR
+ */
+
+
+enum i82875p_chips {
+ I82875P = 0,
+};
+
+
+struct i82875p_pvt {
+ struct pci_dev *ovrfl_pdev;
+ void *ovrfl_window;
+};
+
+
+struct i82875p_dev_info {
+ const char *ctl_name;
+};
+
+
+struct i82875p_error_info {
+ u16 errsts;
+ u32 eap;
+ u8 des;
+ u8 derrsyn;
+ u16 errsts2;
+};
+
+
+static const struct i82875p_dev_info i82875p_devs[] = {
+ [I82875P] = {
+ .ctl_name = "i82875p"},
+};
+
+static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code
+ has already registered driver */
+static int i82875p_registered = 1;
+
+static void i82875p_get_error_info (struct mem_ctl_info *mci,
+ struct i82875p_error_info *info)
+{
+ /*
+ * This is a mess because there is no atomic way to read all the
+ * registers at once and the registers can transition from CE being
+ * overwritten by UE.
+ */
+ pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts);
+ pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
+ pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
+ pci_read_config_byte(mci->pdev, I82875P_DERRSYN, &info->derrsyn);
+ pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts2);
+
+ pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
+
+ /*
+	 * If the error is the same for both reads then the first set of
+	 * reads is valid.  If there is a change then the CE has no usable
+	 * info and the second set of reads is valid and should be the UE
+	 * info.
+ */
+ if (!(info->errsts2 & 0x0081))
+ return;
+ if ((info->errsts ^ info->errsts2) & 0x0081) {
+ pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
+ pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
+ pci_read_config_byte(mci->pdev, I82875P_DERRSYN,
+ &info->derrsyn);
+ }
+}
+
+static int i82875p_process_error_info (struct mem_ctl_info *mci,
+ struct i82875p_error_info *info, int handle_errors)
+{
+ int row, multi_chan;
+
+ multi_chan = mci->csrows[0].nr_channels - 1;
+
+ if (!(info->errsts2 & 0x0081))
+ return 0;
+
+ if (!handle_errors)
+ return 1;
+
+ if ((info->errsts ^ info->errsts2) & 0x0081) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ info->eap >>= PAGE_SHIFT;
+ row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+ if (info->errsts & 0x0080)
+ edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
+ else
+ edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+ multi_chan ? (info->des & 0x1) : 0,
+ "i82875p CE");
+
+ return 1;
+}
+
+
+static void i82875p_check(struct mem_ctl_info *mci)
+{
+ struct i82875p_error_info info;
+
+ debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ i82875p_get_error_info(mci, &info);
+ i82875p_process_error_info(mci, &info, 1);
+}
+
+
+#ifdef CONFIG_PROC_FS
+extern int pci_proc_attach_device(struct pci_dev *);
+#endif
+
+static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ int index;
+ struct mem_ctl_info *mci = NULL;
+ struct i82875p_pvt *pvt = NULL;
+ unsigned long last_cumul_size;
+ struct pci_dev *ovrfl_pdev;
+ void __iomem *ovrfl_window = NULL;
+
+ u32 drc;
+ u32 drc_chan; /* Number of channels 0=1chan,1=2chan */
+ u32 nr_chans;
+ u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ ovrfl_pdev = pci_find_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
+
+ if (!ovrfl_pdev) {
+ /*
+ * Intel tells BIOS developers to hide device 6 which
+ * configures the overflow device access containing
+ * the DRBs - this is where we expose device 6.
+ * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
+ */
+ pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
+ ovrfl_pdev =
+ pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
+ if (!ovrfl_pdev)
+ goto fail;
+ }
+#ifdef CONFIG_PROC_FS
+ if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) {
+ printk(KERN_ERR "MC: " __FILE__
+ ": %s(): Failed to attach overflow device\n",
+ __func__);
+ goto fail;
+ }
+#endif /* CONFIG_PROC_FS */
+ if (pci_enable_device(ovrfl_pdev)) {
+ printk(KERN_ERR "MC: " __FILE__
+ ": %s(): Failed to enable overflow device\n",
+ __func__);
+ goto fail;
+ }
+
+ if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) {
+#ifdef CORRECT_BIOS
+ goto fail;
+#endif
+ }
+ /* cache is irrelevant for PCI bus reads/writes */
+ ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0),
+ pci_resource_len(ovrfl_pdev, 0));
+
+ if (!ovrfl_window) {
+ printk(KERN_ERR "MC: " __FILE__
+ ": %s(): Failed to ioremap bar6\n", __func__);
+ goto fail;
+ }
+
+ /* need to find out the number of channels */
+ drc = readl(ovrfl_window + I82875P_DRC);
+ drc_chan = ((drc >> 21) & 0x1);
+ nr_chans = drc_chan + 1;
+ drc_ddim = (drc >> 18) & 0x1;
+
+ mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
+ nr_chans);
+
+ if (!mci) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
+
+ mci->pdev = pdev;
+ mci->mtype_cap = MEM_FLAG_DDR;
+
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_UNKNOWN;
+ /* adjust FLAGS */
+
+ mci->mod_name = BS_MOD_STR;
+ mci->mod_ver = "$Revision: 1.5.2.11 $";
+ mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
+ mci->edac_check = i82875p_check;
+ mci->ctl_page_to_phys = NULL;
+
+ debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
+
+ pvt = (struct i82875p_pvt *) mci->pvt_info;
+ pvt->ovrfl_pdev = ovrfl_pdev;
+ pvt->ovrfl_window = ovrfl_window;
+
+ /*
+ * The dram row boundary (DRB) reg values are boundary address
+ * for each DRAM row with a granularity of 32 or 64MB (single/dual
+ * channel operation). DRB regs are cumulative; therefore DRB7 will
+ * contain the total memory contained in all eight rows.
+ */
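+	/* Worked example: with the 64MiB grain implied by I82875P_DRB_SHIFT,
+	 * a cumulative DRB value of 0x08 marks an 8 * 64MiB = 512MiB
+	 * boundary. */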
+ for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+ u8 value;
+ u32 cumul_size;
+ struct csrow_info *csrow = &mci->csrows[index];
+
+ value = readb(ovrfl_window + I82875P_DRB + index);
+ cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
+ debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
+ __func__, index, cumul_size);
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+		csrow->grain = 1 << 12;	/* I82875P_EAP has 4KiB resolution */
+ csrow->mtype = MEM_DDR;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+ }
+
+ /* clear counters */
+ pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ return 0;
+
+ fail:
+ if (mci)
+ edac_mc_free(mci);
+
+ if (ovrfl_window)
+ iounmap(ovrfl_window);
+
+ if (ovrfl_pdev) {
+ pci_release_regions(ovrfl_pdev);
+ pci_disable_device(ovrfl_pdev);
+ }
+
+ /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
+ return rc;
+}
+
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82875p_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ printk(KERN_INFO "i82875p init one\n");
+ if(pci_enable_device(pdev) < 0)
+ return -EIO;
+ rc = i82875p_probe1(pdev, ent->driver_data);
+ if (mci_pdev == NULL)
+ mci_pdev = pci_dev_get(pdev);
+ return rc;
+}
+
+
+static void __devexit i82875p_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct i82875p_pvt *pvt = NULL;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
+ return;
+
+ pvt = (struct i82875p_pvt *) mci->pvt_info;
+ if (pvt->ovrfl_window)
+ iounmap(pvt->ovrfl_window);
+
+ if (pvt->ovrfl_pdev) {
+#ifdef CORRECT_BIOS
+ pci_release_regions(pvt->ovrfl_pdev);
+#endif /*CORRECT_BIOS */
+ pci_disable_device(pvt->ovrfl_pdev);
+ pci_dev_put(pvt->ovrfl_pdev);
+ }
+
+ if (edac_mc_del_mc(mci))
+ return;
+
+ edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
+ {PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82875P},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
+
+
+static struct pci_driver i82875p_driver = {
+ .name = BS_MOD_STR,
+ .probe = i82875p_init_one,
+ .remove = __devexit_p(i82875p_remove_one),
+ .id_table = i82875p_pci_tbl,
+};
+
+
+static int __init i82875p_init(void)
+{
+ int pci_rc;
+
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+ pci_rc = pci_register_driver(&i82875p_driver);
+ if (pci_rc < 0)
+ return pci_rc;
+ if (mci_pdev == NULL) {
+ i82875p_registered = 0;
+ mci_pdev =
+ pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82875_0, NULL);
+ if (!mci_pdev) {
+ debugf0("875p pci_get_device fail\n");
+ return -ENODEV;
+ }
+ pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
+ if (pci_rc < 0) {
+ debugf0("875p init fail\n");
+ pci_dev_put(mci_pdev);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+
+static void __exit i82875p_exit(void)
+{
+ debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+ pci_unregister_driver(&i82875p_driver);
+ if (!i82875p_registered) {
+ i82875p_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+ }
+}
+
+
+module_init(i82875p_init);
+module_exit(i82875p_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
new file mode 100644
index 00000000000..e90892831b9
--- /dev/null
+++ b/drivers/edac/r82600_edac.c
@@ -0,0 +1,407 @@
+/*
+ * Radisys 82600 Embedded chipset Memory Controller kernel module
+ * (C) 2005 EADS Astrium
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Tim Small <tim@buttersideup.com>, based on work by Thayne
+ * Harbaugh, Dan Hollis <goemon at anime dot net> and others.
+ *
+ * $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ * Written with reference to 82600 High Integration Dual PCI System
+ * Controller Data Book:
+ * http://www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
+ * references to this document given in []
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_mc.h"
+
+/* Radisys say "The 82600 integrates a main memory SDRAM controller that
+ * supports up to four banks of memory. The four banks can support a mix of
+ * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
+ * each of which can be any size from 16MB to 512MB. Both registered (control
+ * signals buffered) and unbuffered DIMM types are supported. Mixing of
+ * registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs
+ * is not allowed. The 82600 SDRAM interface operates at the same frequency as
+ * the CPU bus, 66MHz, 100MHz or 133MHz."
+ */
+
+#define R82600_NR_CSROWS 4
+#define R82600_NR_CHANS 1
+#define R82600_NR_DIMMS 4
+
+#define R82600_BRIDGE_ID 0x8200
+
+/* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */
+#define R82600_DRAMC 0x57 /* Various SDRAM related control bits
+ * all bits are R/W
+ *
+ * 7 SDRAM ISA Hole Enable
+ * 6 Flash Page Mode Enable
+ * 5 ECC Enable: 1=ECC 0=noECC
+ * 4 DRAM DIMM Type: 1=
+ * 3 BIOS Alias Disable
+ * 2 SDRAM BIOS Flash Write Enable
+ * 1:0 SDRAM Refresh Rate: 00=Disabled
+ * 01=7.8usec (256Mbit SDRAMs)
+ * 10=15.6us 11=125usec
+ */
+
+#define R82600_SDRAMC 0x76 /* "SDRAM Control Register"
+ * More SDRAM related control bits
+ * all bits are R/W
+ *
+ * 15:8 Reserved.
+ *
+ * 7:5 Special SDRAM Mode Select
+ *
+ * 4 Force ECC
+ *
+ * 1=Drive ECC bits to 0 during
+ * write cycles (i.e. ECC test mode)
+ *
+ * 0=Normal ECC functioning
+ *
+ * 3 Enhanced Paging Enable
+ *
+ * 2 CAS# Latency 0=3clks 1=2clks
+ *
+ * 1 RAS# to CAS# Delay 0=3 1=2
+ *
+ * 0 RAS# Precharge 0=3 1=2
+ */
+
+#define R82600_EAP 0x80 /* ECC Error Address Pointer Register
+ *
+ * 31 Disable Hardware Scrubbing (RW)
+ * 0=Scrub on corrected read
+ * 1=Don't scrub on corrected read
+ *
+ * 30:12 Error Address Pointer (RO)
+ * Upper 19 bits of error address
+ *
+ * 11:4 Syndrome Bits (RO)
+ *
+ * 3 BSERR# on multibit error (RW)
+ * 1=enable 0=disable
+ *
+				 *  2    NMI on Single Bit Error (RW)
+ * 1=NMI triggered by SBE n.b. other
+				 *          prerequisites
+ * 0=NMI not triggered
+ *
+ * 1 MBE (R/WC)
+ * read 1=MBE at EAP (see above)
+ * read 0=no MBE, or SBE occurred first
+ * write 1=Clear MBE status (must also
+ * clear SBE)
+ * write 0=NOP
+ *
+ * 1 SBE (R/WC)
+ * read 1=SBE at EAP (see above)
+ * read 0=no SBE, or MBE occurred first
+ * write 1=Clear SBE status (must also
+ * clear MBE)
+ * write 0=NOP
+ */
+
+#define R82600_DRBA	  0x60	/* + 0x60..0x63 SDRAM Row Boundary Address
+ * Registers
+ *
+ * 7:0 Address lines 30:24 - upper limit of
+ * each row [p57]
+ */
+
+struct r82600_error_info {
+ u32 eapr;
+};
+
+
+static unsigned int disable_hardware_scrub = 0;
+
+
+static void r82600_get_error_info (struct mem_ctl_info *mci,
+ struct r82600_error_info *info)
+{
+ pci_read_config_dword(mci->pdev, R82600_EAP, &info->eapr);
+
+ if (info->eapr & BIT(0))
+ /* Clear error to allow next error to be reported [p.62] */
+ pci_write_bits32(mci->pdev, R82600_EAP,
+				 ((u32) BIT(0) | (u32) BIT(1)),
+				 ((u32) BIT(0) | (u32) BIT(1)));
+
+ if (info->eapr & BIT(1))
+ /* Clear error to allow next error to be reported [p.62] */
+ pci_write_bits32(mci->pdev, R82600_EAP,
+				 ((u32) BIT(0) | (u32) BIT(1)),
+				 ((u32) BIT(0) | (u32) BIT(1)));
+}
+
+
+static int r82600_process_error_info (struct mem_ctl_info *mci,
+ struct r82600_error_info *info, int handle_errors)
+{
+ int error_found;
+ u32 eapaddr, page;
+ u32 syndrome;
+
+ error_found = 0;
+
+ /* bits 30:12 store the upper 19 bits of the 32 bit error address */
+ eapaddr = ((info->eapr >> 12) & 0x7FFF) << 13;
+ /* Syndrome in bits 11:4 [p.62] */
+ syndrome = (info->eapr >> 4) & 0xFF;
+
+ /* the R82600 reports at less than page *
+ * granularity (upper 19 bits only) */
+ page = eapaddr >> PAGE_SHIFT;
+
+ if (info->eapr & BIT(0)) { /* CE? */
+ error_found = 1;
+
+ if (handle_errors)
+ edac_mc_handle_ce(
+ mci, page, 0, /* not avail */
+ syndrome,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, /* channel */
+ mci->ctl_name);
+ }
+
+ if (info->eapr & BIT(1)) { /* UE? */
+ error_found = 1;
+
+ if (handle_errors)
+ /* 82600 doesn't give enough info */
+ edac_mc_handle_ue(mci, page, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ mci->ctl_name);
+ }
+
+ return error_found;
+}
+
+static void r82600_check(struct mem_ctl_info *mci)
+{
+ struct r82600_error_info info;
+
+ debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ r82600_get_error_info(mci, &info);
+ r82600_process_error_info(mci, &info, 1);
+}
+
+static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ int index;
+ struct mem_ctl_info *mci = NULL;
+ u8 dramcr;
+ u32 ecc_on;
+ u32 reg_sdram;
+ u32 eapr;
+ u32 scrub_disabled;
+ u32 sdram_refresh_rate;
+ u32 row_high_limit_last = 0;
+ u32 eap_init_bits;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+
+ pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
+ pci_read_config_dword(pdev, R82600_EAP, &eapr);
+
+ ecc_on = dramcr & BIT(5);
+ reg_sdram = dramcr & BIT(4);
+ scrub_disabled = eapr & BIT(31);
+ sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
+
+ debugf2("MC: " __FILE__ ": %s(): sdram refresh rate = %#0x\n",
+ __func__, sdram_refresh_rate);
+
+ debugf2("MC: " __FILE__ ": %s(): DRAMC register = %#0x\n", __func__,
+ dramcr);
+
+ mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
+
+ if (mci == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+ mci->pdev = pdev;
+ mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
+
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+	/* FIXME: try to work out whether the chip leads have been *
+	 * used for COM2 instead on this board [MA6?].  MAYBE: */
+
+ /* On the R82600, the pins for memory bits 72:65 - i.e. the *
+ * EC bits are shared with the pins for COM2 (!), so if COM2 *
+ * is enabled, we assume COM2 is wired up, and thus no EDAC *
+ * is possible. */
+ mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ if (ecc_on) {
+ if (scrub_disabled)
+ debugf3("MC: " __FILE__ ": %s(): mci = %p - "
+ "Scrubbing disabled! EAP: %#0x\n", __func__,
+ mci, eapr);
+ } else
+ mci->edac_cap = EDAC_FLAG_NONE;
+
+ mci->mod_name = BS_MOD_STR;
+ mci->mod_ver = "$Revision: 1.1.2.6 $";
+ mci->ctl_name = "R82600";
+ mci->edac_check = r82600_check;
+ mci->ctl_page_to_phys = NULL;
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ struct csrow_info *csrow = &mci->csrows[index];
+		u8 drbar;	/* SDRAM Row Boundary Address Register */
+ u32 row_high_limit;
+ u32 row_base;
+
+ /* find the DRAM Chip Select Base address and mask */
+ pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar);
+
+ debugf1("MC%d: " __FILE__ ": %s() Row=%d DRBA = %#0x\n",
+ mci->mc_idx, __func__, index, drbar);
+
+ row_high_limit = ((u32) drbar << 24);
+/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
+
+ debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
+			"Boundary Address=%#0x, Last = %#0x\n",
+ mci->mc_idx, __func__, index, row_high_limit,
+ row_high_limit_last);
+
+ /* Empty row [p.57] */
+ if (row_high_limit == row_high_limit_last)
+ continue;
+
+ row_base = row_high_limit_last;
+
+ csrow->first_page = row_base >> PAGE_SHIFT;
+ csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
+ csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+ /* Error address is top 19 bits - so granularity is *
+ * 14 bits */
+ csrow->grain = 1 << 14;
+ csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
+ /* FIXME - check that this is unknowable with this chipset */
+ csrow->dtype = DEV_UNKNOWN;
+
+ /* Mode is global on 82600 */
+ csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
+ row_high_limit_last = row_high_limit;
+ }
+
+ /* clear counters */
+ /* FIXME should we? */
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+
+ /* Clear error flags to allow next error to be reported [p.62] */
+ /* Test systems seem to always have the UE flag raised on boot */
+
+	eap_init_bits = BIT(0) | BIT(1);
+ if (disable_hardware_scrub) {
+ eap_init_bits |= BIT(31);
+ debugf3("MC: " __FILE__ ": %s(): Disabling Hardware Scrub "
+ "(scrub on error)\n", __func__);
+ }
+
+ pci_write_bits32(mci->pdev, R82600_EAP, eap_init_bits,
+ eap_init_bits);
+
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ return 0;
+
+fail:
+ if (mci)
+ edac_mc_free(mci);
+
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit r82600_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* don't need to call pci_device_enable() */
+ return r82600_probe1(pdev, ent->driver_data);
+}
+
+
+static void __devexit r82600_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) &&
+ !edac_mc_del_mc(mci))
+ edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
+ {PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
+
+
+static struct pci_driver r82600_driver = {
+ .name = BS_MOD_STR,
+ .probe = r82600_init_one,
+ .remove = __devexit_p(r82600_remove_one),
+ .id_table = r82600_pci_tbl,
+};
+
+
+static int __init r82600_init(void)
+{
+ return pci_register_driver(&r82600_driver);
+}
+
+
+static void __exit r82600_exit(void)
+{
+ pci_unregister_driver(&r82600_driver);
+}
+
+
+module_init(r82600_init);
+module_exit(r82600_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
+ "on behalf of EADS Astrium");
+MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
+
+module_param(disable_hardware_scrub, bool, 0644);
+MODULE_PARM_DESC(disable_hardware_scrub,
+ "If set, disable the chipset's automatic scrub for CEs");
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index ca99979c868..8b3515f394a 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -8,6 +8,7 @@
* completion notification.
*/
+#include <asm/types.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0c69918671c..626508afe1b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -7,6 +7,7 @@ menu "Network device support"
config NETDEVICES
depends on NET
+ default y if UML
bool "Network device support"
---help---
You can say N here if you don't intend to connect your Linux box to
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index b8953de5664..b508812e97a 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -1002,6 +1002,8 @@ static int __devinit ace_init(struct net_device *dev)
mac1 = 0;
for(i = 0; i < 4; i++) {
+ int tmp;
+
mac1 = mac1 << 8;
tmp = read_eeprom_byte(dev, 0x8c+i);
if (tmp < 0) {
@@ -1012,6 +1014,8 @@ static int __devinit ace_init(struct net_device *dev)
}
mac2 = 0;
for(i = 4; i < 8; i++) {
+ int tmp;
+
mac2 = mac2 << 8;
tmp = read_eeprom_byte(dev, 0x8c+i);
if (tmp < 0) {
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index df9d6e80c4f..c3267e4e1bb 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1399,7 +1399,6 @@ static int b44_open(struct net_device *dev)
b44_init_rings(bp);
b44_init_hw(bp);
- netif_carrier_off(dev);
b44_check_phy(bp);
err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
@@ -1464,7 +1463,7 @@ static int b44_close(struct net_device *dev)
#endif
b44_halt(bp);
b44_free_rings(bp);
- netif_carrier_off(bp->dev);
+ netif_carrier_off(dev);
spin_unlock_irq(&bp->lock);
@@ -2000,6 +1999,8 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
dev->irq = pdev->irq;
SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
+ netif_carrier_off(dev);
+
err = b44_get_invariants(bp);
if (err) {
printk(KERN_ERR PFX "Problem fetching invariants of chip, "
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2582d98ef5c..4ff006c3762 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -576,7 +576,7 @@ static int bond_update_speed_duplex(struct slave *slave)
slave->duplex = DUPLEX_FULL;
if (slave_dev->ethtool_ops) {
- u32 res;
+ int res;
if (!slave_dev->ethtool_ops->get_settings) {
return -1;
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index dde631f8f68..6e295fce5c6 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -335,6 +335,30 @@ static inline void cas_mask_intr(struct cas *cp)
cas_disable_irq(cp, i);
}
+static inline void cas_buffer_init(cas_page_t *cp)
+{
+ struct page *page = cp->buffer;
+ atomic_set((atomic_t *)&page->lru.next, 1);
+}
+
+static inline int cas_buffer_count(cas_page_t *cp)
+{
+ struct page *page = cp->buffer;
+ return atomic_read((atomic_t *)&page->lru.next);
+}
+
+static inline void cas_buffer_inc(cas_page_t *cp)
+{
+ struct page *page = cp->buffer;
+ atomic_inc((atomic_t *)&page->lru.next);
+}
+
+static inline void cas_buffer_dec(cas_page_t *cp)
+{
+ struct page *page = cp->buffer;
+ atomic_dec((atomic_t *)&page->lru.next);
+}
+
static void cas_enable_irq(struct cas *cp, const int ring)
{
if (ring == 0) { /* all but TX_DONE */
@@ -472,6 +496,7 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
{
pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
PCI_DMA_FROMDEVICE);
+ cas_buffer_dec(page);
__free_pages(page->buffer, cp->page_order);
kfree(page);
return 0;
@@ -501,6 +526,7 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
page->buffer = alloc_pages(flags, cp->page_order);
if (!page->buffer)
goto page_err;
+ cas_buffer_init(page);
page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
cp->page_size, PCI_DMA_FROMDEVICE);
return page;
@@ -579,7 +605,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
list_for_each_safe(elem, tmp, &list) {
cas_page_t *page = list_entry(elem, cas_page_t, list);
- if (page_count(page->buffer) > 1)
+ if (cas_buffer_count(page) > 1)
continue;
list_del(elem);
@@ -1347,7 +1373,7 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
cas_page_t *page = cp->rx_pages[1][index];
cas_page_t *new;
- if (page_count(page->buffer) == 1)
+ if (cas_buffer_count(page) == 1)
return page;
new = cas_page_dequeue(cp);
@@ -1367,7 +1393,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
cas_page_t **page1 = cp->rx_pages[1];
/* swap if buffer is in use */
- if (page_count(page0[index]->buffer) > 1) {
+ if (cas_buffer_count(page0[index]) > 1) {
cas_page_t *new = cas_page_spare(cp, index);
if (new) {
page1[index] = page0[index];
@@ -2039,6 +2065,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
skb->len += hlen - swivel;
get_page(page->buffer);
+ cas_buffer_inc(page);
frag->page = page->buffer;
frag->page_offset = off;
frag->size = hlen - swivel;
@@ -2063,6 +2090,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
frag++;
get_page(page->buffer);
+ cas_buffer_inc(page);
frag->page = page->buffer;
frag->page_offset = 0;
frag->size = hlen;
@@ -2225,7 +2253,7 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
released = 0;
while (entry != last) {
/* make a new buffer if it's still in use */
- if (page_count(page[entry]->buffer) > 1) {
+ if (cas_buffer_count(page[entry]) > 1) {
cas_page_t *new = cas_page_dequeue(cp);
if (!new) {
/* let the timer know that we need to
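
The cas_buffer_*() helpers added above keep a driver-private use count per RX page (stashed in the otherwise unused page->lru.next field) so the driver can tell whether a page is still referenced by an skb fragment, instead of inspecting page_count(). The userspace sketch below shows the same pattern with C11 atomics; rx_buf, buf_get, buf_put and buf_in_use are hypothetical names, not the driver's API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for cas_page_t: a buffer plus a private use count. */
struct rx_buf {
	atomic_int users;	/* 1 == only the driver holds it */
};

static void buf_init(struct rx_buf *b)   { atomic_store(&b->users, 1); }
static void buf_get(struct rx_buf *b)    { atomic_fetch_add(&b->users, 1); }
static void buf_put(struct rx_buf *b)    { atomic_fetch_sub(&b->users, 1); }
static bool buf_in_use(struct rx_buf *b) { return atomic_load(&b->users) > 1; }

int main(void)
{
	struct rx_buf b;

	buf_init(&b);
	printf("in use after init: %d\n", buf_in_use(&b));	/* 0: reusable */

	buf_get(&b);	/* handed out, e.g. attached to an skb fragment */
	printf("in use after get:  %d\n", buf_in_use(&b));	/* 1: swap in a spare */

	buf_put(&b);	/* the consumer released it */
	printf("in use after put:  %d\n", buf_in_use(&b));	/* 0: reusable again */
	return 0;
}
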
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 41c75616e91..da0d8a85acc 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3586,7 +3586,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Buffer_Pointer = (u64) pci_map_page
(sp->pdev, frag->page, frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
- txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
+ txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
if (skb_shinfo(skb)->ufo_size)
txdp->Control_1 |= TXD_UFO_EN;
}
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index d9ce8c54941..bc36edff205 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2595,15 +2595,11 @@ static int __init serial8250_init(void)
if (ret)
goto out;
- ret = platform_driver_register(&serial8250_isa_driver);
- if (ret)
- goto unreg_uart_drv;
-
serial8250_isa_devs = platform_device_alloc("serial8250",
PLAT8250_DEV_LEGACY);
if (!serial8250_isa_devs) {
ret = -ENOMEM;
- goto unreg_plat_drv;
+ goto unreg_uart_drv;
}
ret = platform_device_add(serial8250_isa_devs);
@@ -2612,12 +2608,13 @@ static int __init serial8250_init(void)
serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev);
- goto out;
+ ret = platform_driver_register(&serial8250_isa_driver);
+ if (ret == 0)
+ goto out;
+ platform_device_del(serial8250_isa_devs);
put_dev:
platform_device_put(serial8250_isa_devs);
- unreg_plat_drv:
- platform_driver_unregister(&serial8250_isa_driver);
unreg_uart_drv:
uart_unregister_driver(&serial8250_reg);
out:
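
The reworked serial8250_init() above delays platform_driver_register() until the legacy platform device has been added and the ports registered, and on failure unwinds in reverse order through the goto labels. Below is a minimal sketch of that goto-unwind pattern; step_a/step_b/step_c and undo_a/undo_b are hypothetical stand-ins, not the 8250 code itself.

#include <stdio.h>

/* Hypothetical setup steps: each returns 0 on success, negative on error. */
static int step_a(void) { puts("a: core driver registered");     return 0; }
static int step_b(void) { puts("b: legacy device added");        return 0; }
static int step_c(void) { puts("c: platform driver registered"); return -1; }

static void undo_b(void) { puts("undo b: delete legacy device"); }
static void undo_a(void) { puts("undo a: unregister core driver"); }

static int init_example(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out;

	ret = step_b();
	if (ret)
		goto fail_a;

	ret = step_c();			/* registered last, so nothing depends on it yet */
	if (ret == 0)
		goto out;		/* full success */

	undo_b();			/* tear down in strict reverse order */
fail_a:
	undo_a();
out:
	return ret;
}

int main(void)
{
	printf("init_example() = %d\n", init_example());
	return 0;
}
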
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 589fb076654..2a912153321 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -940,6 +940,7 @@ enum pci_board_num_t {
pbn_b2_bt_2_921600,
pbn_b2_bt_4_921600,
+ pbn_b3_2_115200,
pbn_b3_4_115200,
pbn_b3_8_115200,
@@ -1311,6 +1312,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.uart_offset = 8,
},
+ [pbn_b3_2_115200] = {
+ .flags = FL_BASE3,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
[pbn_b3_4_115200] = {
.flags = FL_BASE3,
.num_ports = 4,
@@ -2272,6 +2279,9 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_nec_nile4 },
+ { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b3_2_115200 },
{ PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b3_4_115200 },
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 5e7199f7b59..9fd1925de36 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -301,7 +301,7 @@ config SERIAL_AT91_TTYAT
depends on SERIAL_AT91=y
help
Say Y here if you wish to have the five internal AT91RM9200 UARTs
- appear as /dev/ttyAT0-4 (major 240, minor 0-4) instead of the
+ appear as /dev/ttyAT0-4 (major 204, minor 154-158) instead of the
normal /dev/ttyS0-4 (major 4, minor 64-68). This is necessary if
you also want other UARTs, such as external 8250/16C550 compatible
UARTs.
diff --git a/drivers/serial/at91_serial.c b/drivers/serial/at91_serial.c
index 0e206063d68..2113feb75c3 100644
--- a/drivers/serial/at91_serial.c
+++ b/drivers/serial/at91_serial.c
@@ -222,8 +222,6 @@ static void at91_rx_chars(struct uart_port *port, struct pt_regs *regs)
while (status & (AT91_US_RXRDY)) {
ch = UART_GET_CHAR(port);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- goto ignore_char;
port->icount.rx++;
flg = TTY_NORMAL;
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 5468e5a767e..43e67d6c29d 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -6,7 +6,7 @@
* driver for that.
*
*
- * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
@@ -829,8 +829,8 @@ static int __init sn_sal_module_init(void)
misc.name = DEVICE_NAME_DYNAMIC;
retval = misc_register(&misc);
if (retval != 0) {
- printk
- ("Failed to register console device using misc_register.\n");
+ printk(KERN_WARNING "Failed to register console "
+ "device using misc_register.\n");
return -ENODEV;
}
sal_console_uart.major = MISC_MAJOR;
@@ -942,88 +942,75 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
{
unsigned long flags = 0;
struct sn_cons_port *port = &sal_console_port;
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
static int stole_lock = 0;
-#endif
BUG_ON(!port->sc_is_asynch);
/* We can't look at the xmit buffer if we're not registered with serial core
* yet. So only do the fancy recovery after registering
*/
- if (port->sc_port.info) {
-
- /* somebody really wants this output, might be an
- * oops, kdb, panic, etc. make sure they get it. */
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
- if (spin_is_locked(&port->sc_port.lock)) {
- int lhead = port->sc_port.info->xmit.head;
- int ltail = port->sc_port.info->xmit.tail;
- int counter, got_lock = 0;
+ if (!port->sc_port.info) {
+ /* Not yet registered with serial core - simple case */
+ puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
+ return;
+ }
- /*
- * We attempt to determine if someone has died with the
- * lock. We wait ~20 secs after the head and tail ptrs
- * stop moving and assume the lock holder is not functional
- * and plow ahead. If the lock is freed within the time out
- * period we re-get the lock and go ahead normally. We also
- * remember if we have plowed ahead so that we don't have
- * to wait out the time out period again - the asumption
- * is that we will time out again.
- */
+ /* somebody really wants this output, might be an
+ * oops, kdb, panic, etc. make sure they get it. */
+ if (spin_is_locked(&port->sc_port.lock)) {
+ int lhead = port->sc_port.info->xmit.head;
+ int ltail = port->sc_port.info->xmit.tail;
+ int counter, got_lock = 0;
+
+ /*
+ * We attempt to determine if someone has died with the
+ * lock. We wait ~20 secs after the head and tail ptrs
+ * stop moving and assume the lock holder is not functional
+ * and plow ahead. If the lock is freed within the time out
+ * period we re-get the lock and go ahead normally. We also
+ * remember if we have plowed ahead so that we don't have
+		 * to wait out the time out period again - the assumption
+ * is that we will time out again.
+ */
- for (counter = 0; counter < 150; mdelay(125), counter++) {
- if (!spin_is_locked(&port->sc_port.lock)
- || stole_lock) {
- if (!stole_lock) {
- spin_lock_irqsave(&port->
- sc_port.lock,
- flags);
- got_lock = 1;
- }
- break;
- } else {
- /* still locked */
- if ((lhead !=
- port->sc_port.info->xmit.head)
- || (ltail !=
- port->sc_port.info->xmit.
- tail)) {
- lhead =
- port->sc_port.info->xmit.
- head;
- ltail =
- port->sc_port.info->xmit.
- tail;
- counter = 0;
- }
+ for (counter = 0; counter < 150; mdelay(125), counter++) {
+ if (!spin_is_locked(&port->sc_port.lock)
+ || stole_lock) {
+ if (!stole_lock) {
+ spin_lock_irqsave(&port->sc_port.lock,
+ flags);
+ got_lock = 1;
}
- }
- /* flush anything in the serial core xmit buffer, raw */
- sn_transmit_chars(port, 1);
- if (got_lock) {
- spin_unlock_irqrestore(&port->sc_port.lock,
- flags);
- stole_lock = 0;
+ break;
} else {
- /* fell thru */
- stole_lock = 1;
+ /* still locked */
+ if ((lhead != port->sc_port.info->xmit.head)
+ || (ltail !=
+ port->sc_port.info->xmit.tail)) {
+ lhead =
+ port->sc_port.info->xmit.head;
+ ltail =
+ port->sc_port.info->xmit.tail;
+ counter = 0;
+ }
}
- puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
- } else {
- stole_lock = 0;
-#endif
- spin_lock_irqsave(&port->sc_port.lock, flags);
- sn_transmit_chars(port, 1);
+ }
+ /* flush anything in the serial core xmit buffer, raw */
+ sn_transmit_chars(port, 1);
+ if (got_lock) {
spin_unlock_irqrestore(&port->sc_port.lock, flags);
-
- puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+ stole_lock = 0;
+ } else {
+ /* fell thru */
+ stole_lock = 1;
}
-#endif
- }
- else {
- /* Not yet registered with serial core - simple case */
+ puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
+ } else {
+ stole_lock = 0;
+ spin_lock_irqsave(&port->sc_port.lock, flags);
+ sn_transmit_chars(port, 1);
+ spin_unlock_irqrestore(&port->sc_port.lock, flags);
+
puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
}
}
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index 5fc4a62173d..fa4ae94243c 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -34,6 +34,7 @@ sunserial_console_termios(struct console *con)
char *mode_prop = "ttyX-mode";
char *cd_prop = "ttyX-ignore-cd";
char *dtr_prop = "ttyX-rts-dtr-off";
+ char *ssp_console_modes_prop = "ssp-console-modes";
int baud, bits, stop, cflag;
char parity;
int carrier = 0;
@@ -43,14 +44,39 @@ sunserial_console_termios(struct console *con)
if (!serial_console)
return;
- if (serial_console == 1) {
+ switch (serial_console) {
+ case PROMDEV_OTTYA:
mode_prop[3] = 'a';
cd_prop[3] = 'a';
dtr_prop[3] = 'a';
- } else {
+ break;
+
+ case PROMDEV_OTTYB:
mode_prop[3] = 'b';
cd_prop[3] = 'b';
dtr_prop[3] = 'b';
+ break;
+
+ case PROMDEV_ORSC:
+
+ nd = prom_pathtoinode("rsc");
+ if (!nd) {
+ strcpy(mode, "115200,8,n,1,-");
+ goto no_options;
+ }
+
+ if (!prom_node_has_property(nd, ssp_console_modes_prop)) {
+ strcpy(mode, "115200,8,n,1,-");
+ goto no_options;
+ }
+
+ memset(mode, 0, sizeof(mode));
+ prom_getstring(nd, ssp_console_modes_prop, mode, sizeof(mode));
+ goto no_options;
+
+ default:
+ strcpy(mode, "9600,8,n,1,-");
+ goto no_options;
}
topnd = prom_getchild(prom_root_node);
@@ -110,6 +136,10 @@ no_options:
case 9600: cflag |= B9600; break;
case 19200: cflag |= B19200; break;
case 38400: cflag |= B38400; break;
+ case 57600: cflag |= B57600; break;
+ case 115200: cflag |= B115200; break;
+ case 230400: cflag |= B230400; break;
+ case 460800: cflag |= B460800; break;
default: baud = 9600; cflag |= B9600; break;
}
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 7e773ff76c6..8bcaebcc0ad 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -897,9 +897,6 @@ static int sunsab_console_setup(struct console *con, char *options)
sunserial_console_termios(con);
- /* Firmware console speed is limited to 150-->38400 baud so
- * this hackish cflag thing is OK.
- */
switch (con->cflag & CBAUD) {
case B150: baud = 150; break;
case B300: baud = 300; break;
@@ -910,6 +907,10 @@ static int sunsab_console_setup(struct console *con, char *options)
default: case B9600: baud = 9600; break;
case B19200: baud = 19200; break;
case B38400: baud = 38400; break;
+ case B57600: baud = 57600; break;
+ case B115200: baud = 115200; break;
+ case B230400: baud = 230400; break;
+ case B460800: baud = 460800; break;
};
/*