Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/Kconfig           |  27
-rw-r--r--  drivers/edac/Makefile          |   7
-rw-r--r--  drivers/edac/amd64_edac.c      | 106
-rw-r--r--  drivers/edac/amd64_edac.h      |  23
-rw-r--r--  drivers/edac/amd64_edac_inj.c  |  49
-rw-r--r--  drivers/edac/cpc925_edac.c     |   6
-rw-r--r--  drivers/edac/edac_core.h       |   2
-rw-r--r--  drivers/edac/edac_device.c     |   5
-rw-r--r--  drivers/edac/edac_mc.c         |   4
-rw-r--r--  drivers/edac/edac_mce_amd.c    |  32
-rw-r--r--  drivers/edac/edac_pci.c        |   4
-rw-r--r--  drivers/edac/i3200_edac.c      | 527
-rw-r--r--  drivers/edac/i5000_edac.c      |   7
-rw-r--r--  drivers/edac/i5400_edac.c      |  89
-rw-r--r--  drivers/edac/mpc85xx_edac.c    |  32
-rw-r--r--  drivers/edac/mv64x60_edac.c    |  22
16 files changed, 758 insertions, 184 deletions
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index a3ca18e2d7c..55c9c59b3f7 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -47,6 +47,18 @@ config EDAC_DEBUG_VERBOSE
Source file name and line number where debugging message
printed will be added to debugging message.
+ config EDAC_DECODE_MCE
+ tristate "Decode MCEs in human-readable form (only on AMD for now)"
+ depends on CPU_SUP_AMD && X86_MCE
+ default y
+ ---help---
+ Enable this option if you want to decode Machine Check Exceptions
+ occurring on your machine in human-readable form.
+
+ You should definitely say Y here in case you want to decode MCEs
+ which occur really early upon boot, before the module infrastructure
+ has been initialized.
+
config EDAC_MM_EDAC
tristate "Main Memory EDAC (Error Detection And Correction) reporting"
help
@@ -59,7 +71,7 @@ config EDAC_MM_EDAC
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
- depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && CPU_SUP_AMD
+ depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE
help
Support for error detection and correction on the AMD 64
Families of Memory Controllers (K8, F10h and F11h)
@@ -133,6 +145,13 @@ config EDAC_I3000
Support for error detection and correction on the Intel
3000 and 3010 server chipsets.
+config EDAC_I3200
+ tristate "Intel 3200"
+ depends on EDAC_MM_EDAC && PCI && X86 && EXPERIMENTAL
+ help
+ Support for error detection and correction on the Intel
+ 3200 and 3210 server chipsets.
+
config EDAC_X38
tristate "Intel X38"
depends on EDAC_MM_EDAC && PCI && X86
@@ -176,11 +195,11 @@ config EDAC_I5100
San Clemente MCH.
config EDAC_MPC85XX
- tristate "Freescale MPC85xx"
- depends on EDAC_MM_EDAC && FSL_SOC && MPC85xx
+ tristate "Freescale MPC83xx / MPC85xx"
+ depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || MPC85xx)
help
Support for error detection and correction on the Freescale
- MPC8560, MPC8540, MPC8548
+ MPC8349, MPC8560, MPC8540, MPC8548
config EDAC_MV64X60
tristate "Marvell MV64x60"
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index cfa033ce53a..bc5dc232a0f 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -6,7 +6,6 @@
# GNU General Public License.
#
-
obj-$(CONFIG_EDAC) := edac_stub.o
obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o
@@ -17,9 +16,7 @@ ifdef CONFIG_PCI
edac_core-objs += edac_pci.o edac_pci_sysfs.o
endif
-ifdef CONFIG_CPU_SUP_AMD
-edac_core-objs += edac_mce_amd.o
-endif
+obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o
obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o
@@ -32,6 +29,7 @@ obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o
obj-$(CONFIG_EDAC_I82975X) += i82975x_edac.o
obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
+obj-$(CONFIG_EDAC_I3200) += i3200_edac.o
obj-$(CONFIG_EDAC_X38) += x38_edac.o
obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
@@ -49,3 +47,4 @@ obj-$(CONFIG_EDAC_CELL) += cell_edac.o
obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
+
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4e551e63b6d..a38831c8264 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -15,8 +15,8 @@ module_param(ecc_enable_override, int, 0644);
/* Lookup table for all possible MC control instances */
struct amd64_pvt;
-static struct mem_ctl_info *mci_lookup[MAX_NUMNODES];
-static struct amd64_pvt *pvt_lookup[MAX_NUMNODES];
+static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
+static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
/*
* See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
@@ -189,7 +189,10 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
/* Map from a CSROW entry to the mask entry that operates on it */
static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
{
- return csrow >> (pvt->num_dcsm >> 3);
+ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
+ return csrow;
+ else
+ return csrow >> 1;
}
/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
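
The expression being removed here appears to have had the two cases swapped: with num_dcsm = 8 (rev E) the shift came out as 1, and with num_dcsm = 4 (rev F) as 0, while the hardware only pairs two chip-select rows per DCS mask from rev F onwards. A minimal standalone sketch of the corrected mapping (names are local to the example):

#include <stdio.h>

/* rev E and earlier: one DCS mask per chip-select row;
 * rev F / Fam10h / Fam11h: one mask shared by each pair of rows */
static unsigned map_to_dcs_mask(int rev_f_or_later, unsigned csrow)
{
	return rev_f_or_later ? csrow >> 1 : csrow;
}

int main(void)
{
	for (unsigned cs = 0; cs < 8; cs++)
		printf("csrow %u -> mask %u (rev E) / %u (rev F+)\n",
		       cs, map_to_dcs_mask(0, cs), map_to_dcs_mask(1, cs));
	return 0;
}
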
@@ -279,29 +282,26 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
intlv_en = pvt->dram_IntlvEn[0];
if (intlv_en == 0) {
- for (node_id = 0; ; ) {
+ for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
if (amd64_base_limit_match(pvt, sys_addr, node_id))
- break;
-
- if (++node_id >= DRAM_REG_COUNT)
- goto err_no_match;
+ goto found;
}
- goto found;
+ goto err_no_match;
}
- if (unlikely((intlv_en != (0x01 << 8)) &&
- (intlv_en != (0x03 << 8)) &&
- (intlv_en != (0x07 << 8)))) {
+ if (unlikely((intlv_en != 0x01) &&
+ (intlv_en != 0x03) &&
+ (intlv_en != 0x07))) {
amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
"IntlvEn field of DRAM Base Register for node 0: "
- "This probably indicates a BIOS bug.\n", intlv_en);
+ "this probably indicates a BIOS bug.\n", intlv_en);
return NULL;
}
bits = (((u32) sys_addr) >> 12) & intlv_en;
for (node_id = 0; ; ) {
- if ((pvt->dram_limit[node_id] & intlv_en) == bits)
+ if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
break; /* intlv_sel field matches */
if (++node_id >= DRAM_REG_COUNT)
@@ -311,10 +311,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
/* sanity test for sys_addr */
if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
amd64_printk(KERN_WARNING,
- "%s(): sys_addr 0x%lx falls outside base/limit "
- "address range for node %d with node interleaving "
- "enabled.\n", __func__, (unsigned long)sys_addr,
- node_id);
+ "%s(): sys_addr 0x%llx falls outside base/limit "
+ "address range for node %d with node interleaving "
+ "enabled.\n",
+ __func__, sys_addr, node_id);
return NULL;
}
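
Two related fixes land in this function: intlv_en is now compared against 1/3/7 rather than the same values pre-shifted by 8, because the field is already extracted as (low_base >> 8) & 0x7 when the DRAM base register is read, and the selector bits from the address are matched against dram_IntlvSel instead of dram_limit. A self-contained sketch of the resulting node selection, using hypothetical per-node IntlvSel values:

#include <stdint.h>
#include <stdio.h>

#define DRAM_REG_COUNT 8

/* Illustrative only: pick the node whose IntlvSel field matches the
 * address bits selected by intlv_en (1, 3 or 7 => 2/4/8-way). */
static int decode_node(uint64_t sys_addr, unsigned intlv_en,
		       const unsigned intlv_sel[DRAM_REG_COUNT])
{
	unsigned bits = ((uint32_t)sys_addr >> 12) & intlv_en;

	for (int node = 0; node < DRAM_REG_COUNT; node++)
		if ((intlv_sel[node] & intlv_en) == bits)
			return node;
	return -1;
}

int main(void)
{
	unsigned sel[DRAM_REG_COUNT] = { 0, 1 };	/* 2-way interleave */

	/* bit 12 of the address alternates the owning node every 4K */
	printf("%d %d\n", decode_node(0x1000, 0x1, sel),
			  decode_node(0x2000, 0x1, sel));	/* prints "1 0" */
	return 0;
}
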
@@ -377,7 +377,7 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
* base/mask register pair, test the condition shown near the start of
* section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
*/
- for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
+ for (csrow = 0; csrow < pvt->cs_count; csrow++) {
/* This DRAM chip select is disabled on this node */
if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
@@ -734,7 +734,7 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
u64 base, mask;
pvt = mci->pvt_info;
- BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT));
+ BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
base = base_from_dct_base(pvt, csrow);
mask = mask_from_dct_mask(pvt, csrow);
@@ -962,35 +962,27 @@ err_reg:
*/
static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
{
- if (pvt->ext_model >= OPTERON_CPU_REV_F) {
+
+ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
+ pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
+ pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
+ pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
+ pvt->dcs_shift = REV_E_DCS_SHIFT;
+ pvt->cs_count = 8;
+ pvt->num_dcsm = 8;
+ } else {
pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
- switch (boot_cpu_data.x86) {
- case 0xf:
- pvt->num_dcsm = REV_F_DCSM_COUNT;
- break;
-
- case 0x10:
- pvt->num_dcsm = F10_DCSM_COUNT;
- break;
-
- case 0x11:
- pvt->num_dcsm = F11_DCSM_COUNT;
- break;
-
- default:
- amd64_printk(KERN_ERR, "Unsupported family!\n");
- break;
+ if (boot_cpu_data.x86 == 0x11) {
+ pvt->cs_count = 4;
+ pvt->num_dcsm = 2;
+ } else {
+ pvt->cs_count = 8;
+ pvt->num_dcsm = 4;
}
- } else {
- pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
- pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
- pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
- pvt->dcs_shift = REV_E_DCS_SHIFT;
- pvt->num_dcsm = REV_E_DCSM_COUNT;
}
}
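
Folding cs_count into the same revision test collapses the old switch into two branches. A compact restatement of the geometry the new code sets up (illustrative, not an exhaustive list of revisions):

#include <stdio.h>

struct cs_geom { unsigned cs_count, num_dcsm; };

static struct cs_geom cs_geometry(unsigned family, int pre_rev_f)
{
	if (family == 0xf && pre_rev_f)
		return (struct cs_geom){ 8, 8 };	/* one mask per row */
	if (family == 0x11)
		return (struct cs_geom){ 4, 2 };	/* Fam11h: half-size */
	return (struct cs_geom){ 8, 4 };		/* K8 rev F+, Fam10h */
}

int main(void)
{
	struct cs_geom g = cs_geometry(0x10, 0);
	printf("Fam10h: %u chip selects, %u mask registers\n",
	       g.cs_count, g.num_dcsm);
	return 0;
}
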
@@ -1003,7 +995,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
amd64_set_dct_base_and_mask(pvt);
- for (cs = 0; cs < CHIPSELECT_COUNT; cs++) {
+ for (cs = 0; cs < pvt->cs_count; cs++) {
reg = K8_DCSB0 + (cs * 4);
err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
&pvt->dcsb0[cs]);
@@ -1193,7 +1185,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
* different from the node that detected the error.
*/
src_mci = find_mc_by_sys_addr(mci, SystemAddress);
- if (src_mci) {
+ if (!src_mci) {
amd64_mc_printk(mci, KERN_ERR,
"failed to map error address 0x%lx to a node\n",
(unsigned long)SystemAddress);
@@ -1376,8 +1368,8 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
- pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) |
- ((u64) low_base & 0xFFFF0000))) << 8;
+ pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
+ (((u64)low_base & 0xFFFF0000) << 8);
low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
@@ -1398,9 +1390,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
* Extract address values and form a LIMIT address. Limit is the HIGHEST
* memory location of the region, so low 24 bits need to be all ones.
*/
- low_limit |= 0x0000FFFF;
- pvt->dram_limit[dram] =
- ((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF);
+ pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
+ (((u64) low_limit & 0xFFFF0000) << 8) |
+ 0x00FFFFFF;
}
static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
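
The rewritten base/limit assembly places DramBase[47:40] (high register, bits 7:0) and DramBase[39:24] (low register, bits 31:16) directly at their final bit positions, and fills the low 24 bits of the limit with ones in the same step. A worked example with hypothetical register contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical register values, field layout as described above */
	uint32_t low_base   = 0x00100003;	/* DramBase[39:24]  = 0x0010 */
	uint32_t high_base  = 0x00000001;	/* DramBase[47:40]  = 0x01   */
	uint32_t low_limit  = 0x00400000;	/* DramLimit[39:24] = 0x0040 */
	uint32_t high_limit = 0x00000001;

	uint64_t base  = (((uint64_t)high_base & 0xFF) << 40) |
			 (((uint64_t)low_base & 0xFFFF0000) << 8);
	uint64_t limit = (((uint64_t)high_limit & 0xFF) << 40) |
			 (((uint64_t)low_limit & 0xFFFF0000) << 8) |
			 0x00FFFFFF;	/* limit is the region's last byte */

	printf("base  = 0x%llx\n", (unsigned long long)base);	/* 0x10010000000 */
	printf("limit = 0x%llx\n", (unsigned long long)limit);	/* 0x10040ffffff */
	return 0;
}
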
@@ -1566,7 +1558,7 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
- for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
+ for (csrow = 0; csrow < pvt->cs_count; csrow++) {
cs_base = amd64_get_dct_base(pvt, cs, csrow);
if (!(cs_base & K8_DCSB_CS_ENABLE))
@@ -2262,7 +2254,7 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
{
u32 ec = ERROR_CODE(info->nbsl);
u32 xec = EXT_ERROR_CODE(info->nbsl);
- int ecc_type = info->nbsh & (0x3 << 13);
+ int ecc_type = (info->nbsh >> 13) & 0x3;
/* Bail early out if this was an 'observed' error */
if (PP(ec) == K8_NBSL_PP_OBS)
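
The old ecc_type extraction masked the two NBSH bits without shifting them down, so the result was 0, 0x2000, 0x4000 or 0x6000 and could never match the small constants it is compared against later in the function. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned nbsh = 1u << 14;	/* hypothetical NBSH: ECC type field = 2 */

	printf("masked:  0x%x\n", nbsh & (0x3 << 13));	/* 0x4000 */
	printf("shifted: 0x%x\n", (nbsh >> 13) & 0x3);	/* 0x2    */
	return 0;
}
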
@@ -2497,7 +2489,7 @@ err_reg:
* NOTE: CPU Revision Dependent code
*
* Input:
- * @csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1)
+ * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
* k8 private pointer to -->
* DRAM Bank Address mapping register
* node_id
@@ -2577,7 +2569,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
);
- for (i = 0; i < CHIPSELECT_COUNT; i++) {
+ for (i = 0; i < pvt->cs_count; i++) {
csrow = &mci->csrows[i];
if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
@@ -2988,7 +2980,7 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
goto err_exit;
ret = -ENOMEM;
- mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id);
+ mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
if (!mci)
goto err_exit;
@@ -3171,7 +3163,7 @@ static int __init amd64_edac_init(void)
opstate_init();
if (cache_k8_northbridges() < 0)
- goto err_exit;
+ return err;
err = pci_register_driver(&amd64_pci_driver);
if (err)
@@ -3197,8 +3189,6 @@ static int __init amd64_edac_init(void)
err_2nd_stage:
debugf0("2nd stage failed\n");
-
-err_exit:
pci_unregister_driver(&amd64_pci_driver);
return err;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 8ea07e2715d..c6f359a8520 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -132,6 +132,8 @@
#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__
#define EDAC_MOD_STR "amd64_edac"
+#define EDAC_MAX_NUMNODES 8
+
/* Extended Model from CPUID, for CPU Revision numbers */
#define OPTERON_CPU_LE_REV_C 0
#define OPTERON_CPU_REV_D 1
@@ -142,7 +144,7 @@
#define OPTERON_CPU_REV_FA 5
/* Hardware limit on ChipSelect rows per MC and processors per system */
-#define CHIPSELECT_COUNT 8
+#define MAX_CS_COUNT 8
#define DRAM_REG_COUNT 8
@@ -193,7 +195,6 @@
*/
#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
#define REV_E_DCS_SHIFT 4
-#define REV_E_DCSM_COUNT 8
#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
#define REV_F_F1Xh_DCS_SHIFT 8
@@ -204,9 +205,6 @@
*/
#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
#define REV_F_DCS_SHIFT 8
-#define REV_F_DCSM_COUNT 4
-#define F10_DCSM_COUNT 4
-#define F11_DCSM_COUNT 2
/* DRAM CS Mask Registers */
#define K8_DCSM0 0x60
@@ -374,13 +372,11 @@ enum {
#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
(BIT(((word) & 0xF) + 20) | \
- BIT(17) | \
- ((bits) & 0xF))
+ BIT(17) | bits)
#define SET_NB_DRAM_INJECTION_READ(word, bits) \
(BIT(((word) & 0xF) + 20) | \
- BIT(16) | \
- ((bits) & 0xF))
+ BIT(16) | bits)
#define K8_NBCAP 0xE8
#define K8_NBCAP_CORES (BIT(12)|BIT(13))
@@ -445,12 +441,12 @@ struct amd64_pvt {
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
/* DRAM CS Base Address Registers F2x[1,0][5C:40] */
- u32 dcsb0[CHIPSELECT_COUNT];
- u32 dcsb1[CHIPSELECT_COUNT];
+ u32 dcsb0[MAX_CS_COUNT];
+ u32 dcsb1[MAX_CS_COUNT];
/* DRAM CS Mask Registers F2x[1,0][6C:60] */
- u32 dcsm0[CHIPSELECT_COUNT];
- u32 dcsm1[CHIPSELECT_COUNT];
+ u32 dcsm0[MAX_CS_COUNT];
+ u32 dcsm1[MAX_CS_COUNT];
/*
* Decoded parts of DRAM BASE and LIMIT Registers
@@ -470,6 +466,7 @@ struct amd64_pvt {
*/
u32 dcsb_base; /* DCSB base bits */
u32 dcsm_mask; /* DCSM mask bits */
+ u32 cs_count; /* num chip selects (== num DCSB registers) */
u32 num_dcsm; /* Number of DCSM registers */
u32 dcs_mask_notused; /* DCSM notused mask bits */
u32 dcs_shift; /* DCSB and DCSM shift value */
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index d3675b76b3a..29f1f7a612d 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -1,5 +1,11 @@
#include "amd64_edac.h"
+static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ return sprintf(buf, "0x%x\n", pvt->injection.section);
+}
+
/*
* store error injection section value which refers to one of 4 16-byte sections
* within a 64-byte cacheline
@@ -15,12 +21,26 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
ret = strict_strtoul(data, 10, &value);
if (ret != -EINVAL) {
+
+ if (value > 3) {
+ amd64_printk(KERN_WARNING,
+ "%s: invalid section 0x%lx\n",
+ __func__, value);
+ return -EINVAL;
+ }
+
pvt->injection.section = (u32) value;
return count;
}
return ret;
}
+static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ return sprintf(buf, "0x%x\n", pvt->injection.word);
+}
+
/*
* store error injection word value which refers to one of 9 16-bit word of the
* 16-byte (128-bit + ECC bits) section
@@ -37,14 +57,25 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
ret = strict_strtoul(data, 10, &value);
if (ret != -EINVAL) {
- value = (value <= 8) ? value : 0;
- pvt->injection.word = (u32) value;
+ if (value > 8) {
+ amd64_printk(KERN_WARNING,
+ "%s: invalid word 0x%lx\n",
+ __func__, value);
+ return -EINVAL;
+ }
+ pvt->injection.word = (u32) value;
return count;
}
return ret;
}
+static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
+}
+
/*
* store 16 bit error injection vector which enables injecting errors to the
* corresponding bit within the error injection word above. When used during a
@@ -60,8 +91,14 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
ret = strict_strtoul(data, 16, &value);
if (ret != -EINVAL) {
- pvt->injection.bit_map = (u32) value & 0xFFFF;
+ if (value & 0xFFFF0000) {
+ amd64_printk(KERN_WARNING,
+ "%s: invalid EccVector: 0x%lx\n",
+ __func__, value);
+ return -EINVAL;
+ }
+ pvt->injection.bit_map = (u32) value;
return count;
}
return ret;
@@ -147,7 +184,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
.name = "inject_section",
.mode = (S_IRUGO | S_IWUSR)
},
- .show = NULL,
+ .show = amd64_inject_section_show,
.store = amd64_inject_section_store,
},
{
@@ -155,7 +192,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
.name = "inject_word",
.mode = (S_IRUGO | S_IWUSR)
},
- .show = NULL,
+ .show = amd64_inject_word_show,
.store = amd64_inject_word_store,
},
{
@@ -163,7 +200,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
.name = "inject_ecc_vector",
.mode = (S_IRUGO | S_IWUSR)
},
- .show = NULL,
+ .show = amd64_inject_ecc_vector_show,
.store = amd64_inject_ecc_vector_store,
},
{
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 8c54196b5ab..3d50274f134 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -885,14 +885,14 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev,
r->start,
- r->end - r->start + 1,
+ resource_size(r),
pdev->name)) {
cpc925_printk(KERN_ERR, "Unable to request mem region\n");
res = -EBUSY;
goto err1;
}
- vbase = devm_ioremap(&pdev->dev, r->start, r->end - r->start + 1);
+ vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!vbase) {
cpc925_printk(KERN_ERR, "Unable to ioremap device\n");
res = -ENOMEM;
@@ -953,7 +953,7 @@ err3:
cpc925_mc_exit(mci);
edac_mc_free(mci);
err2:
- devm_release_mem_region(&pdev->dev, r->start, r->end-r->start+1);
+ devm_release_mem_region(&pdev->dev, r->start, resource_size(r));
err1:
devres_release_group(&pdev->dev, cpc925_probe);
out:
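
resource_size() is the standard helper for this conversion and avoids re-deriving the easy-to-get-wrong "+ 1"; its definition in include/linux/ioport.h is simply:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

The identical conversion is applied throughout mv64x60_edac.c at the end of this series.
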
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 871c13b4c14..12f355cafdb 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -286,7 +286,7 @@ enum scrub_type {
* is irrespective of the memory devices being mounted
* on both sides of the memory stick.
*
- * Socket set: All of the memory sticks that are required for for
+ * Socket set: All of the memory sticks that are required for
* a single memory access or all of the memory sticks
* spanned by a chip-select row. A single socket set
* has two chip-select rows and if double-sided sticks
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index b02a6a69a8f..d5e13c94714 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -356,7 +356,6 @@ static void complete_edac_device_list_del(struct rcu_head *head)
edac_dev = container_of(head, struct edac_device_ctl_info, rcu);
INIT_LIST_HEAD(&edac_dev->link);
- complete(&edac_dev->removal_complete);
}
/*
@@ -369,10 +368,8 @@ static void del_edac_device_from_global_list(struct edac_device_ctl_info
*edac_device)
{
list_del_rcu(&edac_device->link);
-
- init_completion(&edac_device->removal_complete);
call_rcu(&edac_device->rcu, complete_edac_device_list_del);
- wait_for_completion(&edac_device->removal_complete);
+ rcu_barrier();
}
/*
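
The completion in the RCU callback only proved that this object's own callback had run; rcu_barrier() instead waits for all callbacks queued with call_rcu() to finish, which is the documented way to make a subsequent free or module unload safe. The same substitution appears below in edac_mc.c and edac_pci.c. A minimal sketch of the pattern, with hypothetical names:

#include <linux/list.h>
#include <linux/rcupdate.h>

struct obj {
	struct list_head link;
	struct rcu_head rcu;
};

static void obj_unlink_cb(struct rcu_head *head)
{
	struct obj *o = container_of(head, struct obj, rcu);

	INIT_LIST_HEAD(&o->link);	/* no completion needed any more */
}

static void obj_del(struct obj *o)
{
	list_del_rcu(&o->link);
	call_rcu(&o->rcu, obj_unlink_cb);
	rcu_barrier();			/* wait for every pending callback */
}
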
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 335b7ebdb11..b629c41756f 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -418,16 +418,14 @@ static void complete_mc_list_del(struct rcu_head *head)
mci = container_of(head, struct mem_ctl_info, rcu);
INIT_LIST_HEAD(&mci->link);
- complete(&mci->complete);
}
static void del_mc_from_global_list(struct mem_ctl_info *mci)
{
atomic_dec(&edac_handlers);
list_del_rcu(&mci->link);
- init_completion(&mci->complete);
call_rcu(&mci->rcu, complete_mc_list_del);
- wait_for_completion(&mci->complete);
+ rcu_barrier();
}
/**
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index 0c21c370c9d..713ed7d3724 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -3,6 +3,7 @@
static bool report_gart_errors;
static void (*nb_bus_decoder)(int node_id, struct err_regs *regs);
+static void (*orig_mce_callback)(struct mce *m);
void amd_report_gart_errors(bool v)
{
@@ -362,7 +363,7 @@ static inline void amd_decode_err_code(unsigned int ec)
pr_warning("Huh? Unknown MCE error 0x%x\n", ec);
}
-void decode_mce(struct mce *m)
+static void amd_decode_mce(struct mce *m)
{
struct err_regs regs;
int node, ecc;
@@ -420,3 +421,32 @@ void decode_mce(struct mce *m)
amd_decode_err_code(m->status & 0xffff);
}
+
+static int __init mce_amd_init(void)
+{
+ /*
+ * We can decode MCEs for Opteron and later CPUs:
+ */
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
+ (boot_cpu_data.x86 >= 0xf)) {
+ /* safe the default decode mce callback */
+ orig_mce_callback = x86_mce_decode_callback;
+
+ x86_mce_decode_callback = amd_decode_mce;
+ }
+
+ return 0;
+}
+early_initcall(mce_amd_init);
+
+#ifdef MODULE
+static void __exit mce_amd_exit(void)
+{
+ x86_mce_decode_callback = orig_mce_callback;
+}
+
+MODULE_DESCRIPTION("AMD MCE decoder");
+MODULE_ALIAS("edac-mce-amd");
+MODULE_LICENSE("GPL");
+module_exit(mce_amd_exit);
+#endif
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 30b585b1d60..efb5d565078 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -174,7 +174,6 @@ static void complete_edac_pci_list_del(struct rcu_head *head)
pci = container_of(head, struct edac_pci_ctl_info, rcu);
INIT_LIST_HEAD(&pci->link);
- complete(&pci->complete);
}
/*
@@ -185,9 +184,8 @@ static void complete_edac_pci_list_del(struct rcu_head *head)
static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
{
list_del_rcu(&pci->link);
- init_completion(&pci->complete);
call_rcu(&pci->rcu, complete_edac_pci_list_del);
- wait_for_completion(&pci->complete);
+ rcu_barrier();
}
#if 0
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
new file mode 100644
index 00000000000..fde4db91c4d
--- /dev/null
+++ b/drivers/edac/i3200_edac.c
@@ -0,0 +1,527 @@
+/*
+ * Intel 3200/3210 Memory Controller kernel module
+ * Copyright (C) 2008-2009 Akamai Technologies, Inc.
+ * Portions by Hitoshi Mitake <h.mitake@gmail.com>.
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include <linux/io.h>
+#include "edac_core.h"
+
+#define I3200_REVISION "1.1"
+
+#define EDAC_MOD_STR "i3200_edac"
+
+#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
+
+#define I3200_RANKS 8
+#define I3200_RANKS_PER_CHANNEL 4
+#define I3200_CHANNELS 2
+
+/* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */
+
+#define I3200_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
+#define I3200_MCHBAR_HIGH 0x4c
+#define I3200_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
+#define I3200_MMR_WINDOW_SIZE 16384
+
+#define I3200_TOM 0xa0 /* Top of Memory (16b)
+ *
+ * 15:10 reserved
+ * 9:0 total populated physical memory
+ */
+#define I3200_TOM_MASK 0x3ff /* bits 9:0 */
+#define I3200_TOM_SHIFT 26 /* 64MiB grain */
+
+#define I3200_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15 reserved
+ * 14 Isochronous TBWRR Run Behind FIFO Full
+ * (ITCV)
+ * 13 Isochronous TBWRR Run Behind FIFO Put
+ * (ITSTV)
+ * 12 reserved
+ * 11 MCH Thermal Sensor Event
+ * for SMI/SCI/SERR (GTSE)
+ * 10 reserved
+ * 9 LOCK to non-DRAM Memory Flag (LCKF)
+ * 8 reserved
+ * 7 DRAM Throttle Flag (DTF)
+ * 6:2 reserved
+ * 1 Multi-bit DRAM ECC Error Flag (DMERR)
+ * 0 Single-bit DRAM ECC Error Flag (DSERR)
+ */
+#define I3200_ERRSTS_UE 0x0002
+#define I3200_ERRSTS_CE 0x0001
+#define I3200_ERRSTS_BITS (I3200_ERRSTS_UE | I3200_ERRSTS_CE)
+
+
+/* Intel MMIO register space - device 0 function 0 - MMR space */
+
+#define I3200_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
+ *
+ * 15:10 reserved
+ * 9:0 Channel 0 DRAM Rank Boundary Address
+ */
+#define I3200_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
+#define I3200_DRB_MASK 0x3ff /* bits 9:0 */
+#define I3200_DRB_SHIFT 26 /* 64MiB grain */
+
+#define I3200_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
+ *
+ * 63:48 Error Column Address (ERRCOL)
+ * 47:32 Error Row Address (ERRROW)
+ * 31:29 Error Bank Address (ERRBANK)
+ * 28:27 Error Rank Address (ERRRANK)
+ * 26:24 reserved
+ * 23:16 Error Syndrome (ERRSYND)
+ * 15: 2 reserved
+ * 1 Multiple Bit Error Status (MERRSTS)
+ * 0 Correctable Error Status (CERRSTS)
+ */
+#define I3200_C1ECCERRLOG 0x680 /* Chan 1 ECC Error Log (64b) */
+#define I3200_ECCERRLOG_CE 0x1
+#define I3200_ECCERRLOG_UE 0x2
+#define I3200_ECCERRLOG_RANK_BITS 0x18000000
+#define I3200_ECCERRLOG_RANK_SHIFT 27
+#define I3200_ECCERRLOG_SYNDROME_BITS 0xff0000
+#define I3200_ECCERRLOG_SYNDROME_SHIFT 16
+#define I3200_CAPID0 0xe0 /* P.95 of spec for details */
+
+struct i3200_priv {
+ void __iomem *window;
+};
+
+static int nr_channels;
+
+static int how_many_channels(struct pci_dev *pdev)
+{
+ unsigned char capid0_8b; /* 8th byte of CAPID0 */
+
+ pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
+ if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
+ debugf0("In single channel mode.\n");
+ return 1;
+ } else {
+ debugf0("In dual channel mode.\n");
+ return 2;
+ }
+}
+
+static unsigned long eccerrlog_syndrome(u64 log)
+{
+ return (log & I3200_ECCERRLOG_SYNDROME_BITS) >>
+ I3200_ECCERRLOG_SYNDROME_SHIFT;
+}
+
+static int eccerrlog_row(int channel, u64 log)
+{
+ u64 rank = ((log & I3200_ECCERRLOG_RANK_BITS) >>
+ I3200_ECCERRLOG_RANK_SHIFT);
+ return rank | (channel * I3200_RANKS_PER_CHANNEL);
+}
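+/*
+ * Illustrative example: ERRRANK = 2 logged on channel 1 maps to
+ * csrow 2 | (1 * I3200_RANKS_PER_CHANNEL) = 6, matching the
+ * rank-within-channel csrow layout set up in i3200_probe1().
+ */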
+
+enum i3200_chips {
+ I3200 = 0,
+};
+
+struct i3200_dev_info {
+ const char *ctl_name;
+};
+
+struct i3200_error_info {
+ u16 errsts;
+ u16 errsts2;
+ u64 eccerrlog[I3200_CHANNELS];
+};
+
+static const struct i3200_dev_info i3200_devs[] = {
+ [I3200] = {
+ .ctl_name = "i3200"
+ },
+};
+
+static struct pci_dev *mci_pdev;
+static int i3200_registered = 1;
+
+
+static void i3200_clear_error_info(struct mem_ctl_info *mci)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * Clear any error bits.
+ * (Yes, we really clear bits by writing 1 to them.)
+ */
+ pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS,
+ I3200_ERRSTS_BITS);
+}
+
+static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci,
+ struct i3200_error_info *info)
+{
+ struct pci_dev *pdev;
+ struct i3200_priv *priv = mci->pvt_info;
+ void __iomem *window = priv->window;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * This is a mess because there is no atomic way to read all the
+ * registers at once and the registers can transition from CE being
+ * overwritten by UE.
+ */
+ pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts);
+ if (!(info->errsts & I3200_ERRSTS_BITS))
+ return;
+
+ info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
+ if (nr_channels == 2)
+ info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
+
+ pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2);
+
+ /*
+ * If the error is the same for both reads then the first set
+ * of reads is valid. If there is a change then there is a CE
+ * with no info and the second set of reads is valid and
+ * should be UE info.
+ */
+ if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
+ info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
+ if (nr_channels == 2)
+ info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
+ }
+
+ i3200_clear_error_info(mci);
+}
+
+static void i3200_process_error_info(struct mem_ctl_info *mci,
+ struct i3200_error_info *info)
+{
+ int channel;
+ u64 log;
+
+ if (!(info->errsts & I3200_ERRSTS_BITS))
+ return;
+
+ if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ for (channel = 0; channel < nr_channels; channel++) {
+ log = info->eccerrlog[channel];
+ if (log & I3200_ECCERRLOG_UE) {
+ edac_mc_handle_ue(mci, 0, 0,
+ eccerrlog_row(channel, log),
+ "i3200 UE");
+ } else if (log & I3200_ECCERRLOG_CE) {
+ edac_mc_handle_ce(mci, 0, 0,
+ eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log), 0,
+ "i3200 CE");
+ }
+ }
+}
+
+static void i3200_check(struct mem_ctl_info *mci)
+{
+ struct i3200_error_info info;
+
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ i3200_get_and_clear_error_info(mci, &info);
+ i3200_process_error_info(mci, &info);
+}
+
+
+void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
+{
+ union {
+ u64 mchbar;
+ struct {
+ u32 mchbar_low;
+ u32 mchbar_high;
+ };
+ } u;
+ void __iomem *window;
+
+ pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low);
+ pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high);
+ u.mchbar &= I3200_MCHBAR_MASK;
+
+ if (u.mchbar != (resource_size_t)u.mchbar) {
+ printk(KERN_ERR
+ "i3200: mmio space beyond accessible range (0x%llx)\n",
+ (unsigned long long)u.mchbar);
+ return NULL;
+ }
+
+ window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE);
+ if (!window)
+ printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n",
+ (unsigned long long)u.mchbar);
+
+ return window;
+}
+
+
+static void i3200_get_drbs(void __iomem *window,
+ u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
+{
+ int i;
+
+ for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) {
+ drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK;
+ drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK;
+ }
+}
+
+static bool i3200_is_stacked(struct pci_dev *pdev,
+ u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
+{
+ u16 tom;
+
+ pci_read_config_word(pdev, I3200_TOM, &tom);
+ tom &= I3200_TOM_MASK;
+
+ return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom;
+}
+
+static unsigned long drb_to_nr_pages(
+ u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked,
+ int channel, int rank)
+{
+ int n;
+
+ n = drbs[channel][rank];
+ if (rank > 0)
+ n -= drbs[channel][rank - 1];
+ if (stacked && (channel == 1) &&
+ drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1])
+ n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1];
+
+ n <<= (I3200_DRB_SHIFT - PAGE_SHIFT);
+ return n;
+}
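+/*
+ * Worked example (illustrative): with drbs[0] = { 8, 16, 16, 16 } the
+ * cumulative 64 MiB boundaries give rank 0 a size of 8 * 64 MiB and
+ * rank 1 (16 - 8) * 64 MiB, i.e. 512 MiB each; ranks 2 and 3 add
+ * nothing and are reported empty. With 4 KiB pages, rank 0 comes out
+ * as 8 << (26 - 12) = 131072 pages.
+ */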
+
+static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc;
+ int i;
+ struct mem_ctl_info *mci = NULL;
+ unsigned long last_page;
+ u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
+ bool stacked;
+ void __iomem *window;
+ struct i3200_priv *priv;
+
+ debugf0("MC: %s()\n", __func__);
+
+ window = i3200_map_mchbar(pdev);
+ if (!window)
+ return -ENODEV;
+
+ i3200_get_drbs(window, drbs);
+ nr_channels = how_many_channels(pdev);
+
+ mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS,
+ nr_channels, 0);
+ if (!mci)
+ return -ENOMEM;
+
+ debugf3("MC: %s(): init mci\n", __func__);
+
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR2;
+
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = I3200_REVISION;
+ mci->ctl_name = i3200_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = i3200_check;
+ mci->ctl_page_to_phys = NULL;
+ priv = mci->pvt_info;
+ priv->window = window;
+
+ stacked = i3200_is_stacked(pdev, drbs);
+
+ /*
+ * The dram rank boundary (DRB) reg values are boundary addresses
+ * for each DRAM rank with a granularity of 64MB. DRB regs are
+ * cumulative; the last one will contain the total memory
+ * contained in all ranks.
+ */
+ last_page = -1UL;
+ for (i = 0; i < mci->nr_csrows; i++) {
+ unsigned long nr_pages;
+ struct csrow_info *csrow = &mci->csrows[i];
+
+ nr_pages = drb_to_nr_pages(drbs, stacked,
+ i / I3200_RANKS_PER_CHANNEL,
+ i % I3200_RANKS_PER_CHANNEL);
+
+ if (nr_pages == 0) {
+ csrow->mtype = MEM_EMPTY;
+ continue;
+ }
+
+ csrow->first_page = last_page + 1;
+ last_page += nr_pages;
+ csrow->last_page = last_page;
+ csrow->nr_pages = nr_pages;
+
+ csrow->grain = nr_pages << PAGE_SHIFT;
+ csrow->mtype = MEM_DDR2;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = EDAC_UNKNOWN;
+ }
+
+ i3200_clear_error_info(mci);
+
+ rc = -ENODEV;
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* get this far and it's successful */
+ debugf3("MC: %s(): success\n", __func__);
+ return 0;
+
+fail:
+ iounmap(window);
+ if (mci)
+ edac_mc_free(mci);
+
+ return rc;
+}
+
+static int __devinit i3200_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("MC: %s()\n", __func__);
+
+ if (pci_enable_device(pdev) < 0)
+ return -EIO;
+
+ rc = i3200_probe1(pdev, ent->driver_data);
+ if (!mci_pdev)
+ mci_pdev = pci_dev_get(pdev);
+
+ return rc;
+}
+
+static void __devexit i3200_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct i3200_priv *priv;
+
+ debugf0("%s()\n", __func__);
+
+ mci = edac_mc_del_mc(&pdev->dev);
+ if (!mci)
+ return;
+
+ priv = mci->pvt_info;
+ iounmap(priv->window);
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I3200},
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i3200_pci_tbl);
+
+static struct pci_driver i3200_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = i3200_init_one,
+ .remove = __devexit_p(i3200_remove_one),
+ .id_table = i3200_pci_tbl,
+};
+
+static int __init i3200_init(void)
+{
+ int pci_rc;
+
+ debugf3("MC: %s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ pci_rc = pci_register_driver(&i3200_driver);
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (!mci_pdev) {
+ i3200_registered = 0;
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_3200_HB, NULL);
+ if (!mci_pdev) {
+ debugf0("i3200 pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+
+ pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl);
+ if (pci_rc < 0) {
+ debugf0("i3200 init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+
+ return 0;
+
+fail1:
+ pci_unregister_driver(&i3200_driver);
+
+fail0:
+ if (mci_pdev)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
+}
+
+static void __exit i3200_exit(void)
+{
+ debugf3("MC: %s()\n", __func__);
+
+ pci_unregister_driver(&i3200_driver);
+ if (!i3200_registered) {
+ i3200_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+ }
+}
+
+module_init(i3200_init);
+module_exit(i3200_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akamai Technologies, Inc.");
+MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index d335086f4a2..77a9579d716 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1173,7 +1173,7 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
pci_read_config_word(pvt->branch_1, where,
&pvt->b1_mtr[slot_row]);
debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row,
- where, pvt->b0_mtr[slot_row]);
+ where, pvt->b1_mtr[slot_row]);
} else {
pvt->b1_mtr[slot_row] = 0;
}
@@ -1232,7 +1232,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
struct csrow_info *p_csrow;
int empty, channel_count;
int max_csrows;
- int mtr;
+ int mtr, mtr1;
int csrow_megs;
int channel;
int csrow;
@@ -1251,9 +1251,10 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
/* use branch 0 for the basis */
mtr = pvt->b0_mtr[csrow >> 1];
+ mtr1 = pvt->b1_mtr[csrow >> 1];
/* if no DIMMS on this row, continue */
- if (!MTR_DIMMS_PRESENT(mtr))
+ if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1))
continue;
/* FAKE OUT VALUES, FIXME */
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index b08b6d8e2dc..f99d10655ed 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -46,9 +46,10 @@
/* Limits for i5400 */
#define NUM_MTRS_PER_BRANCH 4
#define CHANNELS_PER_BRANCH 2
+#define MAX_DIMMS_PER_CHANNEL NUM_MTRS_PER_BRANCH
#define MAX_CHANNELS 4
-#define MAX_DIMMS (MAX_CHANNELS * 4) /* Up to 4 DIMM's per channel */
-#define MAX_CSROWS (MAX_DIMMS * 2) /* max possible csrows per channel */
+/* max possible csrows per channel */
+#define MAX_CSROWS (MAX_DIMMS_PER_CHANNEL)
/* Device 16,
* Function 0: System Address
@@ -331,7 +332,6 @@ static const struct i5400_dev_info i5400_devs[] = {
struct i5400_dimm_info {
int megabytes; /* size, 0 means not present */
- int dual_rank;
};
/* driver private data structure */
@@ -849,11 +849,9 @@ static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
int n;
/* There is one MTR for each slot pair of FB-DIMMs,
- Each slot may have one or two ranks (2 csrows),
Each slot pair may be at branch 0 or branch 1.
- So, csrow should be divided by eight
*/
- n = csrow >> 3;
+ n = csrow;
if (n >= NUM_MTRS_PER_BRANCH) {
debugf0("ERROR: trying to access an invalid csrow: %d\n",
@@ -905,25 +903,22 @@ static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
amb_present_reg = determine_amb_present_reg(pvt, channel);
/* Determine if there is a DIMM present in this DIMM slot */
- if (amb_present_reg & (1 << (csrow >> 1))) {
- dinfo->dual_rank = MTR_DIMM_RANK(mtr);
-
- if (!((dinfo->dual_rank == 0) &&
- ((csrow & 0x1) == 0x1))) {
- /* Start with the number of bits for a Bank
- * on the DRAM */
- addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
- /* Add thenumber of ROW bits */
- addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
- /* add the number of COLUMN bits */
- addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
-
- addrBits += 6; /* add 64 bits per DIMM */
- addrBits -= 20; /* divide by 2^^20 */
- addrBits -= 3; /* 8 bits per bytes */
-
- dinfo->megabytes = 1 << addrBits;
- }
+ if (amb_present_reg & (1 << csrow)) {
+ /* Start with the number of bits for a Bank
+ * on the DRAM */
+ addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
+ /* Add thenumber of ROW bits */
+ addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
+ /* add the number of COLUMN bits */
+ addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
+ /* add the number of RANK bits */
+ addrBits += MTR_DIMM_RANK(mtr);
+
+ addrBits += 6; /* add 64 bits per DIMM */
+ addrBits -= 20; /* divide by 2^20 */
+ addrBits -= 3; /* 8 bits per byte */
+
+ dinfo->megabytes = 1 << addrBits;
}
}
}
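
With the rank width folded into the sum, the size computation is a straight total of address-bit counts. A worked example, assuming a hypothetical MTR decode of 4 banks, 2^14 rows, 2^10 columns and 2 ranks:

#include <stdio.h>

int main(void)
{
	int addrBits = 2 + 14 + 10 + 1;	/* bank + row + column + rank bits */

	addrBits += 6;	/* 64 data bits moved per column address */
	addrBits -= 20;	/* express the result in MiB ... */
	addrBits -= 3;	/* ... and in bytes rather than bits */

	printf("%d MB\n", 1 << addrBits);	/* prints "1024 MB" */
	return 0;
}
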
@@ -951,12 +946,12 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
return;
}
- /* Scan all the actual CSROWS (which is # of DIMMS * 2)
+ /* Scan all the actual CSROWS
* and calculate the information for each DIMM
* Start with the highest csrow first, to display it first
* and work toward the 0th csrow
*/
- max_csrows = pvt->maxdimmperch * 2;
+ max_csrows = pvt->maxdimmperch;
for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
/* on an odd csrow, first output a 'boundary' marker,
@@ -1064,7 +1059,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
/* Get the set of MTR[0-3] regs by each branch */
for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) {
- int where = MTR0 + (slot_row * sizeof(u32));
+ int where = MTR0 + (slot_row * sizeof(u16));
/* Branch 0 set of MTR registers */
pci_read_config_word(pvt->branch_0, where,
@@ -1146,7 +1141,7 @@ static int i5400_init_csrows(struct mem_ctl_info *mci)
pvt = mci->pvt_info;
channel_count = pvt->maxch;
- max_csrows = pvt->maxdimmperch * 2;
+ max_csrows = pvt->maxdimmperch;
empty = 1; /* Assume NO memory */
@@ -1215,28 +1210,6 @@ static void i5400_enable_error_reporting(struct mem_ctl_info *mci)
}
/*
- * i5400_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels)
- *
- * ask the device how many channels are present and how many CSROWS
- * as well
- */
-static void i5400_get_dimm_and_channel_counts(struct pci_dev *pdev,
- int *num_dimms_per_channel,
- int *num_channels)
-{
- u8 value;
-
- /* Need to retrieve just how many channels and dimms per channel are
- * supported on this memory controller
- */
- pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
- *num_dimms_per_channel = (int)value * 2;
-
- pci_read_config_byte(pdev, MAXCH, &value);
- *num_channels = (int)value;
-}
-
-/*
* i5400_probe1 Probe for ONE instance of device to see if it is
* present.
* return:
@@ -1263,22 +1236,16 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
if (PCI_FUNC(pdev->devfn) != 0)
return -ENODEV;
- /* Ask the devices for the number of CSROWS and CHANNELS so
- * that we can calculate the memory resources, etc
- *
- * The Chipset will report what it can handle which will be greater
- * or equal to what the motherboard manufacturer will implement.
- *
- * As we don't have a motherboard identification routine to determine
+ /* As we don't have a motherboard identification routine to determine
* actual number of slots/dimms per channel, we thus utilize the
* resource as specified by the chipset. Thus, we might have
* have more DIMMs per channel than actually on the mobo, but this
* allows the driver to support upto the chipset max, without
* some fancy mobo determination.
*/
- i5400_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
- &num_channels);
- num_csrows = num_dimms_per_channel * 2;
+ num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL;
+ num_channels = MAX_CHANNELS;
+ num_csrows = num_dimms_per_channel;
debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
__func__, num_channels, num_dimms_per_channel, num_csrows);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 3f2ccfc6407..cf27402af97 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -26,7 +26,9 @@
#include "mpc85xx_edac.h"
static int edac_dev_idx;
+#ifdef CONFIG_PCI
static int edac_pci_idx;
+#endif
static int edac_mc_idx;
static u32 orig_ddr_err_disable;
@@ -41,7 +43,9 @@ static u32 orig_pci_err_en;
#endif
static u32 orig_l2_err_disable;
+#ifdef CONFIG_MPC85xx
static u32 orig_hid1[2];
+#endif
/************************ MC SYSFS parts ***********************************/
@@ -646,6 +650,7 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
{ .compatible = "fsl,mpc8560-l2-cache-controller", },
{ .compatible = "fsl,mpc8568-l2-cache-controller", },
{ .compatible = "fsl,mpc8572-l2-cache-controller", },
+ { .compatible = "fsl,p2020-l2-cache-controller", },
{},
};
@@ -788,19 +793,20 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
csrow = &mci->csrows[index];
cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
(index * MPC85XX_MC_CS_BNDS_OFS));
- start = (cs_bnds & 0xfff0000) << 4;
- end = ((cs_bnds & 0xfff) << 20);
- if (start)
- start |= 0xfffff;
- if (end)
- end |= 0xfffff;
+
+ start = (cs_bnds & 0xffff0000) >> 16;
+ end = (cs_bnds & 0x0000ffff);
if (start == end)
continue; /* not populated */
+ start <<= (24 - PAGE_SHIFT);
+ end <<= (24 - PAGE_SHIFT);
+ end |= (1 << (24 - PAGE_SHIFT)) - 1;
+
csrow->first_page = start >> PAGE_SHIFT;
csrow->last_page = end >> PAGE_SHIFT;
- csrow->nr_pages = csrow->last_page + 1 - csrow->first_page;
+ csrow->nr_pages = end + 1 - start;
csrow->grain = 8;
csrow->mtype = mtype;
csrow->dtype = DEV_UNKNOWN;
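
The rewritten decode treats CS_BNDS as two 16-bit fields, the starting address in bits 31:16 and the ending address in bits 15:0, both in units of 2^24 bytes. A worked example with a hypothetical register value:

#include <stdio.h>

int main(void)
{
	unsigned int cs_bnds = 0x0000000f;	/* hypothetical: 0 .. 256 MiB */
	int page_shift = 12;			/* 4 KiB pages */

	unsigned long start = (cs_bnds & 0xffff0000) >> 16;
	unsigned long end = cs_bnds & 0x0000ffff;

	start <<= (24 - page_shift);
	end <<= (24 - page_shift);
	end |= (1 << (24 - page_shift)) - 1;

	/* 0 .. 65535 -> 65536 pages of 4 KiB = 256 MiB */
	printf("pages %lu..%lu, nr_pages %lu\n", start, end, end + 1 - start);
	return 0;
}
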
@@ -984,6 +990,8 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
{ .compatible = "fsl,mpc8560-memory-controller", },
{ .compatible = "fsl,mpc8568-memory-controller", },
{ .compatible = "fsl,mpc8572-memory-controller", },
+ { .compatible = "fsl,mpc8349-memory-controller", },
+ { .compatible = "fsl,p2020-memory-controller", },
{},
};
@@ -999,13 +1007,13 @@ static struct of_platform_driver mpc85xx_mc_err_driver = {
},
};
-
+#ifdef CONFIG_MPC85xx
static void __init mpc85xx_mc_clear_rfxe(void *data)
{
orig_hid1[smp_processor_id()] = mfspr(SPRN_HID1);
mtspr(SPRN_HID1, (orig_hid1[smp_processor_id()] & ~0x20000));
}
-
+#endif
static int __init mpc85xx_mc_init(void)
{
@@ -1038,26 +1046,32 @@ static int __init mpc85xx_mc_init(void)
printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
#endif
+#ifdef CONFIG_MPC85xx
/*
* need to clear HID1[RFXE] to disable machine check int
* so we can catch it
*/
if (edac_op_state == EDAC_OPSTATE_INT)
on_each_cpu(mpc85xx_mc_clear_rfxe, NULL, 0);
+#endif
return 0;
}
module_init(mpc85xx_mc_init);
+#ifdef CONFIG_MPC85xx
static void __exit mpc85xx_mc_restore_hid1(void *data)
{
mtspr(SPRN_HID1, orig_hid1[smp_processor_id()]);
}
+#endif
static void __exit mpc85xx_mc_exit(void)
{
+#ifdef CONFIG_MPC85xx
on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
+#endif
#ifdef CONFIG_PCI
of_unregister_platform_driver(&mpc85xx_pci_err_driver);
#endif
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 5131aaae8e0..a6b9fec13a7 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -90,7 +90,7 @@ static int __init mv64x60_pci_fixup(struct platform_device *pdev)
return -ENOENT;
}
- pci_serr = ioremap(r->start, r->end - r->start + 1);
+ pci_serr = ioremap(r->start, resource_size(r));
if (!pci_serr)
return -ENOMEM;
@@ -140,7 +140,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev,
r->start,
- r->end - r->start + 1,
+ resource_size(r),
pdata->name)) {
printk(KERN_ERR "%s: Error while requesting mem region\n",
__func__);
@@ -150,7 +150,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
pdata->pci_vbase = devm_ioremap(&pdev->dev,
r->start,
- r->end - r->start + 1);
+ resource_size(r));
if (!pdata->pci_vbase) {
printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
res = -ENOMEM;
@@ -306,7 +306,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev,
r->start,
- r->end - r->start + 1,
+ resource_size(r),
pdata->name)) {
printk(KERN_ERR "%s: Error while request mem region\n",
__func__);
@@ -316,7 +316,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
pdata->sram_vbase = devm_ioremap(&pdev->dev,
r->start,
- r->end - r->start + 1);
+ resource_size(r));
if (!pdata->sram_vbase) {
printk(KERN_ERR "%s: Unable to setup SRAM err regs\n",
__func__);
@@ -474,7 +474,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev,
r->start,
- r->end - r->start + 1,
+ resource_size(r),
pdata->name)) {
printk(KERN_ERR "%s: Error while requesting mem region\n",
__func__);
@@ -484,7 +484,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev,
r->start,
- r->end - r->start + 1);
+ resource_size(r));
if (!pdata->cpu_vbase[0]) {
printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
res = -ENOMEM;
@@ -501,7 +501,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev,
r->start,
- r->end - r->start + 1,
+ resource_size(r),
pdata->name)) {
printk(KERN_ERR "%s: Error while requesting mem region\n",
__func__);
@@ -511,7 +511,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev,
r->start,
- r->end - r->start + 1);
+ resource_size(r));
if (!pdata->cpu_vbase[1]) {
printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
res = -ENOMEM;
@@ -726,7 +726,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
if (!devm_request_mem_region(&pdev->dev,
r->start,
- r->end - r->start + 1,
+ resource_size(r),
pdata->name)) {
printk(KERN_ERR "%s: Error while requesting mem region\n",
__func__);
@@ -736,7 +736,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
pdata->mc_vbase = devm_ioremap(&pdev->dev,
r->start,
- r->end - r->start + 1);
+ resource_size(r));
if (!pdata->mc_vbase) {
printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
res = -ENOMEM;