Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/amd64_edac.c      93
-rw-r--r--  drivers/edac/edac_mce_amd.c    24
-rw-r--r--  drivers/edac/edac_pci_sysfs.c   2
-rw-r--r--  drivers/edac/i5000_edac.c       8
-rw-r--r--  drivers/edac/i5100_edac.c     252
5 files changed, 242 insertions, 137 deletions
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 5fdd6daa40e..000dc67b85b 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -13,6 +13,8 @@ module_param(report_gart_errors, int, 0644);
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);
+static struct msr *msrs;
+
/* Lookup table for all possible MC control instances */
struct amd64_pvt;
static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
@@ -195,7 +197,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
edac_printk(KERN_DEBUG, EDAC_MC,
"pci-read, sdram scrub control value: %d \n", scrubval);
- for (i = 0; ARRAY_SIZE(scrubrates); i++) {
+ for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
if (scrubrates[i].scrubval == scrubval) {
*bw = scrubrates[i].bandwidth;
status = 0;
@@ -1698,11 +1700,14 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
*/
static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
{
- int dimm, size0, size1;
+ int dimm, size0, size1, factor = 0;
u32 dbam;
u32 *dcsb;
if (boot_cpu_data.x86 == 0xf) {
+ if (pvt->dclr0 & F10_WIDTH_128)
+ factor = 1;
+
/* K8 families < revF not supported yet */
if (pvt->ext_model < K8_REV_F)
return;
@@ -1730,7 +1735,8 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
- dimm * 2, size0, dimm * 2 + 1, size1);
+ dimm * 2, size0 << factor,
+ dimm * 2 + 1, size1 << factor);
}
}
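A note on the shift introduced above: factor encodes the DCT data width, so with F10_WIDTH_128 set each reported DIMM size doubles (a size0 of 1024 prints as 2048 MB), while a 64-bit width leaves the sizes unchanged.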
@@ -2343,7 +2349,7 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
- if (!dct_ganging_enabled(pvt)) {
+ if (!dct_ganging_enabled(pvt) && boot_cpu_data.x86 >= 0x10) {
amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
}
@@ -2495,8 +2501,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
static bool amd64_nb_mce_bank_enabled_on_node(int nid)
{
cpumask_var_t mask;
- struct msr *msrs;
- int cpu, nbe, idx = 0;
+ int cpu, nbe;
bool ret = false;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
@@ -2507,32 +2512,22 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
get_cpus_on_this_dct_cpumask(mask, nid);
- msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
- if (!msrs) {
- amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
- __func__);
- free_cpumask_var(mask);
- return false;
- }
-
rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
for_each_cpu(cpu, mask) {
- nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
+ struct msr *reg = per_cpu_ptr(msrs, cpu);
+ nbe = reg->l & K8_MSR_MCGCTL_NBE;
debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, msrs[idx].q,
+ cpu, reg->q,
(nbe ? "enabled" : "disabled"));
if (!nbe)
goto out;
-
- idx++;
}
ret = true;
out:
- kfree(msrs);
free_cpumask_var(mask);
return ret;
}
@@ -2540,8 +2535,7 @@ out:
static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
{
cpumask_var_t cmask;
- struct msr *msrs = NULL;
- int cpu, idx = 0;
+ int cpu;
if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
@@ -2551,34 +2545,27 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
- msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
- if (!msrs) {
- amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
- __func__);
- return -ENOMEM;
- }
-
rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
for_each_cpu(cpu, cmask) {
+ struct msr *reg = per_cpu_ptr(msrs, cpu);
+
if (on) {
- if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
+ if (reg->l & K8_MSR_MCGCTL_NBE)
pvt->flags.ecc_report = 1;
- msrs[idx].l |= K8_MSR_MCGCTL_NBE;
+ reg->l |= K8_MSR_MCGCTL_NBE;
} else {
/*
* Turn off ECC reporting only when it was off before
*/
if (!pvt->flags.ecc_report)
- msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
+ reg->l &= ~K8_MSR_MCGCTL_NBE;
}
- idx++;
}
wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
- kfree(msrs);
free_cpumask_var(cmask);
return 0;
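Both converted routines share the same per-CPU MSR idiom: gather MCG_CTL from every core on the node with rdmsr_on_cpus(), inspect or modify each core's copy through per_cpu_ptr(), then (on the toggle path) scatter the result back with wrmsr_on_cpus(). A condensed sketch of that round trip, assuming kernel context, the file-scope msrs buffer this patch obtains from msrs_alloc(), and a hypothetical helper name:

static void example_set_nbe_on_mask(const struct cpumask *mask)
{
	int cpu;

	/* gather: one struct msr per CPU lands in the per-CPU buffer */
	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);

		reg->l |= K8_MSR_MCGCTL_NBE;	/* modify the local copy */
	}

	/* scatter: write each CPU's value back in one pass */
	wrmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
}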
@@ -2703,9 +2690,8 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
amd64_printk(KERN_WARNING, "%s", ecc_warning);
return -ENODEV;
}
- } else
- /* CLEAR the override, since BIOS controlled it */
ecc_enable_override = 0;
+ }
return 0;
}
@@ -2942,16 +2928,15 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
amd64_free_mc_sibling_devices(pvt);
- kfree(pvt);
- mci->pvt_info = NULL;
-
- mci_lookup[pvt->mc_node_id] = NULL;
-
/* unregister from EDAC MCE */
amd_report_gart_errors(false);
amd_unregister_ecc_decoder(amd64_decode_bus_error);
/* Free the EDAC CORE resources */
+ mci->pvt_info = NULL;
+ mci_lookup[pvt->mc_node_id] = NULL;
+
+ kfree(pvt);
edac_mc_free(mci);
}
@@ -3028,23 +3013,29 @@ static void amd64_setup_pci_device(void)
static int __init amd64_edac_init(void)
{
int nb, err = -ENODEV;
+ bool load_ok = false;
edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
opstate_init();
if (cache_k8_northbridges() < 0)
- return err;
+ goto err_ret;
+
+ msrs = msrs_alloc();
+ if (!msrs)
+ goto err_ret;
err = pci_register_driver(&amd64_pci_driver);
if (err)
- return err;
+ goto err_pci;
/*
* At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
* amd64_pvt structs. These will be used in the 2nd stage init function
* to finish initialization of the MC instances.
*/
+ err = -ENODEV;
for (nb = 0; nb < num_k8_northbridges; nb++) {
if (!pvt_lookup[nb])
continue;
@@ -3052,16 +3043,21 @@ static int __init amd64_edac_init(void)
err = amd64_init_2nd_stage(pvt_lookup[nb]);
if (err)
goto err_2nd_stage;
- }
- amd64_setup_pci_device();
+ load_ok = true;
+ }
- return 0;
+ if (load_ok) {
+ amd64_setup_pci_device();
+ return 0;
+ }
err_2nd_stage:
- debugf0("2nd stage failed\n");
pci_unregister_driver(&amd64_pci_driver);
-
+err_pci:
+ msrs_free(msrs);
+ msrs = NULL;
+err_ret:
return err;
}
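The reworked init path follows the usual kernel unwind ladder: each acquired resource adds a label, and a failure jumps to the label that releases everything obtained so far, in reverse order. Schematically (acquire_*/release_* are placeholders, not driver functions):

static int __init example_init(void)
{
	int err;

	err = acquire_a();	/* here: msrs_alloc() */
	if (err)
		goto err_ret;	/* nothing to undo yet */

	err = acquire_b();	/* here: pci_register_driver() */
	if (err)
		goto err_a;

	err = acquire_c();	/* here: the 2nd stage init */
	if (err)
		goto err_b;

	return 0;

err_b:
	release_b();
err_a:
	release_a();		/* here: msrs_free() */
err_ret:
	return err;
}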
@@ -3071,6 +3067,9 @@ static void __exit amd64_edac_exit(void)
edac_pci_release_generic_ctl(amd64_ctl_pci);
pci_unregister_driver(&amd64_pci_driver);
+
+ msrs_free(msrs);
+ msrs = NULL;
}
module_init(amd64_edac_init);
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index c693fcc2213..8fc91a01962 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -299,6 +299,12 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
if (!handle_errors)
return;
+ /*
+ * GART TLB error reporting is disabled by default. Bail out early.
+ */
+ if (TLB_ERROR(ec) && !report_gart_errors)
+ return;
+
pr_emerg(" Northbridge Error, node %d", node_id);
/*
@@ -310,10 +316,9 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
} else {
- pr_cont(", core: %d\n", ilog2((regs->nbsh & 0xf)));
+ pr_cont(", core: %d\n", fls((regs->nbsh & 0xf) - 1));
}
-
pr_emerg("%s.\n", EXT_ERR_MSG(xec));
if (BUS_ERROR(ec) && nb_bus_decoder)
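On the ilog2() to fls() change above: for a one-hot core field, fls(x - 1) and ilog2(x) yield the same core number, since the kernel's fls() returns one plus the index of the highest set bit (with fls(0) == 0). A standalone illustration, with fls_demo() standing in for the kernel's fls():

#include <stdio.h>

static int fls_demo(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int mask;

	/* prints: 0x1 -> core 0, 0x2 -> core 1, 0x4 -> core 2, 0x8 -> core 3 */
	for (mask = 1; mask <= 8; mask <<= 1)
		printf("0x%x -> core %d\n", mask, fls_demo(mask - 1));
	return 0;
}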
@@ -333,21 +338,6 @@ static void amd_decode_fr_mce(u64 mc5_status)
static inline void amd_decode_err_code(unsigned int ec)
{
if (TLB_ERROR(ec)) {
- /*
- * GART errors are intended to help graphics driver developers
- * to detect bad GART PTEs. It is recommended by AMD to disable
- * GART table walk error reporting by default[1] (currently
- * being disabled in mce_cpu_quirks()) and according to the
- * comment in mce_cpu_quirks(), such GART errors can be
- * incorrectly triggered. We may see these errors anyway and
- * unless requested by the user, they won't be reported.
- *
- * [1] section 13.10.1 on BIOS and Kernel Developers Guide for
- * AMD NPT family 0Fh processors
- */
- if (!report_gart_errors)
- return;
-
pr_emerg(" Transaction: %s, Cache Level %s\n",
TT_MSG(ec), LL_MSG(ec));
} else if (MEM_ERROR(ec)) {
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 422728cfe99..fb60a877d76 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -534,8 +534,6 @@ static void edac_pci_dev_parity_clear(struct pci_dev *dev)
{
u8 header_type;
- debugf0("%s()\n", __func__);
-
get_pci_parity_status(dev, 0);
/* read the device TYPE, looking for bridges */
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 77a9579d716..adc10a2ac5f 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -577,7 +577,13 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
- channel = branch;
+
+ /*
+ * According to the i5000 datasheet, bit 28 has no significance
+ * for errors M4Err-M12Err and M17Err-M21Err on FERR_NF_FBD
+ */
+ channel = branch & 2;
+
bank = NREC_BANK(info->nrecmema);
rank = NREC_RANK(info->nrecmema);
rdwr = NREC_RDWR(info->nrecmema);
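To see what the new mask does: branch carries the FBD branch/channel index taken from FERR_NF_FBD, and '& 2' drops its low bit (register bit 28), which the datasheet note above declares insignificant for these error types, collapsing values 0/1 to channel 0 and 2/3 to channel 2. A standalone illustration:

#include <stdio.h>

int main(void)
{
	int branch;

	/* prints: 0 -> 0, 1 -> 0, 2 -> 2, 3 -> 2 */
	for (branch = 0; branch < 4; branch++)
		printf("branch %d -> channel %d\n", branch, branch & 2);
	return 0;
}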
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 22db05a67bf..7785d8ffa40 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -9,6 +9,11 @@
* Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
* http://download.intel.com/design/chipsets/datashts/318378.pdf
*
+ * The Intel 5100 has two independent channels. The EDAC core
+ * currently cannot reflect this configuration, so instead the
+ * chip-select rows for each respective channel are laid out one
+ * after another, the first half belonging to channel 0, the
+ * second half belonging to channel 1.
*/
#include <linux/module.h>
#include <linux/init.h>
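As a worked illustration of the layout described in the comment above, assume 6 ranks per channel (the driver reads the actual count from the I5100_MS register later in this patch): csrows 0-5 then map to channel 0 and csrows 6-11 to channel 1, mirroring the i5100_csrow_to_chan()/i5100_csrow_to_rank() helpers below.

#include <stdio.h>

#define RANKS_PER_CHAN 6	/* illustrative value, not read from hardware */

int main(void)
{
	int csrow;

	/* csrow 0 -> chan 0, rank 0 ... csrow 11 -> chan 1, rank 5 */
	for (csrow = 0; csrow < 2 * RANKS_PER_CHAN; csrow++)
		printf("csrow %2d -> chan %d, rank %d\n", csrow,
		       csrow / RANKS_PER_CHAN, csrow % RANKS_PER_CHAN);
	return 0;
}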
@@ -25,6 +30,8 @@
/* device 16, func 1 */
#define I5100_MC 0x40 /* Memory Control Register */
+#define I5100_MC_SCRBEN_MASK (1 << 7)
+#define I5100_MC_SCRBDONE_MASK (1 << 4)
#define I5100_MS 0x44 /* Memory Status Register */
#define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */
#define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */
@@ -72,11 +79,21 @@
/* bit field accessors */
+static inline u32 i5100_mc_scrben(u32 mc)
+{
+ return mc >> 7 & 1;
+}
+
static inline u32 i5100_mc_errdeten(u32 mc)
{
return mc >> 5 & 1;
}
+static inline u32 i5100_mc_scrbdone(u32 mc)
+{
+ return mc >> 4 & 1;
+}
+
static inline u16 i5100_spddata_rdo(u16 a)
{
return a >> 15 & 1;
@@ -265,42 +282,43 @@ static inline u32 i5100_recmemb_ras(u32 a)
}
/* some generic limits */
-#define I5100_MAX_RANKS_PER_CTLR 6
-#define I5100_MAX_CTLRS 2
+#define I5100_MAX_RANKS_PER_CHAN 6
+#define I5100_CHANNELS 2
#define I5100_MAX_RANKS_PER_DIMM 4
#define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */
-#define I5100_MAX_DIMM_SLOTS_PER_CTLR 4
+#define I5100_MAX_DIMM_SLOTS_PER_CHAN 4
#define I5100_MAX_RANK_INTERLEAVE 4
#define I5100_MAX_DMIRS 5
+#define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ)
struct i5100_priv {
/* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
- int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR];
+ int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];
/*
* mainboard chip select map -- maps i5100 chip selects to
* DIMM slot chip selects. In the case of only 4 ranks per
- * controller, the mapping is fairly obvious but not unique.
- * we map -1 -> NC and assume both controllers use the same
+ * channel, the mapping is fairly obvious but not unique.
+ * we map -1 -> NC and assume both channels use the same
* map...
*
*/
- int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM];
+ int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];
/* memory interleave range */
struct {
u64 limit;
unsigned way[2];
- } mir[I5100_MAX_CTLRS];
+ } mir[I5100_CHANNELS];
/* adjusted memory interleave range register */
- unsigned amir[I5100_MAX_CTLRS];
+ unsigned amir[I5100_CHANNELS];
/* dimm interleave range */
struct {
unsigned rank[I5100_MAX_RANK_INTERLEAVE];
u64 limit;
- } dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS];
+ } dmir[I5100_CHANNELS][I5100_MAX_DMIRS];
/* memory technology registers... */
struct {
@@ -310,30 +328,33 @@ struct i5100_priv {
unsigned numbank; /* 2 or 3 lines */
unsigned numrow; /* 13 .. 16 lines */
unsigned numcol; /* 11 .. 12 lines */
- } mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR];
+ } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];
u64 tolm; /* top of low memory in bytes */
- unsigned ranksperctlr; /* number of ranks per controller */
+ unsigned ranksperchan; /* number of ranks per channel */
struct pci_dev *mc; /* device 16 func 1 */
struct pci_dev *ch0mm; /* device 21 func 0 */
struct pci_dev *ch1mm; /* device 22 func 0 */
+
+ struct delayed_work i5100_scrubbing;
+ int scrub_enable;
};
-/* map a rank/ctlr to a slot number on the mainboard */
+/* map a rank/chan to a slot number on the mainboard */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
- int ctlr, int rank)
+ int chan, int rank)
{
const struct i5100_priv *priv = mci->pvt_info;
int i;
- for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
+ for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
int j;
- const int numrank = priv->dimm_numrank[ctlr][i];
+ const int numrank = priv->dimm_numrank[chan][i];
for (j = 0; j < numrank; j++)
if (priv->dimm_csmap[i][j] == rank)
- return i * 2 + ctlr;
+ return i * 2 + chan;
}
return -1;
@@ -374,32 +395,32 @@ static const char *i5100_err_msg(unsigned err)
return "none";
}
-/* convert csrow index into a rank (per controller -- 0..5) */
+/* convert csrow index into a rank (per channel -- 0..5) */
static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
- return csrow % priv->ranksperctlr;
+ return csrow % priv->ranksperchan;
}
-/* convert csrow index into a controller (0..1) */
-static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow)
+/* convert csrow index into a channel (0..1) */
+static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
- return csrow / priv->ranksperctlr;
+ return csrow / priv->ranksperchan;
}
static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
- int ctlr, int rank)
+ int chan, int rank)
{
const struct i5100_priv *priv = mci->pvt_info;
- return ctlr * priv->ranksperctlr + rank;
+ return chan * priv->ranksperchan + rank;
}
static void i5100_handle_ce(struct mem_ctl_info *mci,
- int ctlr,
+ int chan,
unsigned bank,
unsigned rank,
unsigned long syndrome,
@@ -407,12 +428,12 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
+ const int csrow = i5100_rank_to_csrow(mci, chan, rank);
printk(KERN_ERR
- "CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
+ "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- ctlr, bank, rank, syndrome, cas, ras,
+ chan, bank, rank, syndrome, cas, ras,
csrow, mci->csrows[csrow].channels[0].label, msg);
mci->ce_count++;
@@ -421,7 +442,7 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
}
static void i5100_handle_ue(struct mem_ctl_info *mci,
- int ctlr,
+ int chan,
unsigned bank,
unsigned rank,
unsigned long syndrome,
@@ -429,23 +450,23 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
+ const int csrow = i5100_rank_to_csrow(mci, chan, rank);
printk(KERN_ERR
- "UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
+ "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- ctlr, bank, rank, syndrome, cas, ras,
+ chan, bank, rank, syndrome, cas, ras,
csrow, mci->csrows[csrow].channels[0].label, msg);
mci->ue_count++;
mci->csrows[csrow].ue_count++;
}
-static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
+static void i5100_read_log(struct mem_ctl_info *mci, int chan,
u32 ferr, u32 nerr)
{
struct i5100_priv *priv = mci->pvt_info;
- struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm;
+ struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm;
u32 dw;
u32 dw2;
unsigned syndrome = 0;
@@ -484,7 +505,7 @@ static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
else
msg = i5100_err_msg(nerr);
- i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
+ i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
}
if (i5100_validlog_nrecmemvalid(dw)) {
@@ -506,7 +527,7 @@ static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
else
msg = i5100_err_msg(nerr);
- i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
+ i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
}
pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
@@ -534,6 +555,80 @@ static void i5100_check_error(struct mem_ctl_info *mci)
}
}
+/* The i5100 chipset will scrub the entire memory once, then
+ * set a done bit. Continuous scrubbing is achieved by enqueueing
+ * delayed work on a workqueue, checking every few minutes whether
+ * the scrubbing has completed and, if so, reinitiating it.
+ */
+
+static void i5100_refresh_scrubbing(struct work_struct *work)
+{
+ struct delayed_work *i5100_scrubbing = container_of(work,
+ struct delayed_work,
+ work);
+ struct i5100_priv *priv = container_of(i5100_scrubbing,
+ struct i5100_priv,
+ i5100_scrubbing);
+ u32 dw;
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ if (priv->scrub_enable) {
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ if (i5100_mc_scrbdone(dw)) {
+ dw |= I5100_MC_SCRBEN_MASK;
+ pci_write_config_dword(priv->mc, I5100_MC, dw);
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+ }
+
+ schedule_delayed_work(&(priv->i5100_scrubbing),
+ I5100_SCRUB_REFRESH_RATE);
+ }
+}
+/*
+ * The bandwidth is based on experimentation; feel free to refine it.
+ */
+static int i5100_set_scrub_rate(struct mem_ctl_info *mci,
+ u32 *bandwidth)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ u32 dw;
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+ if (*bandwidth) {
+ priv->scrub_enable = 1;
+ dw |= I5100_MC_SCRBEN_MASK;
+ schedule_delayed_work(&(priv->i5100_scrubbing),
+ I5100_SCRUB_REFRESH_RATE);
+ } else {
+ priv->scrub_enable = 0;
+ dw &= ~I5100_MC_SCRBEN_MASK;
+ cancel_delayed_work(&(priv->i5100_scrubbing));
+ }
+ pci_write_config_dword(priv->mc, I5100_MC, dw);
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ *bandwidth = 5900000 * i5100_mc_scrben(dw);
+
+ return 0;
+}
+
+static int i5100_get_scrub_rate(struct mem_ctl_info *mci,
+ u32 *bandwidth)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ u32 dw;
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ *bandwidth = 5900000 * i5100_mc_scrben(dw);
+
+ return 0;
+}
+
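Note on the two functions above: i5100_mc_scrben() evaluates to 0 or 1, so the interface is effectively on/off. Any nonzero bandwidth written through set_sdram_scrub_rate enables scrubbing, and both the setter and the getter report either 0 or the single, experimentally determined rate of 5900000.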
static struct pci_dev *pci_get_device_func(unsigned vendor,
unsigned device,
unsigned func)
@@ -557,19 +652,19 @@ static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
int csrow)
{
struct i5100_priv *priv = mci->pvt_info;
- const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow);
- const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow);
+ const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
+ const unsigned chan = i5100_csrow_to_chan(mci, csrow);
unsigned addr_lines;
/* dimm present? */
- if (!priv->mtr[ctlr][ctlr_rank].present)
+ if (!priv->mtr[chan][chan_rank].present)
return 0ULL;
addr_lines =
I5100_DIMM_ADDR_LINES +
- priv->mtr[ctlr][ctlr_rank].numcol +
- priv->mtr[ctlr][ctlr_rank].numrow +
- priv->mtr[ctlr][ctlr_rank].numbank;
+ priv->mtr[chan][chan_rank].numcol +
+ priv->mtr[chan][chan_rank].numrow +
+ priv->mtr[chan][chan_rank].numbank;
return (unsigned long)
((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
@@ -581,11 +676,11 @@ static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
int i;
- for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ for (i = 0; i < I5100_CHANNELS; i++) {
int j;
struct pci_dev *pdev = mms[i];
- for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) {
+ for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
const unsigned addr =
(j < 4) ? I5100_MTR_0 + j * 2 :
I5100_MTR_4 + (j - 4) * 2;
@@ -644,7 +739,6 @@ static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
* fill dimm chip select map
*
* FIXME:
- * o only valid for 4 ranks per controller
* o not the only way to map chip selects to dimm slots
* o investigate if there is some way to obtain this map from the bios
*/
@@ -653,9 +747,7 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
struct i5100_priv *priv = mci->pvt_info;
int i;
- WARN_ON(priv->ranksperctlr != 4);
-
- for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
+ for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
int j;
for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
@@ -663,12 +755,21 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
}
/* only 2 chip selects per slot... */
- priv->dimm_csmap[0][0] = 0;
- priv->dimm_csmap[0][1] = 3;
- priv->dimm_csmap[1][0] = 1;
- priv->dimm_csmap[1][1] = 2;
- priv->dimm_csmap[2][0] = 2;
- priv->dimm_csmap[3][0] = 3;
+ if (priv->ranksperchan == 4) {
+ priv->dimm_csmap[0][0] = 0;
+ priv->dimm_csmap[0][1] = 3;
+ priv->dimm_csmap[1][0] = 1;
+ priv->dimm_csmap[1][1] = 2;
+ priv->dimm_csmap[2][0] = 2;
+ priv->dimm_csmap[3][0] = 3;
+ } else {
+ priv->dimm_csmap[0][0] = 0;
+ priv->dimm_csmap[0][1] = 1;
+ priv->dimm_csmap[1][0] = 2;
+ priv->dimm_csmap[1][1] = 3;
+ priv->dimm_csmap[2][0] = 4;
+ priv->dimm_csmap[2][1] = 5;
+ }
}
static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
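With six ranks per channel, the fallback table above assigns each of the three populated slots two consecutive chip selects (slot k gets ranks 2k and 2k+1), while the four-rank table keeps the original interleaved mapping.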
@@ -677,10 +778,10 @@ static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
struct i5100_priv *priv = mci->pvt_info;
int i;
- for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ for (i = 0; i < I5100_CHANNELS; i++) {
int j;
- for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) {
+ for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
u8 rank;
if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
@@ -720,7 +821,7 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
pci_read_config_word(pdev, I5100_AMIR_1, &w);
priv->amir[1] = w;
- for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ for (i = 0; i < I5100_CHANNELS; i++) {
int j;
for (j = 0; j < 5; j++) {
@@ -747,7 +848,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
for (i = 0; i < mci->nr_csrows; i++) {
const unsigned long npages = i5100_npages(mci, i);
- const unsigned cntlr = i5100_csrow_to_cntlr(mci, i);
+ const unsigned chan = i5100_csrow_to_chan(mci, i);
const unsigned rank = i5100_csrow_to_rank(mci, i);
if (!npages)
@@ -765,7 +866,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
mci->csrows[i].grain = 32;
mci->csrows[i].csrow_idx = i;
mci->csrows[i].dtype =
- (priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8;
+ (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
mci->csrows[i].ue_count = 0;
mci->csrows[i].ce_count = 0;
mci->csrows[i].mtype = MEM_RDDR2;
@@ -777,7 +878,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
mci->csrows[i].channels[0].csrow = mci->csrows + i;
snprintf(mci->csrows[i].channels[0].label,
sizeof(mci->csrows[i].channels[0].label),
- "DIMM%u", i5100_rank_to_slot(mci, cntlr, rank));
+ "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
total_pages += npages;
}
@@ -815,13 +916,6 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
pci_read_config_dword(pdev, I5100_MS, &dw);
ranksperch = !!(dw & (1 << 8)) * 2 + 4;
- if (ranksperch != 4) {
- /* FIXME: get 6 ranks / controller to work - need hw... */
- printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
- ret = -ENODEV;
- goto bail_pdev;
- }
-
/* enable error reporting... */
pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
@@ -864,11 +958,21 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
mci->dev = &pdev->dev;
priv = mci->pvt_info;
- priv->ranksperctlr = ranksperch;
+ priv->ranksperchan = ranksperch;
priv->mc = pdev;
priv->ch0mm = ch0mm;
priv->ch1mm = ch1mm;
+ INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
+
+ /* If scrubbing was already enabled by the bios, start maintaining it */
+ pci_read_config_dword(pdev, I5100_MC, &dw);
+ if (i5100_mc_scrben(dw)) {
+ priv->scrub_enable = 1;
+ schedule_delayed_work(&(priv->i5100_scrubbing),
+ I5100_SCRUB_REFRESH_RATE);
+ }
+
i5100_init_dimm_layout(pdev, mci);
i5100_init_interleaving(pdev, mci);
@@ -882,6 +986,8 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
mci->ctl_page_to_phys = NULL;
mci->edac_check = i5100_check_error;
+ mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
+ mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
i5100_init_csrows(mci);
@@ -897,12 +1003,14 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
if (edac_mc_add_mc(mci)) {
ret = -ENODEV;
- goto bail_mc;
+ goto bail_scrub;
}
return ret;
-bail_mc:
+bail_scrub:
+ priv->scrub_enable = 0;
+ cancel_delayed_work_sync(&(priv->i5100_scrubbing));
edac_mc_free(mci);
bail_disable_ch1:
@@ -935,6 +1043,10 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
return;
priv = mci->pvt_info;
+
+ priv->scrub_enable = 0;
+ cancel_delayed_work_sync(&(priv->i5100_scrubbing));
+
pci_disable_device(pdev);
pci_disable_device(priv->ch0mm);
pci_disable_device(priv->ch1mm);