author	frans <fransmeulenbroeks@gmail.com>	2008-08-15 23:14:31 +0200
committer	David Woodhouse <David.Woodhouse@intel.com>	2008-08-16 10:55:33 +0100
commit	e6cf5df1838c28bb060ac45b5585e48e71bbc740 (patch)
tree	b1333e4664fce7dd3c58dd879192e085cb1c2066 /drivers
parent	782b7a367d81da005d93b28cb00f9ae086773c24 (diff)
[MTD] [NAND] nand_ecc.c: rewrite for improved performance
This patch improves the performance of the ECC generation code by a factor
of 18 on an Intel D920 CPU, a factor of 7 on MIPS and a factor of 5 on ARM
(NSLU2).

Signed-off-by: Frans Meulenbroeks <fransmeulenbroeks@gmail.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/mtd/nand/nand_ecc.c	496
1 file changed, 372 insertions(+), 124 deletions(-)
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 918a806a847..7129da51bb3 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -1,13 +1,18 @@
/*
- * This file contains an ECC algorithm from Toshiba that detects and
- * corrects 1 bit errors in a 256 byte block of data.
+ * This file contains an ECC algorithm that detects and corrects 1 bit
+ * errors in a 256 byte block of data.
*
* drivers/mtd/nand/nand_ecc.c
*
- * Copyright (C) 2000-2004 Steven J. Hill (sjhill@realitydiluted.com)
- * Toshiba America Electronics Components, Inc.
+ * Copyright (C) 2008 Koninklijke Philips Electronics NV.
+ * Author: Frans Meulenbroeks
*
- * Copyright (C) 2006 Thomas Gleixner <tglx@linutronix.de>
+ * Completely replaces the previous ECC implementation which was written by:
+ * Steven J. Hill (sjhill@realitydiluted.com)
+ * Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Information on how this algorithm works and how it was developed
+ * can be found in Documentation/nand/ecc.txt
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -23,174 +28,417 @@
* with this file; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
- * As a special exception, if other files instantiate templates or use
- * macros or inline functions from these files, or you compile these
- * files and link them with other works to produce a work based on these
- * files, these files do not by themselves cause the resulting work to be
- * covered by the GNU General Public License. However the source code for
- * these files must still be made available in accordance with section (3)
- * of the GNU General Public License.
- *
- * This exception does not invalidate any other reasons why a work based on
- * this file might be covered by the GNU General Public License.
*/
+/*
+ * The STANDALONE macro is useful when running the code outside the kernel
+ * e.g. when running the code in a testbed or a benchmark program.
+ * When STANDALONE is used, the module-related macros are commented out,
+ * as are the Linux include files.
+ * Instead, a private definition of mtd_info is given to satisfy the compiler
+ * (the code does not use the fields of mtd_info, so it does not care)
+ */
+#ifndef STANDALONE
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/nand_ecc.h>
+#else
+typedef unsigned long uint32_t;	/* assumes a 32-bit unsigned long */
+struct mtd_info {
+ int dummy;
+};
+#define EXPORT_SYMBOL(x) /* x */
+
+#define MODULE_LICENSE(x) /* x */
+#define MODULE_AUTHOR(x) /* x */
+#define MODULE_DESCRIPTION(x) /* x */
+#endif
+
+/*
+ * invparity is a 256 byte table that contains the odd parity
+ * for each byte. So if the number of bits in a byte is even,
+ * the array element is 1, and when the number of bits is odd
+ * the array element is 0.
+ */
+static const char invparity[256] = {
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
+};
/*
- * Pre-calculated 256-way 1 byte column parity
+ * bitsperbyte contains the number of bits set in each byte value;
+ * this is only used for testing and repairing parity
+ * (a precalculated value slightly improves performance)
*/
-static const u_char nand_ecc_precalc_table[] = {
- 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00,
- 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
- 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
- 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
- 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
- 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
- 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
- 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
- 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
- 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
- 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
- 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
- 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
- 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
- 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
- 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00
+static const char bitsperbyte[256] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
+};
+
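Both lookup tables above are mechanical and can be regenerated offline. Below is a small generator sketch (a hypothetical helper, not part of the driver); it relies only on the definitions in the comments: invparity[i] is 1 when i has an even number of bits set, and bitsperbyte[i] is the population count of i:

	#include <stdio.h>

	int main(void)
	{
		int i, b, bits;

		for (i = 0; i < 256; i++) {
			bits = 0;
			for (b = i; b; b >>= 1)	/* count the 1 bits in i */
				bits += b & 1;
			printf("invparity[%d] = %d, bitsperbyte[%d] = %d\n",
			       i, !(bits & 1), i, bits);
		}
		return 0;
	}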
+/*
+ * addressbits is a lookup table to filter out the bits from the xor-ed
+ * ecc data that identify the faulty location.
+ * this is only used for repairing parity
+ * see the comments in nand_correct_data for more details
+ */
+static const char addressbits[256] = {
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
};
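The addressbits table is equally regular: for a single bit error, each bit pair in the xor-ed ecc bytes carries one address bit in its odd position (the even bit of the pair is its complement), so the table just collects the odd-numbered bits of its index. A hedged generator sketch (again not part of the driver):

	#include <stdio.h>

	int main(void)
	{
		int i, k, addr;

		for (i = 0; i < 256; i++) {
			addr = 0;
			/* bit 2k+1 of the index becomes bit k of the address */
			for (k = 0; k < 4; k++)
				addr |= ((i >> (2 * k + 1)) & 1) << k;
			printf("addressbits[0x%02x] = 0x%02x\n", i, addr);
		}
		return 0;
	}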
/**
* nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block
- * @mtd: MTD block structure
+ * @mtd: MTD block structure (unused)
 * @buf: raw data
 * @code: buffer for ECC
*/
-int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
- u_char *ecc_code)
+int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+ unsigned char *code)
{
- uint8_t idx, reg1, reg2, reg3, tmp1, tmp2;
int i;
+	const uint32_t *bp = (const uint32_t *)buf;
+ uint32_t cur; /* current value in buffer */
+ /* rp0..rp15 are the various accumulated parities (per byte) */
+ uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
+ uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15;
+ uint32_t par; /* the cumulative parity for all data */
+ uint32_t tmppar; /* the cumulative parity for this iteration;
+				   needed for rp12 and rp14 at the end of the
+				   loop */
+
+ par = 0;
+ rp4 = 0;
+ rp6 = 0;
+ rp8 = 0;
+ rp10 = 0;
+ rp12 = 0;
+ rp14 = 0;
+
+ /*
+	 * The loop is unrolled sixteen times;
+	 * this avoids if statements to decide which rp value to update.
+	 * We also process the data one longword at a time.
+	 * Note: passing unaligned data might give a performance penalty;
+	 * it is assumed that the buffers are aligned.
+	 * tmppar is the cumulative sum of this iteration,
+	 * needed for calculating rp12, rp14 and par;
+	 * it is also used as a performance improvement for rp6, rp8 and rp10.
+	 * (A rolled-up equivalent is sketched after the loop for reference.)
+ */
+ for (i = 0; i < 4; i++) {
+ cur = *bp++;
+ tmppar = cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= tmppar;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp8 ^= tmppar;
- /* Initialize variables */
- reg1 = reg2 = reg3 = 0;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp10 ^= tmppar;
- /* Build up column parity */
- for(i = 0; i < 256; i++) {
- /* Get CP0 - CP5 from table */
- idx = nand_ecc_precalc_table[*dat++];
- reg1 ^= (idx & 0x3f);
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp8 ^= cur;
- /* All bit XOR = 1 ? */
- if (idx & 0x40) {
- reg3 ^= (uint8_t) i;
- reg2 ^= ~((uint8_t) i);
- }
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+
+ par ^= tmppar;
+ if ((i & 0x1) == 0)
+ rp12 ^= tmppar;
+ if ((i & 0x2) == 0)
+ rp14 ^= tmppar;
}
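For reference, here is a rolled-up form of the unrolled loop above, as a hedged sketch using the same variables (all 64 longwords of the 256 byte block); the per-word if statements it needs are exactly the cost the unrolling eliminates:

	for (i = 0; i < 64; i++) {
		cur = *bp++;
		par ^= cur;		/* every longword contributes to par */
		if ((i & 0x01) == 0)
			rp4 ^= cur;	/* byte address bit 2 == 0 */
		if ((i & 0x02) == 0)
			rp6 ^= cur;	/* byte address bit 3 == 0 */
		if ((i & 0x04) == 0)
			rp8 ^= cur;	/* byte address bit 4 == 0 */
		if ((i & 0x08) == 0)
			rp10 ^= cur;	/* byte address bit 5 == 0 */
		if ((i & 0x10) == 0)
			rp12 ^= cur;	/* byte address bit 6 == 0 */
		if ((i & 0x20) == 0)
			rp14 ^= cur;	/* byte address bit 7 == 0 */
	}

The odd-numbered parities (rp5, rp7, ...) need no accumulators of their own because they are derived later as par ^ rp4, par ^ rp6, and so on.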
- /* Create non-inverted ECC code from line parity */
- tmp1 = (reg3 & 0x80) >> 0; /* B7 -> B7 */
- tmp1 |= (reg2 & 0x80) >> 1; /* B7 -> B6 */
- tmp1 |= (reg3 & 0x40) >> 1; /* B6 -> B5 */
- tmp1 |= (reg2 & 0x40) >> 2; /* B6 -> B4 */
- tmp1 |= (reg3 & 0x20) >> 2; /* B5 -> B3 */
- tmp1 |= (reg2 & 0x20) >> 3; /* B5 -> B2 */
- tmp1 |= (reg3 & 0x10) >> 3; /* B4 -> B1 */
- tmp1 |= (reg2 & 0x10) >> 4; /* B4 -> B0 */
-
- tmp2 = (reg3 & 0x08) << 4; /* B3 -> B7 */
- tmp2 |= (reg2 & 0x08) << 3; /* B3 -> B6 */
- tmp2 |= (reg3 & 0x04) << 3; /* B2 -> B5 */
- tmp2 |= (reg2 & 0x04) << 2; /* B2 -> B4 */
- tmp2 |= (reg3 & 0x02) << 2; /* B1 -> B3 */
- tmp2 |= (reg2 & 0x02) << 1; /* B1 -> B2 */
- tmp2 |= (reg3 & 0x01) << 1; /* B0 -> B1 */
- tmp2 |= (reg2 & 0x01) << 0; /* B7 -> B0 */
-
- /* Calculate final ECC code */
+ /*
+	 * handle the fact that we use longword operations:
+	 * we'll bring rp4..rp14 back to single byte entities by shifting and
+	 * xoring. First fold the upper and lower 16 bits,
+ * then the upper and lower 8 bits.
+ */
+ rp4 ^= (rp4 >> 16);
+ rp4 ^= (rp4 >> 8);
+ rp4 &= 0xff;
+ rp6 ^= (rp6 >> 16);
+ rp6 ^= (rp6 >> 8);
+ rp6 &= 0xff;
+ rp8 ^= (rp8 >> 16);
+ rp8 ^= (rp8 >> 8);
+ rp8 &= 0xff;
+ rp10 ^= (rp10 >> 16);
+ rp10 ^= (rp10 >> 8);
+ rp10 &= 0xff;
+ rp12 ^= (rp12 >> 16);
+ rp12 ^= (rp12 >> 8);
+ rp12 &= 0xff;
+ rp14 ^= (rp14 >> 16);
+ rp14 ^= (rp14 >> 8);
+ rp14 &= 0xff;
+
+ /*
+	 * we also need to calculate the row parity for rp0..rp3.
+	 * This is present in par: reading its four bytes from most to
+	 * least significant (assuming little-endian longwords), par is now
+	 * rp3 rp3 rp2 rp2
+	 * as well as
+	 * rp1 rp0 rp1 rp0
+ * First calculate rp2 and rp3
+ * (and yes: rp2 = (par ^ rp3) & 0xff; but doing that did not
+ * give a performance improvement)
+ */
+ rp3 = (par >> 16);
+ rp3 ^= (rp3 >> 8);
+ rp3 &= 0xff;
+ rp2 = par & 0xffff;
+ rp2 ^= (rp2 >> 8);
+ rp2 &= 0xff;
+
+ /* reduce par to 16 bits then calculate rp1 and rp0 */
+ par ^= (par >> 16);
+ rp1 = (par >> 8) & 0xff;
+ rp0 = (par & 0xff);
+
+ /* finally reduce par to 8 bits */
+ par ^= (par >> 8);
+ par &= 0xff;
+
+ /*
+	 * and calculate rp5..rp15.
+	 * Note that par = rp4 ^ rp5 and, since x ^ x = 0, we can
+	 * derive:
+	 * rp5 = (par ^ rp4);
+	 * The & 0xff seems superfluous, but benchmarking showed that
+	 * leaving it out gives slightly worse results. No idea why; probably
+	 * it has to do with the way the pipeline in the Pentium is organized.
+ */
+ rp5 = (par ^ rp4) & 0xff;
+ rp7 = (par ^ rp6) & 0xff;
+ rp9 = (par ^ rp8) & 0xff;
+ rp11 = (par ^ rp10) & 0xff;
+ rp13 = (par ^ rp12) & 0xff;
+ rp15 = (par ^ rp14) & 0xff;
+
+ /*
+ * Finally calculate the ecc bits.
+	 * Again here it might seem that there are performance optimisations
+	 * possible, but benchmarks showed that on the system this was
+	 * developed on, the code below is the fastest.
+ */
#ifdef CONFIG_MTD_NAND_ECC_SMC
- ecc_code[0] = ~tmp2;
- ecc_code[1] = ~tmp1;
+ code[0] =
+ (invparity[rp7] << 7) |
+ (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) |
+ (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) |
+ (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) |
+ (invparity[rp0]);
+ code[1] =
+ (invparity[rp15] << 7) |
+ (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) |
+ (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) |
+ (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) |
+ (invparity[rp8]);
#else
- ecc_code[0] = ~tmp1;
- ecc_code[1] = ~tmp2;
+ code[1] =
+ (invparity[rp7] << 7) |
+ (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) |
+ (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) |
+ (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) |
+ (invparity[rp0]);
+ code[0] =
+ (invparity[rp15] << 7) |
+ (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) |
+ (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) |
+ (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) |
+ (invparity[rp8]);
#endif
- ecc_code[2] = ((~reg1) << 2) | 0x03;
-
+ code[2] =
+ (invparity[par & 0xf0] << 7) |
+ (invparity[par & 0x0f] << 6) |
+ (invparity[par & 0xcc] << 5) |
+ (invparity[par & 0x33] << 4) |
+ (invparity[par & 0xaa] << 3) |
+ (invparity[par & 0x55] << 2) |
+ 3;
return 0;
}
EXPORT_SYMBOL(nand_calculate_ecc);
-static inline int countbits(uint32_t byte)
-{
- int res = 0;
-
- for (;byte; byte >>= 1)
- res += byte & 0x01;
- return res;
-}
-
/**
* nand_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @mtd: MTD block structure
+ * @mtd: MTD block structure (unused)
 * @buf: raw data read from the chip
* @read_ecc: ECC from the chip
* @calc_ecc: the ECC calculated from raw data
*
 * Detect and correct a 1 bit error for a 256 byte block
*/
-int nand_correct_data(struct mtd_info *mtd, u_char *dat,
- u_char *read_ecc, u_char *calc_ecc)
+int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
{
- uint8_t s0, s1, s2;
+ int nr_bits;
+ unsigned char b0, b1, b2;
+ unsigned char byte_addr, bit_addr;
+ /*
+	 * b0 to b2 indicate which bit is faulty (if any);
+	 * we might need the xor result more than once,
+	 * so we keep it in local variables
+ */
#ifdef CONFIG_MTD_NAND_ECC_SMC
- s0 = calc_ecc[0] ^ read_ecc[0];
- s1 = calc_ecc[1] ^ read_ecc[1];
- s2 = calc_ecc[2] ^ read_ecc[2];
+ b0 = read_ecc[0] ^ calc_ecc[0];
+ b1 = read_ecc[1] ^ calc_ecc[1];
#else
- s1 = calc_ecc[0] ^ read_ecc[0];
- s0 = calc_ecc[1] ^ read_ecc[1];
- s2 = calc_ecc[2] ^ read_ecc[2];
+ b0 = read_ecc[1] ^ calc_ecc[1];
+ b1 = read_ecc[0] ^ calc_ecc[0];
#endif
- if ((s0 | s1 | s2) == 0)
- return 0;
-
- /* Check for a single bit error */
- if( ((s0 ^ (s0 >> 1)) & 0x55) == 0x55 &&
- ((s1 ^ (s1 >> 1)) & 0x55) == 0x55 &&
- ((s2 ^ (s2 >> 1)) & 0x54) == 0x54) {
-
- uint32_t byteoffs, bitnum;
+ b2 = read_ecc[2] ^ calc_ecc[2];
- byteoffs = (s1 << 0) & 0x80;
- byteoffs |= (s1 << 1) & 0x40;
- byteoffs |= (s1 << 2) & 0x20;
- byteoffs |= (s1 << 3) & 0x10;
+ /* check if there are any bitfaults */
- byteoffs |= (s0 >> 4) & 0x08;
- byteoffs |= (s0 >> 3) & 0x04;
- byteoffs |= (s0 >> 2) & 0x02;
- byteoffs |= (s0 >> 1) & 0x01;
+ /* count nr of bits; use table lookup, faster than calculating it */
+ nr_bits = bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2];
- bitnum = (s2 >> 5) & 0x04;
- bitnum |= (s2 >> 4) & 0x02;
- bitnum |= (s2 >> 3) & 0x01;
-
- dat[byteoffs] ^= (1 << bitnum);
-
- return 1;
+	/* repeated if statements are slightly more efficient than a switch; */
+	/* ordered by decreasing likelihood */
+ if (nr_bits == 0)
+ return (0); /* no error */
+ if (nr_bits == 11) { /* correctable error */
+ /*
+		 * rp15/13/11/9/7/5/3/1 indicate which byte is the faulty byte;
+		 * cp5/cp3/cp1 indicate the faulty bit.
+ * A lookup table (called addressbits) is used to filter
+ * the bits from the byte they are in.
+ * A marginal optimisation is possible by having three
+ * different lookup tables.
+ * One as we have now (for b0), one for b2
+ * (that would avoid the >> 1), and one for b1 (with all values
+		 * << 4). However, it was felt that introducing two more tables
+		 * would hardly justify the gain.
+		 *
+		 * The b2 shift is there to get rid of the lowest two bits.
+		 * We could also do addressbits[b2] >> 1, but for the
+		 * performance it does not make any difference.
+ */
+ byte_addr = (addressbits[b1] << 4) + addressbits[b0];
+ bit_addr = addressbits[b2 >> 2];
+ /* flip the bit */
+ buf[byte_addr] ^= (1 << bit_addr);
+ return (1);
}
-
- if(countbits(s0 | ((uint32_t)s1 << 8) | ((uint32_t)s2 <<16)) == 1)
- return 1;
-
- return -EBADMSG;
+ if (nr_bits == 1)
+ return (1); /* error in ecc data; no action needed */
+	return -1;		/* uncorrectable error */
}
EXPORT_SYMBOL(nand_correct_data);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
+MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
MODULE_DESCRIPTION("Generic NAND ECC support");
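As the header comment notes, the file can also be built outside the kernel by defining STANDALONE, e.g. with something like cc -DSTANDALONE test_ecc.c nand_ecc.c (the file name test_ecc.c is hypothetical). A minimal round-trip sketch, assuming a platform where unsigned long is 32 bits (as the standalone typedef presumes) and a compiler that accepts the gcc alignment attribute:

	#include <stdio.h>

	struct mtd_info;	/* opaque here; the ecc code ignores it */
	int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
			       unsigned char *code);
	int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
			      unsigned char *read_ecc, unsigned char *calc_ecc);

	int main(void)
	{
		/* aligned so the longword reads in nand_calculate_ecc are safe */
		static unsigned char data[256] __attribute__((aligned(4)));
		unsigned char stored[3], calc[3];
		int i;

		for (i = 0; i < 256; i++)
			data[i] = (unsigned char)(i * 7);	/* arbitrary pattern */

		nand_calculate_ecc(NULL, data, stored);	/* ecc as written to flash */
		data[42] ^= 0x08;			/* inject a single bit error */
		nand_calculate_ecc(NULL, data, calc);	/* ecc over read-back data */

		if (nand_correct_data(NULL, data, stored, calc) == 1 &&
		    data[42] == (unsigned char)(42 * 7))
			printf("single bit error corrected\n");
		return 0;
	}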