42 files changed, 1449 insertions, 540 deletions
diff --git a/arch/arm/mach-davinci/include/mach/nand.h b/arch/arm/mach-davinci/include/mach/nand.h index b520c4b5678..b2ad8090bd1 100644 --- a/arch/arm/mach-davinci/include/mach/nand.h +++ b/arch/arm/mach-davinci/include/mach/nand.h @@ -79,6 +79,10 @@ struct davinci_nand_pdata { /* platform_data */ /* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */ unsigned options; + + /* Main and mirror bbt descriptor overrides */ + struct nand_bbt_descr *bbt_td; + struct nand_bbt_descr *bbt_md; }; #endif /* __ARCH_ARM_DAVINCI_NAND_H */ diff --git a/arch/arm/plat-s3c/include/plat/nand.h b/arch/arm/plat-s3c/include/plat/nand.h index 18f958801e6..afd1673ad0e 100644 --- a/arch/arm/plat-s3c/include/plat/nand.h +++ b/arch/arm/plat-s3c/include/plat/nand.h @@ -17,6 +17,7 @@ * Setting this flag will allow the kernel to * look for it at boot time and also skip the NAND * scan. + * @options: Default value to set into 'struct nand_chip' options. * @nr_chips: Number of chips in this set * @nr_partitions: Number of partitions pointed to by @partitions * @name: Name of set (optional) @@ -31,6 +32,7 @@ struct s3c2410_nand_set { unsigned int disable_ecc:1; unsigned int flash_bbt:1; + unsigned int options; int nr_chips; int nr_partitions; char *name; diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index e7563a9872d..5fbf29e1e64 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -43,15 +43,17 @@ // debugging, turns off buffer write mode if set to 1 #define FORCE_WORD_WRITE 0 -#define MANUFACTURER_INTEL 0x0089 +/* Intel chips */ #define I82802AB 0x00ad #define I82802AC 0x00ac #define PF38F4476 0x881c -#define MANUFACTURER_ST 0x0020 +/* STMicroelectronics chips */ #define M50LPW080 0x002F #define M50FLW080A 0x0080 #define M50FLW080B 0x0081 +/* Atmel chips */ #define AT49BV640D 0x02de +#define AT49BV640DT 0x02db static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); @@ -199,6 +201,16 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param) cfi->cfiq->BufWriteTimeoutMax = 0; } +static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + struct cfi_pri_intelext *cfip = cfi->cmdset_priv; + + cfip->FeatureSupport |= (1 << 5); + mtd->flags |= MTD_POWERUP_LOCK; +} + #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ static void fixup_intel_strataflash(struct mtd_info *mtd, void* param) @@ -283,6 +295,8 @@ static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param) static struct cfi_fixup cfi_fixup_table[] = { { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, + { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL }, + { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL }, #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, #endif @@ -294,16 +308,16 @@ static struct cfi_fixup cfi_fixup_table[] = { #endif { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL }, { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL }, - { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, }, + { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, }, { 0, 0, NULL, NULL } }; static struct cfi_fixup jedec_fixup_table[] = { - { 
MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, }, - { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, }, - { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, }, - { MANUFACTURER_ST, M50FLW080A, fixup_use_fwh_lock, NULL, }, - { MANUFACTURER_ST, M50FLW080B, fixup_use_fwh_lock, NULL, }, + { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock, NULL, }, + { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock, NULL, }, + { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock, NULL, }, + { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock, NULL, }, + { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock, NULL, }, { 0, 0, NULL, NULL } }; static struct cfi_fixup fixup_table[] = { @@ -319,7 +333,7 @@ static struct cfi_fixup fixup_table[] = { static void cfi_fixup_major_minor(struct cfi_private *cfi, struct cfi_pri_intelext *extp) { - if (cfi->mfr == MANUFACTURER_INTEL && + if (cfi->mfr == CFI_MFR_INTEL && cfi->id == PF38F4476 && extp->MinorVersion == '3') extp->MinorVersion = '1'; } @@ -2235,7 +2249,7 @@ static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, /* Some chips have OTP located in the _top_ partition only. For example: Intel 28F256L18T (T means top-parameter device) */ - if (cfi->mfr == MANUFACTURER_INTEL) { + if (cfi->mfr == CFI_MFR_INTEL) { switch (cfi->id) { case 0x880b: case 0x880c: @@ -2564,6 +2578,7 @@ static int cfi_intelext_reset(struct mtd_info *mtd) if (!ret) { map_write(map, CMD(0xff), chip->start); chip->state = FL_SHUTDOWN; + put_chip(map, chip, chip->start); } spin_unlock(chip->mutex); } diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 94bb61e1904..1d49e18adbf 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -490,10 +490,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) } #endif - /* FIXME: erase-suspend-program is broken. See - http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */ - printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n"); - __module_get(THIS_MODULE); return mtd; @@ -589,15 +585,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr return 0; case FL_ERASING: - if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */ - goto sleep; - - if (!( mode == FL_READY - || mode == FL_POINT - || !cfip - || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)) - || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1) - ))) + if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) || + !(mode == FL_READY || mode == FL_POINT || + (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) goto sleep; /* We could check to see if we're trying to access the sector diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c index c5a84fda541..ca584d0380b 100755 --- a/drivers/mtd/chips/cfi_util.c +++ b/drivers/mtd/chips/cfi_util.c @@ -71,6 +71,13 @@ int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map, cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL); if (cfi_qry_present(map, base, cfi)) return 1; + /* some old SST chips, e.g. 
39VF160x/39VF320x */ + cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL); + if (cfi_qry_present(map, base, cfi)) + return 1; /* QRY not found */ return 0; } diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c index 736a3be265f..1bec5e1ce6a 100644 --- a/drivers/mtd/chips/jedec_probe.c +++ b/drivers/mtd/chips/jedec_probe.c @@ -142,8 +142,8 @@ /* ST - www.st.com */ #define M29F800AB 0x0058 -#define M29W800DT 0x00D7 -#define M29W800DB 0x005B +#define M29W800DT 0x22D7 +#define M29W800DB 0x225B #define M29W400DT 0x00EE #define M29W400DB 0x00EF #define M29W160DT 0x22C4 @@ -1575,7 +1575,7 @@ static const struct amd_flash_info jedec_table[] = { .dev_id = M29W800DT, .name = "ST M29W800DT", .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, - .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */ + .uaddr = MTD_UADDR_0x0AAA_0x0555, .dev_size = SIZE_1MiB, .cmd_set = P_ID_AMD_STD, .nr_regions = 4, @@ -1590,7 +1590,7 @@ static const struct amd_flash_info jedec_table[] = { .dev_id = M29W800DB, .name = "ST M29W800DB", .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, - .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */ + .uaddr = MTD_UADDR_0x0AAA_0x0555, .dev_size = SIZE_1MiB, .cmd_set = P_ID_AMD_STD, .nr_regions = 4, diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 933267a7a2a..8f8c249d7c4 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -21,6 +21,7 @@ #include <linux/interrupt.h> #include <linux/mutex.h> #include <linux/math64.h> +#include <linux/mod_devicetable.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> @@ -28,9 +29,6 @@ #include <linux/spi/spi.h> #include <linux/spi/flash.h> - -#define FLASH_PAGESIZE 256 - /* Flash opcodes. */ #define OPCODE_WREN 0x06 /* Write enable */ #define OPCODE_RDSR 0x05 /* Read status register */ @@ -60,7 +58,7 @@ /* Define max times to check status register before we give up. */ #define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */ -#define CMD_SIZE 4 +#define MAX_CMD_SIZE 4 #ifdef CONFIG_M25PXX_USE_FAST_READ #define OPCODE_READ OPCODE_FAST_READ @@ -77,8 +75,10 @@ struct m25p { struct mutex lock; struct mtd_info mtd; unsigned partitioned:1; + u16 page_size; + u16 addr_width; u8 erase_opcode; - u8 command[CMD_SIZE + FAST_READ_DUMMY_BYTE]; + u8 *command; }; static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd) @@ -197,6 +197,19 @@ static int erase_chip(struct m25p *flash) return 0; } +static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd) +{ + /* opcode is in cmd[0] */ + cmd[1] = addr >> (flash->addr_width * 8 - 8); + cmd[2] = addr >> (flash->addr_width * 8 - 16); + cmd[3] = addr >> (flash->addr_width * 8 - 24); +} + +static int m25p_cmdsz(struct m25p *flash) +{ + return 1 + flash->addr_width; +} + /* * Erase one sector of flash memory at offset ``offset'' which is any * address within the sector which should be erased. @@ -218,11 +231,9 @@ static int erase_sector(struct m25p *flash, u32 offset) /* Set up command buffer. 
*/ flash->command[0] = flash->erase_opcode; - flash->command[1] = offset >> 16; - flash->command[2] = offset >> 8; - flash->command[3] = offset; + m25p_addr2cmd(flash, offset, flash->command); - spi_write(flash->spi, flash->command, CMD_SIZE); + spi_write(flash->spi, flash->command, m25p_cmdsz(flash)); return 0; } @@ -324,7 +335,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len, * Should add 1 byte DUMMY_BYTE. */ t[0].tx_buf = flash->command; - t[0].len = CMD_SIZE + FAST_READ_DUMMY_BYTE; + t[0].len = m25p_cmdsz(flash) + FAST_READ_DUMMY_BYTE; spi_message_add_tail(&t[0], &m); t[1].rx_buf = buf; @@ -351,13 +362,11 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len, /* Set up the write data buffer. */ flash->command[0] = OPCODE_READ; - flash->command[1] = from >> 16; - flash->command[2] = from >> 8; - flash->command[3] = from; + m25p_addr2cmd(flash, from, flash->command); spi_sync(flash->spi, &m); - *retlen = m.actual_length - CMD_SIZE - FAST_READ_DUMMY_BYTE; + *retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE; mutex_unlock(&flash->lock); @@ -395,7 +404,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len, memset(t, 0, (sizeof t)); t[0].tx_buf = flash->command; - t[0].len = CMD_SIZE; + t[0].len = m25p_cmdsz(flash); spi_message_add_tail(&t[0], &m); t[1].tx_buf = buf; @@ -413,41 +422,36 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len, /* Set up the opcode in the write buffer. */ flash->command[0] = OPCODE_PP; - flash->command[1] = to >> 16; - flash->command[2] = to >> 8; - flash->command[3] = to; + m25p_addr2cmd(flash, to, flash->command); - /* what page do we start with? */ - page_offset = to % FLASH_PAGESIZE; + page_offset = to & (flash->page_size - 1); /* do all the bytes fit onto one page? */ - if (page_offset + len <= FLASH_PAGESIZE) { + if (page_offset + len <= flash->page_size) { t[1].len = len; spi_sync(flash->spi, &m); - *retlen = m.actual_length - CMD_SIZE; + *retlen = m.actual_length - m25p_cmdsz(flash); } else { u32 i; /* the size of data remaining on the first page */ - page_size = FLASH_PAGESIZE - page_offset; + page_size = flash->page_size - page_offset; t[1].len = page_size; spi_sync(flash->spi, &m); - *retlen = m.actual_length - CMD_SIZE; + *retlen = m.actual_length - m25p_cmdsz(flash); - /* write everything in PAGESIZE chunks */ + /* write everything in flash->page_size chunks */ for (i = page_size; i < len; i += page_size) { page_size = len - i; - if (page_size > FLASH_PAGESIZE) - page_size = FLASH_PAGESIZE; + if (page_size > flash->page_size) + page_size = flash->page_size; /* write the next page to flash */ - flash->command[1] = (to + i) >> 16; - flash->command[2] = (to + i) >> 8; - flash->command[3] = (to + i); + m25p_addr2cmd(flash, to + i, flash->command); t[1].tx_buf = buf + i; t[1].len = page_size; @@ -459,7 +463,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len, spi_sync(flash->spi, &m); if (retlen) - *retlen += m.actual_length - CMD_SIZE; + *retlen += m.actual_length - m25p_cmdsz(flash); } } @@ -491,7 +495,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, memset(t, 0, (sizeof t)); t[0].tx_buf = flash->command; - t[0].len = CMD_SIZE; + t[0].len = m25p_cmdsz(flash); spi_message_add_tail(&t[0], &m); t[1].tx_buf = buf; @@ -510,9 +514,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, /* Start write from odd address. 
*/ if (actual) { flash->command[0] = OPCODE_BP; - flash->command[1] = to >> 16; - flash->command[2] = to >> 8; - flash->command[3] = to; + m25p_addr2cmd(flash, to, flash->command); /* write one byte. */ t[1].len = 1; @@ -520,17 +522,15 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, ret = wait_till_ready(flash); if (ret) goto time_out; - *retlen += m.actual_length - CMD_SIZE; + *retlen += m.actual_length - m25p_cmdsz(flash); } to += actual; flash->command[0] = OPCODE_AAI_WP; - flash->command[1] = to >> 16; - flash->command[2] = to >> 8; - flash->command[3] = to; + m25p_addr2cmd(flash, to, flash->command); /* Write out most of the data here. */ - cmd_sz = CMD_SIZE; + cmd_sz = m25p_cmdsz(flash); for (; actual < len - 1; actual += 2) { t[0].len = cmd_sz; /* write two bytes. */ @@ -554,10 +554,8 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, if (actual != len) { write_enable(flash); flash->command[0] = OPCODE_BP; - flash->command[1] = to >> 16; - flash->command[2] = to >> 8; - flash->command[3] = to; - t[0].len = CMD_SIZE; + m25p_addr2cmd(flash, to, flash->command); + t[0].len = m25p_cmdsz(flash); t[1].len = 1; t[1].tx_buf = buf + actual; @@ -565,7 +563,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, ret = wait_till_ready(flash); if (ret) goto time_out; - *retlen += m.actual_length - CMD_SIZE; + *retlen += m.actual_length - m25p_cmdsz(flash); write_disable(flash); } @@ -581,8 +579,6 @@ time_out: */ struct flash_info { - char *name; - /* JEDEC id zero means "no ID" (most older chips); otherwise it has * a high byte of zero plus three data bytes: the manufacturer id, * then a two byte device id. @@ -596,87 +592,118 @@ struct flash_info { unsigned sector_size; u16 n_sectors; + u16 page_size; + u16 addr_width; + u16 flags; #define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */ +#define M25P_NO_ERASE 0x02 /* No erase command needed */ }; +#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ + ((kernel_ulong_t)&(struct flash_info) { \ + .jedec_id = (_jedec_id), \ + .ext_id = (_ext_id), \ + .sector_size = (_sector_size), \ + .n_sectors = (_n_sectors), \ + .page_size = 256, \ + .addr_width = 3, \ + .flags = (_flags), \ + }) + +#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \ + ((kernel_ulong_t)&(struct flash_info) { \ + .sector_size = (_sector_size), \ + .n_sectors = (_n_sectors), \ + .page_size = (_page_size), \ + .addr_width = (_addr_width), \ + .flags = M25P_NO_ERASE, \ + }) /* NOTE: double check command sets and memory organization when you add * more flash chips. This current list focusses on newer chips, which * have been converging on command sets which including JEDEC ID. 
*/ -static struct flash_info __devinitdata m25p_data [] = { - +static const struct spi_device_id m25p_ids[] = { /* Atmel -- some are (confusingly) marketed as "DataFlash" */ - { "at25fs010", 0x1f6601, 0, 32 * 1024, 4, SECT_4K, }, - { "at25fs040", 0x1f6604, 0, 64 * 1024, 8, SECT_4K, }, + { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) }, + { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) }, - { "at25df041a", 0x1f4401, 0, 64 * 1024, 8, SECT_4K, }, - { "at25df641", 0x1f4800, 0, 64 * 1024, 128, SECT_4K, }, + { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) }, + { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) }, - { "at26f004", 0x1f0400, 0, 64 * 1024, 8, SECT_4K, }, - { "at26df081a", 0x1f4501, 0, 64 * 1024, 16, SECT_4K, }, - { "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, }, - { "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, }, + { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, + { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) }, + { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, + { "at26df321", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) }, /* Macronix */ - { "mx25l3205d", 0xc22016, 0, 64 * 1024, 64, }, - { "mx25l6405d", 0xc22017, 0, 64 * 1024, 128, }, - { "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, }, - { "mx25l12855e", 0xc22618, 0, 64 * 1024, 256, }, + { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) }, + { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) }, + { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, + { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, /* Spansion -- single (large) sector size only, at least * for the chips listed here (without boot sectors). */ - { "s25sl004a", 0x010212, 0, 64 * 1024, 8, }, - { "s25sl008a", 0x010213, 0, 64 * 1024, 16, }, - { "s25sl016a", 0x010214, 0, 64 * 1024, 32, }, - { "s25sl032a", 0x010215, 0, 64 * 1024, 64, }, - { "s25sl064a", 0x010216, 0, 64 * 1024, 128, }, - { "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, }, - { "s25sl12801", 0x012018, 0x0301, 64 * 1024, 256, }, - { "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, }, - { "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, }, + { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) }, + { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) }, + { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) }, + { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) }, + { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) }, + { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, + { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, + { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) }, + { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) }, /* SST -- large erase sizes are "overlays", "sectors" are 4K */ - { "sst25vf040b", 0xbf258d, 0, 64 * 1024, 8, SECT_4K, }, - { "sst25vf080b", 0xbf258e, 0, 64 * 1024, 16, SECT_4K, }, - { "sst25vf016b", 0xbf2541, 0, 64 * 1024, 32, SECT_4K, }, - { "sst25vf032b", 0xbf254a, 0, 64 * 1024, 64, SECT_4K, }, - { "sst25wf512", 0xbf2501, 0, 64 * 1024, 1, SECT_4K, }, - { "sst25wf010", 0xbf2502, 0, 64 * 1024, 2, SECT_4K, }, - { "sst25wf020", 0xbf2503, 0, 64 * 1024, 4, SECT_4K, }, - { "sst25wf040", 0xbf2504, 0, 64 * 1024, 8, SECT_4K, }, + { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) }, + { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) }, + { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) }, + { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) }, + { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K) }, + { "sst25wf010", INFO(0xbf2502, 
0, 64 * 1024, 2, SECT_4K) }, + { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K) }, + { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K) }, /* ST Microelectronics -- newer production may have feature updates */ - { "m25p05", 0x202010, 0, 32 * 1024, 2, }, - { "m25p10", 0x202011, 0, 32 * 1024, 4, }, - { "m25p20", 0x202012, 0, 64 * 1024, 4, }, - { "m25p40", 0x202013, 0, 64 * 1024, 8, }, - { "m25p80", 0, 0, 64 * 1024, 16, }, - { "m25p16", 0x202015, 0, 64 * 1024, 32, }, - { "m25p32", 0x202016, 0, 64 * 1024, 64, }, - { "m25p64", 0x202017, 0, 64 * 1024, 128, }, - { "m25p128", 0x202018, 0, 256 * 1024, 64, }, - - { "m45pe10", 0x204011, 0, 64 * 1024, 2, }, - { "m45pe80", 0x204014, 0, 64 * 1024, 16, }, - { "m45pe16", 0x204015, 0, 64 * 1024, 32, }, - - { "m25pe80", 0x208014, 0, 64 * 1024, 16, }, - { "m25pe16", 0x208015, 0, 64 * 1024, 32, SECT_4K, }, + { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) }, + { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) }, + { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) }, + { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) }, + { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) }, + { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) }, + { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) }, + { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) }, + { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) }, + + { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) }, + { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) }, + { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) }, + + { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, + { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ - { "w25x10", 0xef3011, 0, 64 * 1024, 2, SECT_4K, }, - { "w25x20", 0xef3012, 0, 64 * 1024, 4, SECT_4K, }, - { "w25x40", 0xef3013, 0, 64 * 1024, 8, SECT_4K, }, - { "w25x80", 0xef3014, 0, 64 * 1024, 16, SECT_4K, }, - { "w25x16", 0xef3015, 0, 64 * 1024, 32, SECT_4K, }, - { "w25x32", 0xef3016, 0, 64 * 1024, 64, SECT_4K, }, - { "w25x64", 0xef3017, 0, 64 * 1024, 128, SECT_4K, }, + { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, + { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) }, + { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) }, + { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, + { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, + { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, + { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, + + /* Catalyst / On Semiconductor -- non-JEDEC */ + { "cat25c11", CAT25_INFO( 16, 8, 16, 1) }, + { "cat25c03", CAT25_INFO( 32, 8, 16, 2) }, + { "cat25c09", CAT25_INFO( 128, 8, 32, 2) }, + { "cat25c17", CAT25_INFO( 256, 8, 32, 2) }, + { "cat25128", CAT25_INFO(2048, 8, 64, 2) }, + { }, }; +MODULE_DEVICE_TABLE(spi, m25p_ids); -static struct flash_info *__devinit jedec_probe(struct spi_device *spi) +static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi) { int tmp; u8 code = OPCODE_RDID; @@ -701,18 +728,24 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi) jedec = jedec << 8; jedec |= id[2]; + /* + * Some chips (like Numonyx M25P80) have JEDEC and non-JEDEC variants, + * which depend on technology process. Officially RDID command doesn't + * exist for non-JEDEC chips, but for compatibility they return ID 0. 
+ */ + if (jedec == 0) + return NULL; + ext_jedec = id[3] << 8 | id[4]; - for (tmp = 0, info = m25p_data; - tmp < ARRAY_SIZE(m25p_data); - tmp++, info++) { + for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) { + info = (void *)m25p_ids[tmp].driver_data; if (info->jedec_id == jedec) { if (info->ext_id != 0 && info->ext_id != ext_jedec) continue; - return info; + return &m25p_ids[tmp]; } } - dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec); return NULL; } @@ -724,6 +757,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi) */ static int __devinit m25p_probe(struct spi_device *spi) { + const struct spi_device_id *id = spi_get_device_id(spi); struct flash_platform_data *data; struct m25p *flash; struct flash_info *info; @@ -736,39 +770,53 @@ static int __devinit m25p_probe(struct spi_device *spi) */ data = spi->dev.platform_data; if (data && data->type) { - for (i = 0, info = m25p_data; - i < ARRAY_SIZE(m25p_data); - i++, info++) { - if (strcmp(data->type, info->name) == 0) - break; - } + const struct spi_device_id *plat_id; - /* unrecognized chip? */ - if (i == ARRAY_SIZE(m25p_data)) { - DEBUG(MTD_DEBUG_LEVEL0, "%s: unrecognized id %s\n", - dev_name(&spi->dev), data->type); - info = NULL; - - /* recognized; is that chip really what's there? */ - } else if (info->jedec_id) { - struct flash_info *chip = jedec_probe(spi); - - if (!chip || chip != info) { - dev_warn(&spi->dev, "found %s, expected %s\n", - chip ? chip->name : "UNKNOWN", - info->name); - info = NULL; - } + for (i = 0; i < ARRAY_SIZE(m25p_ids) - 1; i++) { + plat_id = &m25p_ids[i]; + if (strcmp(data->type, plat_id->name)) + continue; + break; } - } else - info = jedec_probe(spi); - if (!info) - return -ENODEV; + if (plat_id) + id = plat_id; + else + dev_warn(&spi->dev, "unrecognized id %s\n", data->type); + } + + info = (void *)id->driver_data; + + if (info->jedec_id) { + const struct spi_device_id *jid; + + jid = jedec_probe(spi); + if (!jid) { + dev_info(&spi->dev, "non-JEDEC variant of %s\n", + id->name); + } else if (jid != id) { + /* + * JEDEC knows better, so overwrite platform ID. We + * can't trust partitions any longer, but we'll let + * mtd apply them anyway, since some partitions may be + * marked read-only, and we don't want to lose that + * information, even if it's not 100% accurate. 
+ */ + dev_warn(&spi->dev, "found %s, expected %s\n", + jid->name, id->name); + id = jid; + info = (void *)jid->driver_data; + } + } flash = kzalloc(sizeof *flash, GFP_KERNEL); if (!flash) return -ENOMEM; + flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL); + if (!flash->command) { + kfree(flash); + return -ENOMEM; + } flash->spi = spi; mutex_init(&flash->lock); @@ -812,9 +860,14 @@ static int __devinit m25p_probe(struct spi_device *spi) flash->mtd.erasesize = info->sector_size; } + if (info->flags & M25P_NO_ERASE) + flash->mtd.flags |= MTD_NO_ERASE; + flash->mtd.dev.parent = &spi->dev; + flash->page_size = info->page_size; + flash->addr_width = info->addr_width; - dev_info(&spi->dev, "%s (%lld Kbytes)\n", info->name, + dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name, (long long)flash->mtd.size >> 10); DEBUG(MTD_DEBUG_LEVEL2, @@ -888,8 +941,10 @@ static int __devexit m25p_remove(struct spi_device *spi) status = del_mtd_partitions(&flash->mtd); else status = del_mtd_device(&flash->mtd); - if (status == 0) + if (status == 0) { + kfree(flash->command); kfree(flash); + } return 0; } @@ -900,6 +955,7 @@ static struct spi_driver m25p80_driver = { .bus = &spi_bus_type, .owner = THIS_MODULE, }, + .id_table = m25p_ids, .probe = m25p_probe, .remove = __devexit_p(m25p_remove), diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 34f3cddb384..3078d6d0112 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_MTD_CEIVA) += ceiva.o obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o obj-$(CONFIG_MTD_PHYSMAP) += physmap.o obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o +obj-$(CONFIG_MTD_PISMO) += pismo.o obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c index 7214b876feb..7b051529741 100644 --- a/drivers/mtd/maps/ixp4xx.c +++ b/drivers/mtd/maps/ixp4xx.c @@ -210,7 +210,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev) * not attempt to do a direct access on us. */ info->map.phys = NO_XIP; - info->map.size = dev->resource->end - dev->resource->start + 1; + info->map.size = resource_size(dev->resource); /* * We only support 16-bit accesses for now. 
If and when @@ -224,7 +224,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev) info->map.copy_from = ixp4xx_copy_from, info->res = request_mem_region(dev->resource->start, - dev->resource->end - dev->resource->start + 1, + resource_size(dev->resource), "IXP4XXFlash"); if (!info->res) { printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n"); @@ -233,7 +233,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev) } info->map.virt = ioremap(dev->resource->start, - dev->resource->end - dev->resource->start + 1); + resource_size(dev->resource)); if (!info->map.virt) { printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n"); err = -EIO; diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index 3f13a9673e7..d9603f7f965 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c @@ -129,7 +129,7 @@ static int physmap_flash_probe(struct platform_device *dev) info->map[i].size); if (info->map[i].virt == NULL) { dev_err(&dev->dev, "Failed to ioremap flash region\n"); - err = EIO; + err = -EIO; goto err_out; } diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c index 1f73297e777..82afad0ddd7 100644 --- a/drivers/mtd/maps/vmu-flash.c +++ b/drivers/mtd/maps/vmu-flash.c @@ -612,16 +612,15 @@ static int __devinit vmu_connect(struct maple_device *mdev) test_flash_data = be32_to_cpu(mdev->devinfo.function); /* Need to count how many bits are set - to find out which - * function_data element has details of the memory card: - * using Brian Kernighan's/Peter Wegner's method */ - for (c = 0; test_flash_data; c++) - test_flash_data &= test_flash_data - 1; + * function_data element has details of the memory card + */ + c = hweight_long(test_flash_data); basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]); card = kmalloc(sizeof(struct memcard), GFP_KERNEL); if (!card) { - error = ENOMEM; + error = -ENOMEM; goto fail_nomem; } diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 955226d7480..fa83757e7c9 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -91,9 +91,6 @@ static int mtd_blktrans_thread(void *arg) struct request_queue *rq = tr->blkcore_priv->rq; struct request *req = NULL; - /* we might get involved when memory gets low, so use PF_MEMALLOC */ - current->flags |= PF_MEMALLOC; - spin_lock_irq(rq->queue_lock); while (!kthread_should_stop()) { diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 467a4f177bf..c356c0a30c3 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -447,7 +447,7 @@ struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num) for (i=0; i< MAX_MTD_DEVICES; i++) if (mtd_table[i] == mtd) ret = mtd_table[i]; - } else if (num < MAX_MTD_DEVICES) { + } else if (num >= 0 && num < MAX_MTD_DEVICES) { ret = mtd_table[num]; if (mtd && mtd != ret) ret = NULL; diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 1060337c06d..a714ec48276 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -29,14 +29,34 @@ #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> -#include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/mtd/mtd.h> +#include <linux/kmsg_dump.h> + +/* Maximum MTD partition size */ +#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024) #define MTDOOPS_KERNMSG_MAGIC 0x5d005d00 -#define OOPS_PAGE_SIZE 4096 +#define MTDOOPS_HEADER_SIZE 8 + +static unsigned long record_size = 4096; +module_param(record_size, ulong, 0400); +MODULE_PARM_DESC(record_size, + "record size for MTD 
OOPS pages in bytes (default 4096)"); + +static char mtddev[80]; +module_param_string(mtddev, mtddev, 80, 0400); +MODULE_PARM_DESC(mtddev, + "name or index number of the MTD device to use"); + +static int dump_oops = 1; +module_param(dump_oops, int, 0600); +MODULE_PARM_DESC(dump_oops, + "set to 1 to dump oopses, 0 to only dump panics (default 1)"); static struct mtdoops_context { + struct kmsg_dumper dump; + int mtd_index; struct work_struct work_erase; struct work_struct work_write; @@ -44,28 +64,43 @@ static struct mtdoops_context { int oops_pages; int nextpage; int nextcount; - char *name; + unsigned long *oops_page_used; void *oops_buf; - - /* writecount and disabling ready are spin lock protected */ - spinlock_t writecount_lock; - int ready; - int writecount; } oops_cxt; +static void mark_page_used(struct mtdoops_context *cxt, int page) +{ + set_bit(page, cxt->oops_page_used); +} + +static void mark_page_unused(struct mtdoops_context *cxt, int page) +{ + clear_bit(page, cxt->oops_page_used); +} + +static int page_is_used(struct mtdoops_context *cxt, int page) +{ + return test_bit(page, cxt->oops_page_used); +} + static void mtdoops_erase_callback(struct erase_info *done) { wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; wake_up(wait_q); } -static int mtdoops_erase_block(struct mtd_info *mtd, int offset) +static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset) { + struct mtd_info *mtd = cxt->mtd; + u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize; + u32 start_page = start_page_offset / record_size; + u32 erase_pages = mtd->erasesize / record_size; struct erase_info erase; DECLARE_WAITQUEUE(wait, current); wait_queue_head_t wait_q; int ret; + int page; init_waitqueue_head(&wait_q); erase.mtd = mtd; @@ -81,25 +116,24 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset) if (ret) { set_current_state(TASK_RUNNING); remove_wait_queue(&wait_q, &wait); - printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] " - "on \"%s\" failed\n", - (unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name); + printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n", + (unsigned long long)erase.addr, + (unsigned long long)erase.len, mtddev); return ret; } schedule(); /* Wait for erase to finish. 
*/ remove_wait_queue(&wait_q, &wait); + /* Mark pages as unused */ + for (page = start_page; page < start_page + erase_pages; page++) + mark_page_unused(cxt, page); + return 0; } static void mtdoops_inc_counter(struct mtdoops_context *cxt) { - struct mtd_info *mtd = cxt->mtd; - size_t retlen; - u32 count; - int ret; - cxt->nextpage++; if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; @@ -107,25 +141,13 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt) if (cxt->nextcount == 0xffffffff) cxt->nextcount = 0; - ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4, - &retlen, (u_char *) &count); - if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) { - printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)" - ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE, - retlen, ret); + if (page_is_used(cxt, cxt->nextpage)) { schedule_work(&cxt->work_erase); return; } - /* See if we need to erase the next block */ - if (count != 0xffffffff) { - schedule_work(&cxt->work_erase); - return; - } - - printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n", - cxt->nextpage, cxt->nextcount); - cxt->ready = 1; + printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n", + cxt->nextpage, cxt->nextcount); } /* Scheduled work - when we can't proceed without erasing a block */ @@ -140,47 +162,47 @@ static void mtdoops_workfunc_erase(struct work_struct *work) if (!mtd) return; - mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize; + mod = (cxt->nextpage * record_size) % mtd->erasesize; if (mod != 0) { - cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE); + cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size); if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; } while (mtd->block_isbad) { - ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); + ret = mtd->block_isbad(mtd, cxt->nextpage * record_size); if (!ret) break; if (ret < 0) { - printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n"); + printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n"); return; } badblock: - printk(KERN_WARNING "mtdoops: Bad block at %08x\n", - cxt->nextpage * OOPS_PAGE_SIZE); + printk(KERN_WARNING "mtdoops: bad block at %08lx\n", + cxt->nextpage * record_size); i++; - cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE); + cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size); if (cxt->nextpage >= cxt->oops_pages) cxt->nextpage = 0; - if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) { - printk(KERN_ERR "mtdoops: All blocks bad!\n"); + if (i == cxt->oops_pages / (mtd->erasesize / record_size)) { + printk(KERN_ERR "mtdoops: all blocks bad!\n"); return; } } for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) - ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE); + ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size); if (ret >= 0) { - printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount); - cxt->ready = 1; + printk(KERN_DEBUG "mtdoops: ready %d, %d\n", + cxt->nextpage, cxt->nextcount); return; } - if (mtd->block_markbad && (ret == -EIO)) { - ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); + if (mtd->block_markbad && ret == -EIO) { + ret = mtd->block_markbad(mtd, cxt->nextpage * record_size); if (ret < 0) { - printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n"); + printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n"); return; } } @@ -191,36 +213,37 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic) { struct mtd_info *mtd = cxt->mtd; 
size_t retlen; + u32 *hdr; int ret; - if (cxt->writecount < OOPS_PAGE_SIZE) - memset(cxt->oops_buf + cxt->writecount, 0xff, - OOPS_PAGE_SIZE - cxt->writecount); + /* Add mtdoops header to the buffer */ + hdr = cxt->oops_buf; + hdr[0] = cxt->nextcount; + hdr[1] = MTDOOPS_KERNMSG_MAGIC; if (panic) - ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, - OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); + ret = mtd->panic_write(mtd, cxt->nextpage * record_size, + record_size, &retlen, cxt->oops_buf); else - ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, - OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); - - cxt->writecount = 0; + ret = mtd->write(mtd, cxt->nextpage * record_size, + record_size, &retlen, cxt->oops_buf); - if ((retlen != OOPS_PAGE_SIZE) || (ret < 0)) - printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n", - cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); + if (retlen != record_size || ret < 0) + printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n", + cxt->nextpage * record_size, retlen, record_size, ret); + mark_page_used(cxt, cxt->nextpage); + memset(cxt->oops_buf, 0xff, record_size); mtdoops_inc_counter(cxt); } - static void mtdoops_workfunc_write(struct work_struct *work) { struct mtdoops_context *cxt = container_of(work, struct mtdoops_context, work_write); mtdoops_write(cxt, 0); -} +} static void find_next_position(struct mtdoops_context *cxt) { @@ -230,28 +253,33 @@ static void find_next_position(struct mtdoops_context *cxt) size_t retlen; for (page = 0; page < cxt->oops_pages; page++) { - ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]); - if ((retlen != 8) || ((ret < 0) && (ret != -EUCLEAN))) { - printk(KERN_ERR "mtdoops: Read failure at %d (%td of 8 read)" - ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret); + /* Assume the page is used */ + mark_page_used(cxt, page); + ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE, + &retlen, (u_char *) &count[0]); + if (retlen != MTDOOPS_HEADER_SIZE || + (ret < 0 && ret != -EUCLEAN)) { + printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n", + page * record_size, retlen, + MTDOOPS_HEADER_SIZE, ret); continue; } - if (count[1] != MTDOOPS_KERNMSG_MAGIC) - continue; + if (count[0] == 0xffffffff && count[1] == 0xffffffff) + mark_page_unused(cxt, page); if (count[0] == 0xffffffff) continue; if (maxcount == 0xffffffff) { maxcount = count[0]; maxpos = page; - } else if ((count[0] < 0x40000000) && (maxcount > 0xc0000000)) { + } else if (count[0] < 0x40000000 && maxcount > 0xc0000000) { maxcount = count[0]; maxpos = page; - } else if ((count[0] > maxcount) && (count[0] < 0xc0000000)) { + } else if (count[0] > maxcount && count[0] < 0xc0000000) { maxcount = count[0]; maxpos = page; - } else if ((count[0] > maxcount) && (count[0] > 0xc0000000) - && (maxcount > 0x80000000)) { + } else if (count[0] > maxcount && count[0] > 0xc0000000 + && maxcount > 0x80000000) { maxcount = count[0]; maxpos = page; } @@ -269,187 +297,170 @@ static void find_next_position(struct mtdoops_context *cxt) mtdoops_inc_counter(cxt); } - -static void mtdoops_notify_add(struct mtd_info *mtd) +static void mtdoops_do_dump(struct kmsg_dumper *dumper, + enum kmsg_dump_reason reason, const char *s1, unsigned long l1, + const char *s2, unsigned long l2) { - struct mtdoops_context *cxt = &oops_cxt; + struct mtdoops_context *cxt = container_of(dumper, + struct mtdoops_context, dump); + unsigned long s1_start, s2_start; + unsigned long l1_cpy, l2_cpy; + 
char *dst; + + /* Only dump oopses if dump_oops is set */ + if (reason == KMSG_DUMP_OOPS && !dump_oops) + return; - if (cxt->name && !strcmp(mtd->name, cxt->name)) - cxt->mtd_index = mtd->index; + dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */ + l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE); + l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy); - if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) - return; + s2_start = l2 - l2_cpy; + s1_start = l1 - l1_cpy; - if (mtd->size < (mtd->erasesize * 2)) { - printk(KERN_ERR "MTD partition %d not big enough for mtdoops\n", - mtd->index); - return; - } + memcpy(dst, s1 + s1_start, l1_cpy); + memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); - if (mtd->erasesize < OOPS_PAGE_SIZE) { - printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n", - mtd->index); + /* Panics must be written immediately */ + if (reason == KMSG_DUMP_PANIC) { + if (!cxt->mtd->panic_write) + printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n"); + else + mtdoops_write(cxt, 1); return; } - cxt->mtd = mtd; - if (mtd->size > INT_MAX) - cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE; - else - cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE; - - find_next_position(cxt); - - printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index); + /* For other cases, schedule work to write it "nicely" */ + schedule_work(&cxt->work_write); } -static void mtdoops_notify_remove(struct mtd_info *mtd) +static void mtdoops_notify_add(struct mtd_info *mtd) { struct mtdoops_context *cxt = &oops_cxt; + u64 mtdoops_pages = div_u64(mtd->size, record_size); + int err; - if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) - return; - - cxt->mtd = NULL; - flush_scheduled_work(); -} - -static void mtdoops_console_sync(void) -{ - struct mtdoops_context *cxt = &oops_cxt; - struct mtd_info *mtd = cxt->mtd; - unsigned long flags; + if (!strcmp(mtd->name, mtddev)) + cxt->mtd_index = mtd->index; - if (!cxt->ready || !mtd || cxt->writecount == 0) + if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) return; - /* - * Once ready is 0 and we've held the lock no further writes to the - * buffer will happen - */ - spin_lock_irqsave(&cxt->writecount_lock, flags); - if (!cxt->ready) { - spin_unlock_irqrestore(&cxt->writecount_lock, flags); + if (mtd->size < mtd->erasesize * 2) { + printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n", + mtd->index); return; } - cxt->ready = 0; - spin_unlock_irqrestore(&cxt->writecount_lock, flags); - - if (mtd->panic_write && in_interrupt()) - /* Interrupt context, we're going to panic so try and log */ - mtdoops_write(cxt, 1); - else - schedule_work(&cxt->work_write); -} - -static void -mtdoops_console_write(struct console *co, const char *s, unsigned int count) -{ - struct mtdoops_context *cxt = co->data; - struct mtd_info *mtd = cxt->mtd; - unsigned long flags; - - if (!oops_in_progress) { - mtdoops_console_sync(); + if (mtd->erasesize < record_size) { + printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n", + mtd->index); return; } - - if (!cxt->ready || !mtd) + if (mtd->size > MTDOOPS_MAX_MTD_SIZE) { + printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n", + mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024); return; + } - /* Locking on writecount ensures sequential writes to the buffer */ - spin_lock_irqsave(&cxt->writecount_lock, flags); - - /* Check ready status didn't change whilst waiting for the lock */ - if (!cxt->ready) { - 
spin_unlock_irqrestore(&cxt->writecount_lock, flags); + /* oops_page_used is a bit field */ + cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages, + BITS_PER_LONG)); + if (!cxt->oops_page_used) { + printk(KERN_ERR "mtdoops: could not allocate page array\n"); return; } - if (cxt->writecount == 0) { - u32 *stamp = cxt->oops_buf; - *stamp++ = cxt->nextcount; - *stamp = MTDOOPS_KERNMSG_MAGIC; - cxt->writecount = 8; + cxt->dump.dump = mtdoops_do_dump; + err = kmsg_dump_register(&cxt->dump); + if (err) { + printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err); + vfree(cxt->oops_page_used); + cxt->oops_page_used = NULL; + return; } - if ((count + cxt->writecount) > OOPS_PAGE_SIZE) - count = OOPS_PAGE_SIZE - cxt->writecount; - - memcpy(cxt->oops_buf + cxt->writecount, s, count); - cxt->writecount += count; - - spin_unlock_irqrestore(&cxt->writecount_lock, flags); - - if (cxt->writecount == OOPS_PAGE_SIZE) - mtdoops_console_sync(); + cxt->mtd = mtd; + cxt->oops_pages = (int)mtd->size / record_size; + find_next_position(cxt); + printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index); } -static int __init mtdoops_console_setup(struct console *co, char *options) +static void mtdoops_notify_remove(struct mtd_info *mtd) { - struct mtdoops_context *cxt = co->data; + struct mtdoops_context *cxt = &oops_cxt; - if (cxt->mtd_index != -1 || cxt->name) - return -EBUSY; - if (options) { - cxt->name = kstrdup(options, GFP_KERNEL); - return 0; - } - if (co->index == -1) - return -EINVAL; + if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) + return; - cxt->mtd_index = co->index; - return 0; + if (kmsg_dump_unregister(&cxt->dump) < 0) + printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n"); + + cxt->mtd = NULL; + flush_scheduled_work(); } + static struct mtd_notifier mtdoops_notifier = { .add = mtdoops_notify_add, .remove = mtdoops_notify_remove, }; -static struct console mtdoops_console = { - .name = "ttyMTD", - .write = mtdoops_console_write, - .setup = mtdoops_console_setup, - .unblank = mtdoops_console_sync, - .index = -1, - .data = &oops_cxt, -}; - -static int __init mtdoops_console_init(void) +static int __init mtdoops_init(void) { struct mtdoops_context *cxt = &oops_cxt; + int mtd_index; + char *endp; + if (strlen(mtddev) == 0) { + printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n"); + return -EINVAL; + } + if ((record_size & 4095) != 0) { + printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n"); + return -EINVAL; + } + if (record_size < 4096) { + printk(KERN_ERR "mtdoops: record_size must be over 4096 bytes\n"); + return -EINVAL; + } + + /* Setup the MTD device to use */ cxt->mtd_index = -1; - cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE); - spin_lock_init(&cxt->writecount_lock); + mtd_index = simple_strtoul(mtddev, &endp, 0); + if (*endp == '\0') + cxt->mtd_index = mtd_index; + if (cxt->mtd_index > MAX_MTD_DEVICES) { + printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n", + mtd_index); + return -EINVAL; + } + cxt->oops_buf = vmalloc(record_size); if (!cxt->oops_buf) { - printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n"); + printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n"); return -ENOMEM; } + memset(cxt->oops_buf, 0xff, record_size); INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase); INIT_WORK(&cxt->work_write, mtdoops_workfunc_write); - register_console(&mtdoops_console); register_mtd_user(&mtdoops_notifier); return 0; } -static void __exit mtdoops_console_exit(void) 
+static void __exit mtdoops_exit(void) { struct mtdoops_context *cxt = &oops_cxt; unregister_mtd_user(&mtdoops_notifier); - unregister_console(&mtdoops_console); - kfree(cxt->name); vfree(cxt->oops_buf); + vfree(cxt->oops_page_used); } -subsys_initcall(mtdoops_console_init); -module_exit(mtdoops_console_exit); +module_init(mtdoops_init); +module_exit(mtdoops_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>"); diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c index 6d9649159a1..2d6773281fd 100644 --- a/drivers/mtd/nand/alauda.c +++ b/drivers/mtd/nand/alauda.c @@ -372,15 +372,6 @@ static int alauda_read_oob(struct mtd_info *mtd, loff_t from, void *oob) return __alauda_read_page(mtd, from, ignore_buf, oob); } -static int popcount8(u8 c) -{ - int ret = 0; - - for ( ; c; c>>=1) - ret += c & 1; - return ret; -} - static int alauda_isbad(struct mtd_info *mtd, loff_t ofs) { u8 oob[16]; @@ -391,7 +382,7 @@ static int alauda_isbad(struct mtd_info *mtd, loff_t ofs) return err; /* A block is marked bad if two or more bits are zero */ - return popcount8(oob[5]) >= 7 ? 0 : 1; + return hweight8(oob[5]) >= 7 ? 0 : 1; } static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len, diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 4f62d207b87..524e6c9e067 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -463,7 +463,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev) if (host->board->det_pin) { if (gpio_get_value(host->board->det_pin)) { printk(KERN_INFO "No SmartMedia card inserted.\n"); - res = ENXIO; + res = -ENXIO; goto err_no_card; } } @@ -534,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev) if ((!partitions) || (num_partitions == 0)) { printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n"); - res = ENXIO; + res = -ENXIO; goto err_no_partitions; } diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 68cc9247fdb..fe3eba87de4 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -591,6 +591,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev) /* options such as NAND_USE_FLASH_BBT or 16-bit widths */ info->chip.options = pdata->options; + info->chip.bbt_td = pdata->bbt_td; + info->chip.bbt_md = pdata->bbt_md; info->ioaddr = (uint32_t __force) vaddr; diff --git a/drivers/mtd/nand/excite_nandflash.c b/drivers/mtd/nand/excite_nandflash.c index 72446fb48d4..af6a6a5399e 100644 --- a/drivers/mtd/nand/excite_nandflash.c +++ b/drivers/mtd/nand/excite_nandflash.c @@ -128,7 +128,7 @@ static int excite_nand_devready(struct mtd_info *mtd) * The binding to the mtd and all allocated * resources are released. 
*/ -static int __exit excite_nand_remove(struct platform_device *dev) +static int __devexit excite_nand_remove(struct platform_device *dev) { struct excite_nand_drvdata * const this = platform_get_drvdata(dev); diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index ddd37d2554e..ae30fb6eed9 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c @@ -237,12 +237,15 @@ static int fsl_elbc_run_command(struct mtd_info *mtd) ctrl->use_mdr = 0; - dev_vdbg(ctrl->dev, - "fsl_elbc_run_command: stat=%08x mdr=%08x fmr=%08x\n", - ctrl->status, ctrl->mdr, in_be32(&lbc->fmr)); + if (ctrl->status != LTESR_CC) { + dev_info(ctrl->dev, + "command failed: fir %x fcr %x status %x mdr %x\n", + in_be32(&lbc->fir), in_be32(&lbc->fcr), + ctrl->status, ctrl->mdr); + return -EIO; + } - /* returns 0 on success otherwise non-zero) */ - return ctrl->status == LTESR_CC ? 0 : -EIO; + return 0; } static void fsl_elbc_do_read(struct nand_chip *chip, int oob) @@ -253,17 +256,17 @@ static void fsl_elbc_do_read(struct nand_chip *chip, int oob) if (priv->page_size) { out_be32(&lbc->fir, - (FIR_OP_CW0 << FIR_OP0_SHIFT) | + (FIR_OP_CM0 << FIR_OP0_SHIFT) | (FIR_OP_CA << FIR_OP1_SHIFT) | (FIR_OP_PA << FIR_OP2_SHIFT) | - (FIR_OP_CW1 << FIR_OP3_SHIFT) | + (FIR_OP_CM1 << FIR_OP3_SHIFT) | (FIR_OP_RBW << FIR_OP4_SHIFT)); out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) | (NAND_CMD_READSTART << FCR_CMD1_SHIFT)); } else { out_be32(&lbc->fir, - (FIR_OP_CW0 << FIR_OP0_SHIFT) | + (FIR_OP_CM0 << FIR_OP0_SHIFT) | (FIR_OP_CA << FIR_OP1_SHIFT) | (FIR_OP_PA << FIR_OP2_SHIFT) | (FIR_OP_RBW << FIR_OP3_SHIFT)); @@ -332,7 +335,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command, case NAND_CMD_READID: dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n"); - out_be32(&lbc->fir, (FIR_OP_CW0 << FIR_OP0_SHIFT) | + out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) | (FIR_OP_UA << FIR_OP1_SHIFT) | (FIR_OP_RBW << FIR_OP2_SHIFT)); out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT); @@ -359,16 +362,20 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command, dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n"); out_be32(&lbc->fir, - (FIR_OP_CW0 << FIR_OP0_SHIFT) | + (FIR_OP_CM0 << FIR_OP0_SHIFT) | (FIR_OP_PA << FIR_OP1_SHIFT) | - (FIR_OP_CM1 << FIR_OP2_SHIFT)); + (FIR_OP_CM2 << FIR_OP2_SHIFT) | + (FIR_OP_CW1 << FIR_OP3_SHIFT) | + (FIR_OP_RS << FIR_OP4_SHIFT)); out_be32(&lbc->fcr, (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) | - (NAND_CMD_ERASE2 << FCR_CMD1_SHIFT)); + (NAND_CMD_STATUS << FCR_CMD1_SHIFT) | + (NAND_CMD_ERASE2 << FCR_CMD2_SHIFT)); out_be32(&lbc->fbcr, 0); ctrl->read_bytes = 0; + ctrl->use_mdr = 1; fsl_elbc_run_command(mtd); return; @@ -383,40 +390,41 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command, ctrl->column = column; ctrl->oob = 0; + ctrl->use_mdr = 1; - if (priv->page_size) { - fcr = (NAND_CMD_SEQIN << FCR_CMD0_SHIFT) | - (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT); + fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) | + (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) | + (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT); + if (priv->page_size) { out_be32(&lbc->fir, - (FIR_OP_CW0 << FIR_OP0_SHIFT) | + (FIR_OP_CM2 << FIR_OP0_SHIFT) | (FIR_OP_CA << FIR_OP1_SHIFT) | (FIR_OP_PA << FIR_OP2_SHIFT) | (FIR_OP_WB << FIR_OP3_SHIFT) | - (FIR_OP_CW1 << FIR_OP4_SHIFT)); + (FIR_OP_CM3 << FIR_OP4_SHIFT) | + (FIR_OP_CW1 << FIR_OP5_SHIFT) | + (FIR_OP_RS << FIR_OP6_SHIFT)); } else { - fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) | - (NAND_CMD_SEQIN << 
FCR_CMD2_SHIFT); - out_be32(&lbc->fir, - (FIR_OP_CW0 << FIR_OP0_SHIFT) | + (FIR_OP_CM0 << FIR_OP0_SHIFT) | (FIR_OP_CM2 << FIR_OP1_SHIFT) | (FIR_OP_CA << FIR_OP2_SHIFT) | (FIR_OP_PA << FIR_OP3_SHIFT) | (FIR_OP_WB << FIR_OP4_SHIFT) | - (FIR_OP_CW1 << FIR_OP5_SHIFT)); + (FIR_OP_CM3 << FIR_OP5_SHIFT) | + (FIR_OP_CW1 << FIR_OP6_SHIFT) | + (FIR_OP_RS << FIR_OP7_SHIFT)); if (column >= mtd->writesize) { /* OOB area --> READOOB */ column -= mtd->writesize; fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT; ctrl->oob = 1; - } else if (column < 256) { + } else { + WARN_ON(column != 0); /* First 256 bytes --> READ0 */ fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT; - } else { - /* Second 256 bytes --> READ1 */ - fcr |= NAND_CMD_READ1 << FCR_CMD0_SHIFT; } } @@ -628,22 +636,6 @@ static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip) { struct fsl_elbc_mtd *priv = chip->priv; struct fsl_elbc_ctrl *ctrl = priv->ctrl; - struct fsl_lbc_regs __iomem *lbc = ctrl->regs; - - if (ctrl->status != LTESR_CC) - return NAND_STATUS_FAIL; - - /* Use READ_STATUS command, but wait for the device to be ready */ - ctrl->use_mdr = 0; - out_be32(&lbc->fir, - (FIR_OP_CW0 << FIR_OP0_SHIFT) | - (FIR_OP_RBW << FIR_OP1_SHIFT)); - out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT); - out_be32(&lbc->fbcr, 1); - set_addr(mtd, 0, 0, 0); - ctrl->read_bytes = 1; - - fsl_elbc_run_command(mtd); if (ctrl->status != LTESR_CC) return NAND_STATUS_FAIL; @@ -651,8 +643,7 @@ static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip) /* The chip always seems to report that it is * write-protected, even when it is not. */ - setbits8(ctrl->addr, NAND_STATUS_WP); - return fsl_elbc_read_byte(mtd); + return (ctrl->mdr & 0xff) | NAND_STATUS_WP; } static int fsl_elbc_chip_init_tail(struct mtd_info *mtd) @@ -946,6 +937,13 @@ static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl) { struct fsl_lbc_regs __iomem *lbc = ctrl->regs; + /* + * NAND transactions can tie up the bus for a long time, so set the + * bus timeout to max by clearing LBCR[BMT] (highest base counter + * value) and setting LBCR[BMTPS] to the highest prescaler value. 
+ */ + clrsetbits_be32(&lbc->lbcr, LBCR_BMT, 15); + /* clear event registers */ setbits32(&lbc->ltesr, LTESR_NAND_MASK); out_be32(&lbc->lteatr, 0); diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c index d120cd8d726..071a60cb420 100644 --- a/drivers/mtd/nand/fsl_upm.c +++ b/drivers/mtd/nand/fsl_upm.c @@ -112,7 +112,7 @@ static void fun_select_chip(struct mtd_info *mtd, int mchip_nr) if (mchip_nr == -1) { chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE); - } else if (mchip_nr >= 0) { + } else if (mchip_nr >= 0 && mchip_nr < NAND_MAX_CHIPS) { fun->mchip_number = mchip_nr; chip->IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr]; chip->IO_ADDR_W = chip->IO_ADDR_R; diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 53e94121c26..f7366e99fe1 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -922,7 +922,7 @@ static struct platform_driver mxcnd_driver = { .driver = { .name = DRIVER_NAME, }, - .remove = __exit_p(mxcnd_remove), + .remove = __devexit_p(mxcnd_remove), .suspend = mxcnd_suspend, .resume = mxcnd_resume, }; diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 2957cc70da3..8f2958fe214 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -428,6 +428,28 @@ static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip, return nand_isbad_bbt(mtd, ofs, allowbbt); } +/** + * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands. + * @mtd: MTD device structure + * @timeo: Timeout + * + * Helper function for nand_wait_ready used when needing to wait in interrupt + * context. + */ +static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo) +{ + struct nand_chip *chip = mtd->priv; + int i; + + /* Wait for the device to get ready */ + for (i = 0; i < timeo; i++) { + if (chip->dev_ready(mtd)) + break; + touch_softlockup_watchdog(); + mdelay(1); + } +} + /* * Wait for the ready pin, after a command * The timeout is catched later. @@ -437,6 +459,10 @@ void nand_wait_ready(struct mtd_info *mtd) struct nand_chip *chip = mtd->priv; unsigned long timeo = jiffies + 2; + /* 400ms timeout */ + if (in_interrupt() || oops_in_progress) + return panic_nand_wait_ready(mtd, 400); + led_trigger_event(nand_led_trigger, LED_FULL); /* wait until command is processed or timeout occures */ do { @@ -672,6 +698,22 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, } /** + * panic_nand_get_device - [GENERIC] Get chip for selected access + * @chip: the nand chip descriptor + * @mtd: MTD device structure + * @new_state: the state which is requested + * + * Used when in panic, no locks are taken. + */ +static void panic_nand_get_device(struct nand_chip *chip, + struct mtd_info *mtd, int new_state) +{ + /* Hardware controller shared among independend devices */ + chip->controller->active = chip; + chip->state = new_state; +} + +/** * nand_get_device - [GENERIC] Get chip for selected access * @chip: the nand chip descriptor * @mtd: MTD device structure @@ -698,8 +740,14 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state) return 0; } if (new_state == FL_PM_SUSPENDED) { - spin_unlock(lock); - return (chip->state == FL_PM_SUSPENDED) ? 
0 : -EAGAIN; + if (chip->controller->active->state == FL_PM_SUSPENDED) { + chip->state = FL_PM_SUSPENDED; + spin_unlock(lock); + return 0; + } else { + spin_unlock(lock); + return -EAGAIN; + } } set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(wq, &wait); @@ -710,6 +758,32 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state) } /** + * panic_nand_wait - [GENERIC] wait until the command is done + * @mtd: MTD device structure + * @chip: NAND chip structure + * @timeo: Timeout + * + * Wait for command done. This is a helper function for nand_wait used when + * we are in interrupt context. May happen when in panic and trying to write + * an oops trough mtdoops. + */ +static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip, + unsigned long timeo) +{ + int i; + for (i = 0; i < timeo; i++) { + if (chip->dev_ready) { + if (chip->dev_ready(mtd)) + break; + } else { + if (chip->read_byte(mtd) & NAND_STATUS_READY) + break; + } + mdelay(1); + } +} + +/** * nand_wait - [DEFAULT] wait until the command is done * @mtd: MTD device structure * @chip: NAND chip structure @@ -740,15 +814,19 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) else chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); - while (time_before(jiffies, timeo)) { - if (chip->dev_ready) { - if (chip->dev_ready(mtd)) - break; - } else { - if (chip->read_byte(mtd) & NAND_STATUS_READY) - break; + if (in_interrupt() || oops_in_progress) + panic_nand_wait(mtd, chip, timeo); + else { + while (time_before(jiffies, timeo)) { + if (chip->dev_ready) { + if (chip->dev_ready(mtd)) + break; + } else { + if (chip->read_byte(mtd) & NAND_STATUS_READY) + break; + } + cond_resched(); } - cond_resched(); } led_trigger_event(nand_led_trigger, LED_OFF); @@ -1949,6 +2027,45 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, } /** + * panic_nand_write - [MTD Interface] NAND write with ECC + * @mtd: MTD device structure + * @to: offset to write to + * @len: number of bytes to write + * @retlen: pointer to variable to store the number of written bytes + * @buf: the data to write + * + * NAND write with ECC. Used when performing writes in interrupt context, this + * may for example be called by mtdoops when writing an oops while in panic. + */ +static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const uint8_t *buf) +{ + struct nand_chip *chip = mtd->priv; + int ret; + + /* Do not allow reads past end of device */ + if ((to + len) > mtd->size) + return -EINVAL; + if (!len) + return 0; + + /* Wait for the device to get ready. */ + panic_nand_wait(mtd, chip, 400); + + /* Grab the device. 
*/ + panic_nand_get_device(chip, mtd, FL_WRITING); + + chip->ops.len = len; + chip->ops.datbuf = (uint8_t *)buf; + chip->ops.oobbuf = NULL; + + ret = nand_do_write_ops(mtd, to, &chip->ops); + + *retlen = chip->ops.retlen; + return ret; +} + +/** * nand_write - [MTD Interface] NAND write with ECC * @mtd: MTD device structure * @to: offset to write to @@ -2645,7 +2762,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips) type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id); if (IS_ERR(type)) { - printk(KERN_WARNING "No NAND device found!!!\n"); + if (!(chip->options & NAND_SCAN_SILENT_NODEV)) + printk(KERN_WARNING "No NAND device found.\n"); chip->select_chip(mtd, -1); return PTR_ERR(type); } @@ -2877,6 +2995,7 @@ int nand_scan_tail(struct mtd_info *mtd) mtd->unpoint = NULL; mtd->read = nand_read; mtd->write = nand_write; + mtd->panic_write = panic_nand_write; mtd->read_oob = nand_read_oob; mtd->write_oob = nand_write_oob; mtd->sync = nand_sync; diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c index db7ae9d6a29..809fb53304a 100644 --- a/drivers/mtd/nand/nand_ecc.c +++ b/drivers/mtd/nand/nand_ecc.c @@ -150,20 +150,19 @@ static const char addressbits[256] = { }; /** - * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte + * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte * block - * @mtd: MTD block structure * @buf: input buffer with raw data + * @eccsize: data bytes per ecc step (256 or 512) * @code: output buffer with ECC */ -int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, +void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, unsigned char *code) { int i; const uint32_t *bp = (uint32_t *)buf; /* 256 or 512 bytes/ecc */ - const uint32_t eccsize_mult = - (((struct nand_chip *)mtd->priv)->ecc.size) >> 8; + const uint32_t eccsize_mult = eccsize >> 8; uint32_t cur; /* current value in buffer */ /* rp0..rp15..rp17 are the various accumulated parities (per byte) */ uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; @@ -412,6 +411,22 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, (invparity[par & 0x55] << 2) | (invparity[rp17] << 1) | (invparity[rp16] << 0); +} +EXPORT_SYMBOL(__nand_calculate_ecc); + +/** + * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte + * block + * @mtd: MTD block structure + * @buf: input buffer with raw data + * @code: output buffer with ECC + */ +int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, + unsigned char *code) +{ + __nand_calculate_ecc(buf, + ((struct nand_chip *)mtd->priv)->ecc.size, code); + return 0; } EXPORT_SYMBOL(nand_calculate_ecc); diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 6ea520ae241..75aa8713abd 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -489,7 +489,7 @@ static int handle_data_pio(struct pxa3xx_nand_info *info) switch (info->state) { case STATE_PIO_WRITING: __raw_writesl(info->mmio_base + NDDB, info->data_buff, - info->data_size << 2); + info->data_size >> 2); enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); @@ -501,7 +501,7 @@ static int handle_data_pio(struct pxa3xx_nand_info *info) break; case STATE_PIO_READING: __raw_readsl(info->mmio_base + NDDB, info->data_buff, - info->data_size << 2); + info->data_size >> 2); break; default: printk(KERN_ERR "%s: invalid state %d\n", __func__, diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index 
11dc7e69c4f..6e88bd3c02f 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c @@ -774,7 +774,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, chip->select_chip = s3c2410_nand_select_chip; chip->chip_delay = 50; chip->priv = nmtd; - chip->options = 0; + chip->options = set->options; chip->controller = &info->controller; switch (info->cpu_type) { diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c index 73af8324d0d..863513c3b69 100644 --- a/drivers/mtd/nand/txx9ndfmc.c +++ b/drivers/mtd/nand/txx9ndfmc.c @@ -429,11 +429,10 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev) chip = mtd->priv; txx9_priv = chip->priv; + nand_release(mtd); #ifdef CONFIG_MTD_PARTITIONS - del_mtd_partitions(mtd); kfree(drvdata->parts[i]); #endif - del_mtd_device(mtd); kfree(txx9_priv->mtdname); kfree(txx9_priv); } diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 0108ed42e87..2dafd0949be 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -112,10 +112,24 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) unsigned long timeout; u32 syscfg; - if (state == FL_RESETING) { - int i; + if (state == FL_RESETING || state == FL_PREPARING_ERASE || + state == FL_VERIFYING_ERASE) { + int i = 21; + unsigned int intr_flags = ONENAND_INT_MASTER; + + switch (state) { + case FL_RESETING: + intr_flags |= ONENAND_INT_RESET; + break; + case FL_PREPARING_ERASE: + intr_flags |= ONENAND_INT_ERASE; + break; + case FL_VERIFYING_ERASE: + i = 101; + break; + } - for (i = 0; i < 20; i++) { + while (--i) { udelay(1); intr = read_reg(c, ONENAND_REG_INTERRUPT); if (intr & ONENAND_INT_MASTER) @@ -126,7 +140,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) wait_err("controller error", state, ctrl, intr); return -EIO; } - if (!(intr & ONENAND_INT_RESET)) { + if ((intr & intr_flags) != intr_flags) { wait_err("timeout", state, ctrl, intr); return -EIO; } diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 6e250f3a4a1..3330ea06917 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -1,17 +1,19 @@ /* * linux/drivers/mtd/onenand/onenand_base.c * - * Copyright (C) 2005-2007 Samsung Electronics + * Copyright © 2005-2009 Samsung Electronics + * Copyright © 2007 Nokia Corporation + * * Kyungmin Park <kyungmin.park@samsung.com> * * Credits: * Adrian Hunter <ext-adrian.hunter@nokia.com>: * auto-placement support, read-while load support, various fixes - * Copyright (C) Nokia Corporation, 2007 * * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com> * Flex-OneNAND support - * Copyright (C) Samsung Electronics, 2008 + * Amul Kumar Saha <amul.saha at samsung.com> + * OTP support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -32,6 +34,13 @@ #include <asm/io.h> +/* + * Multiblock erase if number of blocks to erase is 2 or more. + * Maximum number of blocks for simultaneous erase is 64. 
+ */ +#define MB_ERASE_MIN_BLK_COUNT 2 +#define MB_ERASE_MAX_BLK_COUNT 64 + /* Default Flex-OneNAND boundary and lock respectively */ static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 }; @@ -43,6 +52,18 @@ MODULE_PARM_DESC(flex_bdry, "SLC Boundary information for Flex-OneNAND" " : 0->Set boundary in unlocked status" " : 1->Set boundary in locked status"); +/* Default OneNAND/Flex-OneNAND OTP options*/ +static int otp; + +module_param(otp, int, 0400); +MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP" + "Syntax : otp=LOCK_TYPE" + "LOCK_TYPE : Keys issued, for specific OTP Lock type" + " : 0 -> Default (No Blocks Locked)" + " : 1 -> OTP Block lock" + " : 2 -> 1st Block lock" + " : 3 -> BOTH OTP Block and 1st Block lock"); + /** * onenand_oob_128 - oob info for Flex-Onenand with 4KB page * For now, we expose only 64 out of 80 ecc bytes @@ -339,6 +360,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le break; case ONENAND_CMD_ERASE: + case ONENAND_CMD_MULTIBLOCK_ERASE: + case ONENAND_CMD_ERASE_VERIFY: case ONENAND_CMD_BUFFERRAM: case ONENAND_CMD_OTP_ACCESS: block = onenand_block(this, addr); @@ -483,7 +506,7 @@ static int onenand_wait(struct mtd_info *mtd, int state) if (interrupt & flags) break; - if (state != FL_READING) + if (state != FL_READING && state != FL_PREPARING_ERASE) cond_resched(); } /* To get correct interrupt status in timeout case */ @@ -516,6 +539,18 @@ static int onenand_wait(struct mtd_info *mtd, int state) return -EIO; } + if (state == FL_PREPARING_ERASE && !(interrupt & ONENAND_INT_ERASE)) { + printk(KERN_ERR "%s: mb erase timeout! ctrl=0x%04x intr=0x%04x\n", + __func__, ctrl, interrupt); + return -EIO; + } + + if (!(interrupt & ONENAND_INT_MASTER)) { + printk(KERN_ERR "%s: timeout! 
ctrl=0x%04x intr=0x%04x\n", + __func__, ctrl, interrupt); + return -EIO; + } + /* If there's controller error, it's a real error */ if (ctrl & ONENAND_CTRL_ERROR) { printk(KERN_ERR "%s: controller error = 0x%04x\n", @@ -2168,70 +2203,176 @@ static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allo return bbm->isbad_bbt(mtd, ofs, allowbbt); } + +static int onenand_multiblock_erase_verify(struct mtd_info *mtd, + struct erase_info *instr) +{ + struct onenand_chip *this = mtd->priv; + loff_t addr = instr->addr; + int len = instr->len; + unsigned int block_size = (1 << this->erase_shift); + int ret = 0; + + while (len) { + this->command(mtd, ONENAND_CMD_ERASE_VERIFY, addr, block_size); + ret = this->wait(mtd, FL_VERIFYING_ERASE); + if (ret) { + printk(KERN_ERR "%s: Failed verify, block %d\n", + __func__, onenand_block(this, addr)); + instr->state = MTD_ERASE_FAILED; + instr->fail_addr = addr; + return -1; + } + len -= block_size; + addr += block_size; + } + return 0; +} + /** - * onenand_erase - [MTD Interface] erase block(s) + * onenand_multiblock_erase - [Internal] erase block(s) using multiblock erase * @param mtd MTD device structure * @param instr erase instruction + * @param region erase region * - * Erase one ore more blocks + * Erase one or more blocks up to 64 block at a time */ -static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) +static int onenand_multiblock_erase(struct mtd_info *mtd, + struct erase_info *instr, + unsigned int block_size) { struct onenand_chip *this = mtd->priv; - unsigned int block_size; loff_t addr = instr->addr; - loff_t len = instr->len; - int ret = 0, i; - struct mtd_erase_region_info *region = NULL; - loff_t region_end = 0; + int len = instr->len; + int eb_count = 0; + int ret = 0; + int bdry_block = 0; - DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len); + instr->state = MTD_ERASING; - /* Do not allow erase past end of device */ - if (unlikely((len + addr) > mtd->size)) { - printk(KERN_ERR "%s: Erase past end of device\n", __func__); - return -EINVAL; + if (ONENAND_IS_DDP(this)) { + loff_t bdry_addr = this->chipsize >> 1; + if (addr < bdry_addr && (addr + len) > bdry_addr) + bdry_block = bdry_addr >> this->erase_shift; } - if (FLEXONENAND(this)) { - /* Find the eraseregion of this address */ - i = flexonenand_region(mtd, addr); - region = &mtd->eraseregions[i]; + /* Pre-check bbs */ + while (len) { + /* Check if we have a bad block, we do not erase bad blocks */ + if (onenand_block_isbad_nolock(mtd, addr, 0)) { + printk(KERN_WARNING "%s: attempt to erase a bad block " + "at addr 0x%012llx\n", + __func__, (unsigned long long) addr); + instr->state = MTD_ERASE_FAILED; + return -EIO; + } + len -= block_size; + addr += block_size; + } - block_size = region->erasesize; - region_end = region->offset + region->erasesize * region->numblocks; + len = instr->len; + addr = instr->addr; - /* Start address within region must align on block boundary. - * Erase region's start offset is always block start address. 
- */ - if (unlikely((addr - region->offset) & (block_size - 1))) { - printk(KERN_ERR "%s: Unaligned address\n", __func__); - return -EINVAL; + /* loop over 64 eb batches */ + while (len) { + struct erase_info verify_instr = *instr; + int max_eb_count = MB_ERASE_MAX_BLK_COUNT; + + verify_instr.addr = addr; + verify_instr.len = 0; + + /* do not cross chip boundary */ + if (bdry_block) { + int this_block = (addr >> this->erase_shift); + + if (this_block < bdry_block) { + max_eb_count = min(max_eb_count, + (bdry_block - this_block)); + } } - } else { - block_size = 1 << this->erase_shift; - /* Start address must align on block boundary */ - if (unlikely(addr & (block_size - 1))) { - printk(KERN_ERR "%s: Unaligned address\n", __func__); - return -EINVAL; + eb_count = 0; + + while (len > block_size && eb_count < (max_eb_count - 1)) { + this->command(mtd, ONENAND_CMD_MULTIBLOCK_ERASE, + addr, block_size); + onenand_invalidate_bufferram(mtd, addr, block_size); + + ret = this->wait(mtd, FL_PREPARING_ERASE); + if (ret) { + printk(KERN_ERR "%s: Failed multiblock erase, " + "block %d\n", __func__, + onenand_block(this, addr)); + instr->state = MTD_ERASE_FAILED; + instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; + return -EIO; + } + + len -= block_size; + addr += block_size; + eb_count++; + } + + /* last block of 64-eb series */ + cond_resched(); + this->command(mtd, ONENAND_CMD_ERASE, addr, block_size); + onenand_invalidate_bufferram(mtd, addr, block_size); + + ret = this->wait(mtd, FL_ERASING); + /* Check if it is write protected */ + if (ret) { + printk(KERN_ERR "%s: Failed erase, block %d\n", + __func__, onenand_block(this, addr)); + instr->state = MTD_ERASE_FAILED; + instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; + return -EIO; + } + + len -= block_size; + addr += block_size; + eb_count++; + + /* verify */ + verify_instr.len = eb_count * block_size; + if (onenand_multiblock_erase_verify(mtd, &verify_instr)) { + instr->state = verify_instr.state; + instr->fail_addr = verify_instr.fail_addr; + return -EIO; } - } - /* Length must align on block boundary */ - if (unlikely(len & (block_size - 1))) { - printk(KERN_ERR "%s: Length not block aligned\n", __func__); - return -EINVAL; } + return 0; +} - instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; - /* Grab the lock and see if the device is available */ - onenand_get_device(mtd, FL_ERASING); +/** + * onenand_block_by_block_erase - [Internal] erase block(s) using regular erase + * @param mtd MTD device structure + * @param instr erase instruction + * @param region erase region + * @param block_size erase block size + * + * Erase one or more blocks one block at a time + */ +static int onenand_block_by_block_erase(struct mtd_info *mtd, + struct erase_info *instr, + struct mtd_erase_region_info *region, + unsigned int block_size) +{ + struct onenand_chip *this = mtd->priv; + loff_t addr = instr->addr; + int len = instr->len; + loff_t region_end = 0; + int ret = 0; + + if (region) { + /* region is set for Flex-OneNAND */ + region_end = region->offset + region->erasesize * region->numblocks; + } - /* Loop through the blocks */ instr->state = MTD_ERASING; + /* Loop through the blocks */ while (len) { cond_resched(); @@ -2241,7 +2382,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) "at addr 0x%012llx\n", __func__, (unsigned long long) addr); instr->state = MTD_ERASE_FAILED; - goto erase_exit; + return -EIO; } this->command(mtd, ONENAND_CMD_ERASE, addr, block_size); @@ -2255,7 +2396,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info 
*instr) __func__, onenand_block(this, addr)); instr->state = MTD_ERASE_FAILED; instr->fail_addr = addr; - goto erase_exit; + return -EIO; } len -= block_size; @@ -2273,24 +2414,86 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) /* FIXME: This should be handled at MTD partitioning level. */ printk(KERN_ERR "%s: Unaligned address\n", __func__); - goto erase_exit; + return -EIO; } } + } + return 0; +} + +/** + * onenand_erase - [MTD Interface] erase block(s) + * @param mtd MTD device structure + * @param instr erase instruction + * + * Erase one or more blocks + */ +static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) +{ + struct onenand_chip *this = mtd->priv; + unsigned int block_size; + loff_t addr = instr->addr; + loff_t len = instr->len; + int ret = 0; + struct mtd_erase_region_info *region = NULL; + loff_t region_offset = 0; + DEBUG(MTD_DEBUG_LEVEL3, "%s: start=0x%012llx, len=%llu\n", __func__, + (unsigned long long) instr->addr, (unsigned long long) instr->len); + + /* Do not allow erase past end of device */ + if (unlikely((len + addr) > mtd->size)) { + printk(KERN_ERR "%s: Erase past end of device\n", __func__); + return -EINVAL; } - instr->state = MTD_ERASE_DONE; + if (FLEXONENAND(this)) { + /* Find the eraseregion of this address */ + int i = flexonenand_region(mtd, addr); -erase_exit: + region = &mtd->eraseregions[i]; + block_size = region->erasesize; + + /* Start address within region must align on block boundary. + * Erase region's start offset is always block start address. + */ + region_offset = region->offset; + } else + block_size = 1 << this->erase_shift; - ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO; + /* Start address must align on block boundary */ + if (unlikely((addr - region_offset) & (block_size - 1))) { + printk(KERN_ERR "%s: Unaligned address\n", __func__); + return -EINVAL; + } + + /* Length must align on block boundary */ + if (unlikely(len & (block_size - 1))) { + printk(KERN_ERR "%s: Length not block aligned\n", __func__); + return -EINVAL; + } + + instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; + + /* Grab the lock and see if the device is available */ + onenand_get_device(mtd, FL_ERASING); + + if (region || instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) { + /* region is set for Flex-OneNAND (no mb erase) */ + ret = onenand_block_by_block_erase(mtd, instr, + region, block_size); + } else { + ret = onenand_multiblock_erase(mtd, instr, block_size); + } /* Deselect and wake up anyone waiting on the device */ onenand_release_device(mtd); /* Do call back function */ - if (!ret) + if (!ret) { + instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); + } return ret; } @@ -2591,6 +2794,208 @@ static void onenand_unlock_all(struct mtd_info *mtd) #ifdef CONFIG_MTD_ONENAND_OTP +/** + * onenand_otp_command - Send OTP specific command to OneNAND device + * @param mtd MTD device structure + * @param cmd the command to be sent + * @param addr offset to read from or write to + * @param len number of bytes to read or write + */ +static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr, + size_t len) +{ + struct onenand_chip *this = mtd->priv; + int value, block, page; + + /* Address translation */ + switch (cmd) { + case ONENAND_CMD_OTP_ACCESS: + block = (int) (addr >> this->erase_shift); + page = -1; + break; + + default: + block = (int) (addr >> this->erase_shift); + page = (int) (addr >> this->page_shift); + + if (ONENAND_IS_2PLANE(this)) { + /* Make the even block number */ + block &= ~1; + /* Is it the 
odd plane? */ + if (addr & this->writesize) + block++; + page >>= 1; + } + page &= this->page_mask; + break; + } + + if (block != -1) { + /* Write 'DFS, FBA' of Flash */ + value = onenand_block_address(this, block); + this->write_word(value, this->base + + ONENAND_REG_START_ADDRESS1); + } + + if (page != -1) { + /* Now we use page size operation */ + int sectors = 4, count = 4; + int dataram; + + switch (cmd) { + default: + if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG) + cmd = ONENAND_CMD_2X_PROG; + dataram = ONENAND_CURRENT_BUFFERRAM(this); + break; + } + + /* Write 'FPA, FSA' of Flash */ + value = onenand_page_address(page, sectors); + this->write_word(value, this->base + + ONENAND_REG_START_ADDRESS8); + + /* Write 'BSA, BSC' of DataRAM */ + value = onenand_buffer_address(dataram, sectors, count); + this->write_word(value, this->base + ONENAND_REG_START_BUFFER); + } + + /* Interrupt clear */ + this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT); + + /* Write command */ + this->write_word(cmd, this->base + ONENAND_REG_COMMAND); + + return 0; +} + +/** + * onenand_otp_write_oob_nolock - [Internal] OneNAND write out-of-band, specific to OTP + * @param mtd MTD device structure + * @param to offset to write to + * @param len number of bytes to write + * @param retlen pointer to variable to store the number of written bytes + * @param buf the data to write + * + * OneNAND write out-of-band only for OTP + */ +static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to, + struct mtd_oob_ops *ops) +{ + struct onenand_chip *this = mtd->priv; + int column, ret = 0, oobsize; + int written = 0; + u_char *oobbuf; + size_t len = ops->ooblen; + const u_char *buf = ops->oobbuf; + int block, value, status; + + to += ops->ooboffs; + + /* Initialize retlen, in case of early exit */ + ops->oobretlen = 0; + + oobsize = mtd->oobsize; + + column = to & (mtd->oobsize - 1); + + oobbuf = this->oob_buf; + + /* Loop until all data write */ + while (written < len) { + int thislen = min_t(int, oobsize, len - written); + + cond_resched(); + + block = (int) (to >> this->erase_shift); + /* + * Write 'DFS, FBA' of Flash + * Add: F100h DQ=DFS, FBA + */ + + value = onenand_block_address(this, block); + this->write_word(value, this->base + + ONENAND_REG_START_ADDRESS1); + + /* + * Select DataRAM for DDP + * Add: F101h DQ=DBS + */ + + value = onenand_bufferram_address(this, block); + this->write_word(value, this->base + + ONENAND_REG_START_ADDRESS2); + ONENAND_SET_NEXT_BUFFERRAM(this); + + /* + * Enter OTP access mode + */ + this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); + this->wait(mtd, FL_OTPING); + + /* We send data to spare ram with oobsize + * to prevent byte access */ + memcpy(oobbuf + column, buf, thislen); + + /* + * Write Data into DataRAM + * Add: 8th Word + * in sector0/spare/page0 + * DQ=XXFCh + */ + this->write_bufferram(mtd, ONENAND_SPARERAM, + oobbuf, 0, mtd->oobsize); + + onenand_otp_command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize); + onenand_update_bufferram(mtd, to, 0); + if (ONENAND_IS_2PLANE(this)) { + ONENAND_SET_BUFFERRAM1(this); + onenand_update_bufferram(mtd, to + this->writesize, 0); + } + + ret = this->wait(mtd, FL_WRITING); + if (ret) { + printk(KERN_ERR "%s: write failed %d\n", __func__, ret); + break; + } + + /* Exit OTP access mode */ + this->command(mtd, ONENAND_CMD_RESET, 0, 0); + this->wait(mtd, FL_RESETING); + + status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); + status &= 0x60; + + if (status == 0x60) { + printk(KERN_DEBUG 
"\nBLOCK\tSTATUS\n"); + printk(KERN_DEBUG "1st Block\tLOCKED\n"); + printk(KERN_DEBUG "OTP Block\tLOCKED\n"); + } else if (status == 0x20) { + printk(KERN_DEBUG "\nBLOCK\tSTATUS\n"); + printk(KERN_DEBUG "1st Block\tLOCKED\n"); + printk(KERN_DEBUG "OTP Block\tUN-LOCKED\n"); + } else if (status == 0x40) { + printk(KERN_DEBUG "\nBLOCK\tSTATUS\n"); + printk(KERN_DEBUG "1st Block\tUN-LOCKED\n"); + printk(KERN_DEBUG "OTP Block\tLOCKED\n"); + } else { + printk(KERN_DEBUG "Reboot to check\n"); + } + + written += thislen; + if (written == len) + break; + + to += mtd->writesize; + buf += thislen; + column = 0; + } + + ops->oobretlen = written; + + return ret; +} + /* Internal OTP operation */ typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len, size_t *retlen, u_char *buf); @@ -2693,11 +3098,11 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len, struct mtd_oob_ops ops; int ret; - /* Enter OTP access mode */ - this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); - this->wait(mtd, FL_OTPING); - if (FLEXONENAND(this)) { + + /* Enter OTP access mode */ + this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); + this->wait(mtd, FL_OTPING); /* * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of * main area of page 49. @@ -2708,19 +3113,19 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len, ops.oobbuf = NULL; ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops); *retlen = ops.retlen; + + /* Exit OTP access mode */ + this->command(mtd, ONENAND_CMD_RESET, 0, 0); + this->wait(mtd, FL_RESETING); } else { ops.mode = MTD_OOB_PLACE; ops.ooblen = len; ops.oobbuf = buf; ops.ooboffs = 0; - ret = onenand_write_oob_nolock(mtd, from, &ops); + ret = onenand_otp_write_oob_nolock(mtd, from, &ops); *retlen = ops.oobretlen; } - /* Exit OTP access mode */ - this->command(mtd, ONENAND_CMD_RESET, 0, 0); - this->wait(mtd, FL_RESETING); - return ret; } @@ -2751,16 +3156,21 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, if (density < ONENAND_DEVICE_DENSITY_512Mb) otp_pages = 20; else - otp_pages = 10; + otp_pages = 50; if (mode == MTD_OTP_FACTORY) { from += mtd->writesize * otp_pages; - otp_pages = 64 - otp_pages; + otp_pages = ONENAND_PAGES_PER_BLOCK - otp_pages; } /* Check User/Factory boundary */ - if (((mtd->writesize * otp_pages) - (from + len)) < 0) - return 0; + if (mode == MTD_OTP_USER) { + if (((mtd->writesize * otp_pages) - (from + len)) < 0) + return 0; + } else { + if (((mtd->writesize * otp_pages) - len) < 0) + return 0; + } onenand_get_device(mtd, FL_OTPING); while (len > 0 && otp_pages > 0) { @@ -2783,13 +3193,12 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, *retlen += sizeof(struct otp_info); } else { size_t tmp_retlen; - int size = len; ret = action(mtd, from, len, &tmp_retlen, buf); - buf += size; - len -= size; - *retlen += size; + buf += tmp_retlen; + len -= tmp_retlen; + *retlen += tmp_retlen; if (ret) break; @@ -2902,21 +3311,11 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf; size_t retlen; int ret; + unsigned int otp_lock_offset = ONENAND_OTP_LOCK_OFFSET; memset(buf, 0xff, FLEXONENAND(this) ? 
this->writesize : mtd->oobsize); /* - * Note: OTP lock operation - * OTP block : 0xXXFC - * 1st block : 0xXXF3 (If chip support) - * Both : 0xXXF0 (If chip support) - */ - if (FLEXONENAND(this)) - buf[FLEXONENAND_OTP_LOCK_OFFSET] = 0xFC; - else - buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC; - - /* * Write lock mark to 8th word of sector0 of page0 of the spare0. * We write 16 bytes spare area instead of 2 bytes. * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of @@ -2926,10 +3325,30 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, from = 0; len = FLEXONENAND(this) ? mtd->writesize : 16; + /* + * Note: OTP lock operation + * OTP block : 0xXXFC XX 1111 1100 + * 1st block : 0xXXF3 (If chip support) XX 1111 0011 + * Both : 0xXXF0 (If chip support) XX 1111 0000 + */ + if (FLEXONENAND(this)) + otp_lock_offset = FLEXONENAND_OTP_LOCK_OFFSET; + + /* ONENAND_OTP_AREA | ONENAND_OTP_BLOCK0 | ONENAND_OTP_AREA_BLOCK0 */ + if (otp == 1) + buf[otp_lock_offset] = 0xFC; + else if (otp == 2) + buf[otp_lock_offset] = 0xF3; + else if (otp == 3) + buf[otp_lock_offset] = 0xF0; + else if (otp != 0) + printk(KERN_DEBUG "[OneNAND] Invalid option selected for OTP\n"); + ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER); return ret ? : retlen; } + #endif /* CONFIG_MTD_ONENAND_OTP */ /** @@ -3279,8 +3698,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die, this->command(mtd, ONENAND_CMD_ERASE, addr, 0); ret = this->wait(mtd, FL_ERASING); if (ret) { - printk(KERN_ERR "%s: flexonenand_set_boundary: " - "Failed PI erase for Die %d\n", __func__, die); + printk(KERN_ERR "%s: Failed PI erase for Die %d\n", + __func__, die); goto out; } diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile index c1d50133500..b44dcab940d 100644 --- a/drivers/mtd/tests/Makefile +++ b/drivers/mtd/tests/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o +obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c new file mode 100644 index 00000000000..c1f31051784 --- /dev/null +++ b/drivers/mtd/tests/mtd_nandecctest.c @@ -0,0 +1,87 @@ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/random.h> +#include <linux/string.h> +#include <linux/bitops.h> +#include <linux/jiffies.h> +#include <linux/mtd/nand_ecc.h> + +#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE) + +static void inject_single_bit_error(void *data, size_t size) +{ + unsigned long offset = random32() % (size * BITS_PER_BYTE); + + __change_bit(offset, data); +} + +static unsigned char data[512]; +static unsigned char error_data[512]; + +static int nand_ecc_test(const size_t size) +{ + unsigned char code[3]; + unsigned char error_code[3]; + char testname[30]; + + BUG_ON(sizeof(data) < size); + + sprintf(testname, "nand-ecc-%zu", size); + + get_random_bytes(data, size); + + memcpy(error_data, data, size); + inject_single_bit_error(error_data, size); + + __nand_calculate_ecc(data, size, code); + __nand_calculate_ecc(error_data, size, error_code); + __nand_correct_data(error_data, code, error_code, size); + + if (!memcmp(data, error_data, size)) { + printk(KERN_INFO "mtd_nandecctest: ok - %s\n", testname); + return 0; + } + + printk(KERN_ERR "mtd_nandecctest: not ok - %s\n", testname); + + 
printk(KERN_DEBUG "hexdump of data:\n"); + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4, + data, size, false); + printk(KERN_DEBUG "hexdump of error data:\n"); + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4, + error_data, size, false); + + return -1; +} + +#else + +static int nand_ecc_test(const size_t size) +{ + return 0; +} + +#endif + +static int __init ecc_test_init(void) +{ + srandom32(jiffies); + + nand_ecc_test(256); + nand_ecc_test(512); + + return 0; +} + +static void __exit ecc_test_exit(void) +{ +} + +module_init(ecc_test_init); +module_exit(ecc_test_exit); + +MODULE_DESCRIPTION("NAND ECC function test module"); +MODULE_AUTHOR("Akinobu Mita"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c index 103cac480fe..ce17cbe918c 100644 --- a/drivers/mtd/tests/mtd_pagetest.c +++ b/drivers/mtd/tests/mtd_pagetest.c @@ -523,6 +523,7 @@ static int __init mtd_pagetest_init(void) do_div(tmp, mtd->erasesize); ebcnt = tmp; pgcnt = mtd->erasesize / mtd->writesize; + pgsize = mtd->writesize; printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " "page size %u, count of eraseblocks %u, pages per " diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index 1a80301004b..9c26738ef04 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c @@ -1284,7 +1284,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, f->target = NULL; mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); - return -ret; + return ret; } f->target[je32_to_cpu(latest_node->csize)] = '\0'; diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c index 6caf1e1ee26..800171dca53 100644 --- a/fs/jffs2/summary.c +++ b/fs/jffs2/summary.c @@ -23,7 +23,7 @@ int jffs2_sum_init(struct jffs2_sb_info *c) { - uint32_t sum_size = max_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE); + uint32_t sum_size = min_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE); c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h new file mode 100644 index 00000000000..7f089ec5ef3 --- /dev/null +++ b/include/linux/kmsg_dump.h @@ -0,0 +1,44 @@ +/* + * linux/include/kmsg_dump.h + * + * Copyright (C) 2009 Net Insight AB + * + * Author: Simon Kagstrom <simon.kagstrom@netinsight.net> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ +#ifndef _LINUX_KMSG_DUMP_H +#define _LINUX_KMSG_DUMP_H + +#include <linux/list.h> + +enum kmsg_dump_reason { + KMSG_DUMP_OOPS, + KMSG_DUMP_PANIC, +}; + +/** + * struct kmsg_dumper - kernel crash message dumper structure + * @dump: The callback which gets called on crashes. The buffer is passed + * as two sections, where s1 (length l1) contains the older + * messages and s2 (length l2) contains the newer. 
+ * @list: Entry in the dumper list (private) + * @registered: Flag that specifies if this is already registered + */ +struct kmsg_dumper { + void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason, + const char *s1, unsigned long l1, + const char *s2, unsigned long l2); + struct list_head list; + int registered; +}; + +void kmsg_dump(enum kmsg_dump_reason reason); + +int kmsg_dump_register(struct kmsg_dumper *dumper); + +int kmsg_dump_unregister(struct kmsg_dumper *dumper); + +#endif /* _LINUX_KMSG_DUMP_H */ diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 88d3d8fbf9f..df89f427523 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -518,10 +518,11 @@ struct cfi_fixup { #define CFI_MFR_ANY 0xffff #define CFI_ID_ANY 0xffff -#define CFI_MFR_AMD 0x0001 -#define CFI_MFR_ATMEL 0x001F -#define CFI_MFR_SAMSUNG 0x00EC -#define CFI_MFR_ST 0x0020 /* STMicroelectronics */ +#define CFI_MFR_AMD 0x0001 +#define CFI_MFR_INTEL 0x0089 +#define CFI_MFR_ATMEL 0x001F +#define CFI_MFR_SAMSUNG 0x00EC +#define CFI_MFR_ST 0x0020 /* STMicroelectronics */ void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index f350a4879f7..d0bf422ae37 100644 --- a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h @@ -41,9 +41,11 @@ typedef enum { /* These 2 come from nand_state_t, which has been unified here */ FL_READING, FL_CACHEDPRG, - /* These 2 come from onenand_state_t, which has been unified here */ + /* These 4 come from onenand_state_t, which has been unified here */ FL_RESETING, FL_OTPING, + FL_PREPARING_ERASE, + FL_VERIFYING_ERASE, FL_UNKNOWN } flstate_t; diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 2476078a032..ccab9dfc521 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -170,7 +170,6 @@ typedef enum { /* Chip does not allow subpage writes */ #define NAND_NO_SUBPAGE_WRITE 0x00000200 - /* Options valid for Samsung large page devices */ #define NAND_SAMSUNG_LP_OPTIONS \ (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK) @@ -196,6 +195,9 @@ typedef enum { /* This option is defined if the board driver allocates its own buffers (e.g. 
because it needs them DMA-coherent */ #define NAND_OWN_BUFFERS 0x00040000 +/* Chip may not exist, so silence any errors in scan */ +#define NAND_SCAN_SILENT_NODEV 0x00080000 + /* Options set by nand scan */ /* Nand scan has allocated controller struct */ #define NAND_CONTROLLER_ALLOC 0x80000000 diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h index 052ea8ca243..41bc013571d 100644 --- a/include/linux/mtd/nand_ecc.h +++ b/include/linux/mtd/nand_ecc.h @@ -16,7 +16,13 @@ struct mtd_info; /* - * Calculate 3 byte ECC code for 256 byte block + * Calculate 3 byte ECC code for eccsize byte block + */ +void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize, + u_char *ecc_code); + +/* + * Calculate 3 byte ECC code for 256/512 byte block */ int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); @@ -27,7 +33,7 @@ int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc, unsigned int eccsize); /* - * Detect and correct a 1 bit error for 256 byte block + * Detect and correct a 1 bit error for 256/512 byte block */ int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index f57e29e17bb..5509eb06b32 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h @@ -1,7 +1,7 @@ /* * linux/include/linux/mtd/onenand.h * - * Copyright (C) 2005-2007 Samsung Electronics + * Copyright © 2005-2009 Samsung Electronics * Kyungmin Park <kyungmin.park@samsung.com> * * This program is free software; you can redistribute it and/or modify @@ -137,6 +137,8 @@ struct onenand_chip { /* * Helper macros */ +#define ONENAND_PAGES_PER_BLOCK (1<<6) + #define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index) #define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1) #define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1) diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h index acadbf53a69..cd6f3b43119 100644 --- a/include/linux/mtd/onenand_regs.h +++ b/include/linux/mtd/onenand_regs.h @@ -131,6 +131,8 @@ #define ONENAND_CMD_LOCK_TIGHT (0x2C) #define ONENAND_CMD_UNLOCK_ALL (0x27) #define ONENAND_CMD_ERASE (0x94) +#define ONENAND_CMD_MULTIBLOCK_ERASE (0x95) +#define ONENAND_CMD_ERASE_VERIFY (0x71) #define ONENAND_CMD_RESET (0xF0) #define ONENAND_CMD_OTP_ACCESS (0x65) #define ONENAND_CMD_READID (0x90) diff --git a/kernel/panic.c b/kernel/panic.c index bcdef26e333..8c43226a544 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -10,6 +10,7 @@ */ #include <linux/debug_locks.h> #include <linux/interrupt.h> +#include <linux/kmsg_dump.h> #include <linux/kallsyms.h> #include <linux/notifier.h> #include <linux/module.h> @@ -74,6 +75,7 @@ NORET_TYPE void panic(const char * fmt, ...) dump_stack(); #endif + kmsg_dump(KMSG_DUMP_PANIC); /* * If we have crashed and we have a crash kernel loaded let it handle * everything else. 
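
The hunk above wires kmsg_dump(KMSG_DUMP_PANIC) into panic(); the next hunk does the same for oops_exit(). Together with the mtd->panic_write plumbing that nand_scan_tail() now installs (panic_nand_write earlier in this series), a flash-resident crash log only needs to register a dumper. The following is a minimal sketch, not part of this patch set: the module and function names, the "oopslog" partition name, the 4 KiB single-page record and the truncate-to-one-page policy are all illustrative assumptions; only struct kmsg_dumper, kmsg_dump_register()/kmsg_dump_unregister() and the panic_write hook come from the patches shown here.

/*
 * Illustrative sketch only (not from this series): a kmsg dumper that
 * packs the two log segments into one page-sized record and stores it
 * through the new mtd->panic_write hook.  Partition name, buffer size
 * and the truncation policy are assumptions.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/kmsg_dump.h>
#include <linux/mtd/mtd.h>

static struct mtd_info *log_mtd;
static u_char logbuf[4096];		/* assumed to match one NAND page */

static void flashlog_dump(struct kmsg_dumper *dumper,
			  enum kmsg_dump_reason reason,
			  const char *s1, unsigned long l1,
			  const char *s2, unsigned long l2)
{
	size_t retlen;

	/* s1 holds the older messages, s2 the newer ones; keep that order
	 * and truncate to one page so the write stays page aligned for
	 * panic_nand_write(). */
	memset(logbuf, 0xff, sizeof(logbuf));
	l1 = min_t(unsigned long, l1, sizeof(logbuf));
	memcpy(logbuf, s1, l1);
	l2 = min_t(unsigned long, l2, sizeof(logbuf) - l1);
	memcpy(logbuf + l1, s2, l2);

	log_mtd->panic_write(log_mtd, 0, sizeof(logbuf), &retlen, logbuf);
}

static struct kmsg_dumper flashlog_dumper = {
	.dump = flashlog_dump,
};

static int __init flashlog_init(void)
{
	log_mtd = get_mtd_device_nm("oopslog");	/* assumed partition name */
	if (IS_ERR(log_mtd))
		return PTR_ERR(log_mtd);
	if (!log_mtd->panic_write) {
		put_mtd_device(log_mtd);
		return -EOPNOTSUPP;
	}
	return kmsg_dump_register(&flashlog_dumper);
}

static void __exit flashlog_exit(void)
{
	kmsg_dump_unregister(&flashlog_dumper);
	put_mtd_device(log_mtd);
}

module_init(flashlog_init);
module_exit(flashlog_exit);
MODULE_LICENSE("GPL");
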
@@ -338,6 +340,7 @@ void oops_exit(void) { do_oops_enter_exit(); print_oops_end_marker(); + kmsg_dump(KMSG_DUMP_OOPS); } #ifdef WANT_WARN_ON_SLOWPATH diff --git a/kernel/printk.c b/kernel/printk.c index f38b07f78a4..051d1f50648 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -33,6 +33,7 @@ #include <linux/bootmem.h> #include <linux/syscalls.h> #include <linux/kexec.h> +#include <linux/kmsg_dump.h> #include <asm/uaccess.h> @@ -1405,3 +1406,121 @@ bool printk_timed_ratelimit(unsigned long *caller_jiffies, } EXPORT_SYMBOL(printk_timed_ratelimit); #endif + +static DEFINE_SPINLOCK(dump_list_lock); +static LIST_HEAD(dump_list); + +/** + * kmsg_dump_register - register a kernel log dumper. + * @dump: pointer to the kmsg_dumper structure + * + * Adds a kernel log dumper to the system. The dump callback in the + * structure will be called when the kernel oopses or panics and must be + * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise. + */ +int kmsg_dump_register(struct kmsg_dumper *dumper) +{ + unsigned long flags; + int err = -EBUSY; + + /* The dump callback needs to be set */ + if (!dumper->dump) + return -EINVAL; + + spin_lock_irqsave(&dump_list_lock, flags); + /* Don't allow registering multiple times */ + if (!dumper->registered) { + dumper->registered = 1; + list_add_tail(&dumper->list, &dump_list); + err = 0; + } + spin_unlock_irqrestore(&dump_list_lock, flags); + + return err; +} +EXPORT_SYMBOL_GPL(kmsg_dump_register); + +/** + * kmsg_dump_unregister - unregister a kmsg dumper. + * @dump: pointer to the kmsg_dumper structure + * + * Removes a dump device from the system. Returns zero on success and + * %-EINVAL otherwise. + */ +int kmsg_dump_unregister(struct kmsg_dumper *dumper) +{ + unsigned long flags; + int err = -EINVAL; + + spin_lock_irqsave(&dump_list_lock, flags); + if (dumper->registered) { + dumper->registered = 0; + list_del(&dumper->list); + err = 0; + } + spin_unlock_irqrestore(&dump_list_lock, flags); + + return err; +} +EXPORT_SYMBOL_GPL(kmsg_dump_unregister); + +static const char const *kmsg_reasons[] = { + [KMSG_DUMP_OOPS] = "oops", + [KMSG_DUMP_PANIC] = "panic", +}; + +static const char *kmsg_to_str(enum kmsg_dump_reason reason) +{ + if (reason >= ARRAY_SIZE(kmsg_reasons) || reason < 0) + return "unknown"; + + return kmsg_reasons[reason]; +} + +/** + * kmsg_dump - dump kernel log to kernel message dumpers. + * @reason: the reason (oops, panic etc) for dumping + * + * Iterate through each of the dump devices and call the oops/panic + * callbacks with the log buffer. + */ +void kmsg_dump(enum kmsg_dump_reason reason) +{ + unsigned long end; + unsigned chars; + struct kmsg_dumper *dumper; + const char *s1, *s2; + unsigned long l1, l2; + unsigned long flags; + + /* Theoretically, the log could move on after we do this, but + there's not a lot we can do about that. The new messages + will overwrite the start of what we dump. 
*/ + spin_lock_irqsave(&logbuf_lock, flags); + end = log_end & LOG_BUF_MASK; + chars = logged_chars; + spin_unlock_irqrestore(&logbuf_lock, flags); + + if (logged_chars > end) { + s1 = log_buf + log_buf_len - logged_chars + end; + l1 = logged_chars - end; + + s2 = log_buf; + l2 = end; + } else { + s1 = ""; + l1 = 0; + + s2 = log_buf + end - logged_chars; + l2 = logged_chars; + } + + if (!spin_trylock_irqsave(&dump_list_lock, flags)) { + printk(KERN_ERR "dump_kmsg: dump list lock is held during %s, skipping dump\n", + kmsg_to_str(reason)); + return; + } + list_for_each_entry(dumper, &dump_list, list) + dumper->dump(dumper, reason, s1, l1, s2, l2); + spin_unlock_irqrestore(&dump_list_lock, flags); +} |
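
To make the segment arithmetic in kmsg_dump() above concrete, assume (purely for illustration) log_buf_len = 16, logged_chars = 10 and log_end = 20, so end = 20 & 15 = 4 and the ring buffer has wrapped. Since logged_chars > end, the dumper is handed s1 = log_buf + 16 - 10 + 4 = log_buf + 10 with l1 = 6 (the six oldest characters, stored at offsets 10..15) and s2 = log_buf with l2 = 4 (the four newest characters at offsets 0..3); l1 + l2 always equals logged_chars. In the unwrapped case the tail is contiguous, so s1 is empty and s2 points at log_buf + end - logged_chars. A dump callback should therefore always treat s1 as the older portion and s2 as the newer one, as the struct kmsg_dumper kernel-doc in include/linux/kmsg_dump.h states.
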