author    | David Woodhouse <David.Woodhouse@intel.com> | 2012-08-22 12:54:55 +0100
committer | David Woodhouse <David.Woodhouse@intel.com> | 2012-08-22 12:55:43 +0100
commit    | 2361f738b67ab7f1152187fa3d321a09b7c95c09 (patch)
tree      | eddf2b958215b668b9b871b6b59bc76c67a47751 /drivers/mtd
parent    | 4800399e335658aae632f587f6759a860f584804 (diff)
parent    | d9875690d9b89a866022ff49e3fcea892345ad92 (diff)
Merge tag 'v3.6-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux
Having missed the merge window, update to 3.6-rc2 to avoid conflicts with
new patches.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/mtd')
-rw-r--r-- | drivers/mtd/maps/uclinux.c     |   5
-rw-r--r-- | drivers/mtd/mtdoops.c          |  22
-rw-r--r-- | drivers/mtd/mtdsuper.c         |   4
-rw-r--r-- | drivers/mtd/nand/Kconfig       |   2
-rw-r--r-- | drivers/mtd/nand/jz4740_nand.c | 228
-rw-r--r-- | drivers/mtd/nand/omap2.c       | 105
-rw-r--r-- | drivers/mtd/nand/orion_nand.c  |   6
-rw-r--r-- | drivers/mtd/ubi/Kconfig        |   2
-rw-r--r-- | drivers/mtd/ubi/cdev.c         |   2
-rw-r--r-- | drivers/mtd/ubi/debug.c        |   8
-rw-r--r-- | drivers/mtd/ubi/misc.c         |  25
-rw-r--r-- | drivers/mtd/ubi/ubi.h          |   1
-rw-r--r-- | drivers/mtd/ubi/vmt.c          |  20
13 files changed, 293 insertions, 137 deletions
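
For orientation, the mtdoops.c hunk below is a migration to the reworked kmsg_dump interface: the dump callback now takes only the dump reason, the log text is pulled as a single contiguous copy with kmsg_dump_get_buffer(), and reason filtering moves into the dumper's max_reason field. The following is a minimal sketch of a dumper registered against that interface, assuming a 3.6-era kernel; the example_* names and the fixed 4 KiB buffer are illustrative only and not taken from the patch.

#include <linux/kmsg_dump.h>
#include <linux/module.h>

/* Illustrative only: a minimal kmsg dumper using the reworked interface.
 * The old callback received the log as two (s1,l1)/(s2,l2) ring-buffer
 * segments; the new one copies the tail of the log in one call and lets
 * the core filter on max_reason. */
static char example_buf[4096];

static void example_do_dump(struct kmsg_dumper *dumper,
			    enum kmsg_dump_reason reason)
{
	size_t len;

	/* Copy the most recent kernel messages into example_buf. */
	if (!kmsg_dump_get_buffer(dumper, true, example_buf,
				  sizeof(example_buf), &len))
		return;

	/* ... hand 'len' bytes of example_buf to the storage back end ... */
}

static struct kmsg_dumper example_dumper = {
	.dump       = example_do_dump,
	.max_reason = KMSG_DUMP_OOPS,	/* panics and oopses only */
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}

static void __exit example_exit(void)
{
	kmsg_dump_unregister(&example_dumper);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

On top of this, mtdoops keeps its own dump_oops module parameter check and still writes panic records synchronously, as the hunk shows.
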
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index cfff454f628..c3bb304eca0 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -19,14 +19,13 @@ #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <asm/io.h> +#include <asm/sections.h> /****************************************************************************/ -extern char _ebss; - struct map_info uclinux_ram_map = { .name = "RAM", - .phys = (unsigned long)&_ebss, + .phys = (unsigned long)__bss_stop, .size = 0, };
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 6ba9507b7c8..788f00be8d0 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -302,32 +302,17 @@ static void find_next_position(struct mtdoops_context *cxt) } static void mtdoops_do_dump(struct kmsg_dumper *dumper, - enum kmsg_dump_reason reason, const char *s1, unsigned long l1, - const char *s2, unsigned long l2) + enum kmsg_dump_reason reason) { struct mtdoops_context *cxt = container_of(dumper, struct mtdoops_context, dump); - unsigned long s1_start, s2_start; - unsigned long l1_cpy, l2_cpy; - char *dst; - - if (reason != KMSG_DUMP_OOPS && - reason != KMSG_DUMP_PANIC) - return; /* Only dump oopses if dump_oops is set */ if (reason == KMSG_DUMP_OOPS && !dump_oops) return; - dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */ - l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE); - l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy); - - s2_start = l2 - l2_cpy; - s1_start = l1 - l1_cpy; - - memcpy(dst, s1 + s1_start, l1_cpy); - memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); + kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE, + record_size - MTDOOPS_HEADER_SIZE, NULL); /* Panics must be written immediately */ if (reason != KMSG_DUMP_OOPS)
@@ -373,6 +358,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd) return; } + cxt->dump.max_reason = KMSG_DUMP_OOPS; cxt->dump.dump = mtdoops_do_dump; err = kmsg_dump_register(&cxt->dump); if (err) {
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index a90bfe79916..334da5f583c 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -63,7 +63,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags, struct super_block *sb; int ret; - sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, mtd); + sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, flags, mtd); if (IS_ERR(sb)) goto out_error;
@@ -74,8 +74,6 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags, pr_debug("MTDSB: New superblock for device %d (\"%s\")\n", mtd->index, mtd->name); - sb->s_flags = flags; - ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0); if (ret < 0) { deactivate_locked_super(sb);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index f4e81a7742b..588e98930aa 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -462,7 +462,7 @@ config MTD_NAND_NANDSIM config MTD_NAND_GPMI_NAND bool "GPMI NAND Flash Controller driver" - depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q) + depends on MTD_NAND && MXS_DMA help Enables NAND Flash support for IMX23 or IMX28.
The GPMI controller is very powerful, with the help of BCH
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index a6fa884ae49..100b6775e17 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -52,9 +52,10 @@ #define JZ_NAND_CTRL_ENABLE_CHIP(x) BIT((x) << 1) #define JZ_NAND_CTRL_ASSERT_CHIP(x) BIT(((x) << 1) + 1) +#define JZ_NAND_CTRL_ASSERT_CHIP_MASK 0xaa -#define JZ_NAND_MEM_ADDR_OFFSET 0x10000 #define JZ_NAND_MEM_CMD_OFFSET 0x08000 +#define JZ_NAND_MEM_ADDR_OFFSET 0x10000 struct jz_nand { struct mtd_info mtd;
@@ -62,8 +63,11 @@ struct jz_nand { void __iomem *base; struct resource *mem; - void __iomem *bank_base; - struct resource *bank_mem; + unsigned char banks[JZ_NAND_NUM_BANKS]; + void __iomem *bank_base[JZ_NAND_NUM_BANKS]; + struct resource *bank_mem[JZ_NAND_NUM_BANKS]; + + int selected_bank; struct jz_nand_platform_data *pdata; bool is_reading;
@@ -74,26 +78,50 @@ static inline struct jz_nand *mtd_to_jz_nand(struct mtd_info *mtd) return container_of(mtd, struct jz_nand, mtd); } +static void jz_nand_select_chip(struct mtd_info *mtd, int chipnr) +{ + struct jz_nand *nand = mtd_to_jz_nand(mtd); + struct nand_chip *chip = mtd->priv; + uint32_t ctrl; + int banknr; + + ctrl = readl(nand->base + JZ_REG_NAND_CTRL); + ctrl &= ~JZ_NAND_CTRL_ASSERT_CHIP_MASK; + + if (chipnr == -1) { + banknr = -1; + } else { + banknr = nand->banks[chipnr] - 1; + chip->IO_ADDR_R = nand->bank_base[banknr]; + chip->IO_ADDR_W = nand->bank_base[banknr]; + } + writel(ctrl, nand->base + JZ_REG_NAND_CTRL); + + nand->selected_bank = banknr; +} + static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) { struct jz_nand *nand = mtd_to_jz_nand(mtd); struct nand_chip *chip = mtd->priv; uint32_t reg; + void __iomem *bank_base = nand->bank_base[nand->selected_bank]; + + BUG_ON(nand->selected_bank < 0); if (ctrl & NAND_CTRL_CHANGE) { BUG_ON((ctrl & NAND_ALE) && (ctrl & NAND_CLE)); if (ctrl & NAND_ALE) - chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_ADDR_OFFSET; + bank_base += JZ_NAND_MEM_ADDR_OFFSET; else if (ctrl & NAND_CLE) - chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_CMD_OFFSET; - else - chip->IO_ADDR_W = nand->bank_base; + bank_base += JZ_NAND_MEM_CMD_OFFSET; + chip->IO_ADDR_W = bank_base; reg = readl(nand->base + JZ_REG_NAND_CTRL); if (ctrl & NAND_NCE) - reg |= JZ_NAND_CTRL_ASSERT_CHIP(0); + reg |= JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank); else - reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(0); + reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank); writel(reg, nand->base + JZ_REG_NAND_CTRL); } if (dat != NAND_CMD_NONE)
@@ -252,7 +280,7 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat, } static int jz_nand_ioremap_resource(struct platform_device *pdev, - const char *name, struct resource **res, void __iomem **base) + const char *name, struct resource **res, void *__iomem *base) { int ret;
@@ -288,6 +316,90 @@ err: return ret; } +static inline void jz_nand_iounmap_resource(struct resource *res, void __iomem *base) +{ + iounmap(base); + release_mem_region(res->start, resource_size(res)); +} + +static int __devinit jz_nand_detect_bank(struct platform_device *pdev, struct jz_nand *nand, unsigned char bank, size_t chipnr, uint8_t *nand_maf_id, uint8_t *nand_dev_id) { + int ret; + int gpio; + char gpio_name[9]; + char res_name[6]; + uint32_t ctrl; + struct mtd_info *mtd = &nand->mtd; + struct nand_chip *chip = &nand->chip; + + /* Request GPIO port.
*/ + gpio = JZ_GPIO_MEM_CS0 + bank - 1; + sprintf(gpio_name, "NAND CS%d", bank); + ret = gpio_request(gpio, gpio_name); + if (ret) { + dev_warn(&pdev->dev, + "Failed to request %s gpio %d: %d\n", + gpio_name, gpio, ret); + goto notfound_gpio; + } + + /* Request I/O resource. */ + sprintf(res_name, "bank%d", bank); + ret = jz_nand_ioremap_resource(pdev, res_name, + &nand->bank_mem[bank - 1], + &nand->bank_base[bank - 1]); + if (ret) + goto notfound_resource; + + /* Enable chip in bank. */ + jz_gpio_set_function(gpio, JZ_GPIO_FUNC_MEM_CS0); + ctrl = readl(nand->base + JZ_REG_NAND_CTRL); + ctrl |= JZ_NAND_CTRL_ENABLE_CHIP(bank - 1); + writel(ctrl, nand->base + JZ_REG_NAND_CTRL); + + if (chipnr == 0) { + /* Detect first chip. */ + ret = nand_scan_ident(mtd, 1, NULL); + if (ret) + goto notfound_id; + + /* Retrieve the IDs from the first chip. */ + chip->select_chip(mtd, 0); + chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); + chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); + *nand_maf_id = chip->read_byte(mtd); + *nand_dev_id = chip->read_byte(mtd); + } else { + /* Detect additional chip. */ + chip->select_chip(mtd, chipnr); + chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); + chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); + if (*nand_maf_id != chip->read_byte(mtd) + || *nand_dev_id != chip->read_byte(mtd)) { + ret = -ENODEV; + goto notfound_id; + } + + /* Update size of the MTD. */ + chip->numchips++; + mtd->size += chip->chipsize; + } + + dev_info(&pdev->dev, "Found chip %i on bank %i\n", chipnr, bank); + return 0; + +notfound_id: + dev_info(&pdev->dev, "No chip found on bank %i\n", bank); + ctrl &= ~(JZ_NAND_CTRL_ENABLE_CHIP(bank - 1)); + writel(ctrl, nand->base + JZ_REG_NAND_CTRL); + jz_gpio_set_function(gpio, JZ_GPIO_FUNC_NONE); + jz_nand_iounmap_resource(nand->bank_mem[bank - 1], + nand->bank_base[bank - 1]); +notfound_resource: + gpio_free(gpio); +notfound_gpio: + return ret; +} + static int __devinit jz_nand_probe(struct platform_device *pdev) { int ret;
@@ -295,6 +407,8 @@ static int __devinit jz_nand_probe(struct platform_device *pdev) struct nand_chip *chip; struct mtd_info *mtd; struct jz_nand_platform_data *pdata = pdev->dev.platform_data; + size_t chipnr, bank_idx; + uint8_t nand_maf_id = 0, nand_dev_id = 0; nand = kzalloc(sizeof(*nand), GFP_KERNEL); if (!nand) {
@@ -305,10 +419,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev) ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base); if (ret) goto err_free; - ret = jz_nand_ioremap_resource(pdev, "bank", &nand->bank_mem, - &nand->bank_base); - if (ret) - goto err_iounmap_mmio; if (pdata && gpio_is_valid(pdata->busy_gpio)) { ret = gpio_request(pdata->busy_gpio, "NAND busy pin");
@@ -316,7 +426,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Failed to request busy gpio %d: %d\n", pdata->busy_gpio, ret); - goto err_iounmap_mem; + goto err_iounmap_mmio; } }
@@ -339,22 +449,51 @@ static int __devinit jz_nand_probe(struct platform_device *pdev) chip->chip_delay = 50; chip->cmd_ctrl = jz_nand_cmd_ctrl; + chip->select_chip = jz_nand_select_chip; if (pdata && gpio_is_valid(pdata->busy_gpio)) chip->dev_ready = jz_nand_dev_ready; - chip->IO_ADDR_R = nand->bank_base; - chip->IO_ADDR_W = nand->bank_base; - nand->pdata = pdata; platform_set_drvdata(pdev, nand); - writel(JZ_NAND_CTRL_ENABLE_CHIP(0), nand->base + JZ_REG_NAND_CTRL); - - ret = nand_scan_ident(mtd, 1, NULL); - if (ret) { - dev_err(&pdev->dev, "Failed to scan nand\n"); - goto err_gpio_free; + /* We are going to
autodetect NAND chips in the banks specified in the + * platform data. Although nand_scan_ident() can detect multiple chips, + * it requires those chips to be numbered consecuitively, which is not + * always the case for external memory banks. And a fixed chip-to-bank + * mapping is not practical either, since for example Dingoo units + * produced at different times have NAND chips in different banks. + */ + chipnr = 0; + for (bank_idx = 0; bank_idx < JZ_NAND_NUM_BANKS; bank_idx++) { + unsigned char bank; + + /* If there is no platform data, look for NAND in bank 1, + * which is the most likely bank since it is the only one + * that can be booted from. + */ + bank = pdata ? pdata->banks[bank_idx] : bank_idx ^ 1; + if (bank == 0) + break; + if (bank > JZ_NAND_NUM_BANKS) { + dev_warn(&pdev->dev, + "Skipping non-existing bank: %d\n", bank); + continue; + } + /* The detection routine will directly or indirectly call + * jz_nand_select_chip(), so nand->banks has to contain the + * bank we're checking. + */ + nand->banks[chipnr] = bank; + if (jz_nand_detect_bank(pdev, nand, bank, chipnr, + &nand_maf_id, &nand_dev_id) == 0) + chipnr++; + else + nand->banks[chipnr] = 0; + } + if (chipnr == 0) { + dev_err(&pdev->dev, "No NAND chips found\n"); + goto err_gpio_busy; } if (pdata && pdata->ident_callback) {
@@ -364,8 +503,8 @@ static int __devinit jz_nand_probe(struct platform_device *pdev) ret = nand_scan_tail(mtd); if (ret) { - dev_err(&pdev->dev, "Failed to scan nand\n"); - goto err_gpio_free; + dev_err(&pdev->dev, "Failed to scan NAND\n"); + goto err_unclaim_banks; } ret = mtd_device_parse_register(mtd, NULL, NULL,
@@ -382,14 +521,21 @@ static int __devinit jz_nand_probe(struct platform_device *pdev) return 0; err_nand_release: - nand_release(&nand->mtd); -err_gpio_free: + nand_release(mtd); +err_unclaim_banks: + while (chipnr--) { + unsigned char bank = nand->banks[chipnr]; + gpio_free(JZ_GPIO_MEM_CS0 + bank - 1); + jz_nand_iounmap_resource(nand->bank_mem[bank - 1], + nand->bank_base[bank - 1]); + } + writel(0, nand->base + JZ_REG_NAND_CTRL); +err_gpio_busy: + if (pdata && gpio_is_valid(pdata->busy_gpio)) + gpio_free(pdata->busy_gpio); platform_set_drvdata(pdev, NULL); - gpio_free(pdata->busy_gpio); -err_iounmap_mem: - iounmap(nand->bank_base); err_iounmap_mmio: - iounmap(nand->base); + jz_nand_iounmap_resource(nand->mem, nand->base); err_free: kfree(nand); return ret;
@@ -398,16 +544,26 @@ err_free: static int __devexit jz_nand_remove(struct platform_device *pdev) { struct jz_nand *nand = platform_get_drvdata(pdev); + struct jz_nand_platform_data *pdata = pdev->dev.platform_data; + size_t i; nand_release(&nand->mtd); /* Deassert and disable all chips */ writel(0, nand->base + JZ_REG_NAND_CTRL); - iounmap(nand->bank_base); - release_mem_region(nand->bank_mem->start, resource_size(nand->bank_mem)); - iounmap(nand->base); - release_mem_region(nand->mem->start, resource_size(nand->mem)); + for (i = 0; i < JZ_NAND_NUM_BANKS; ++i) { + unsigned char bank = nand->banks[i]; + if (bank != 0) { + jz_nand_iounmap_resource(nand->bank_mem[bank - 1], + nand->bank_base[bank - 1]); + gpio_free(JZ_GPIO_MEM_CS0 + bank - 1); + } + } + if (pdata && gpio_is_valid(pdata->busy_gpio)) + gpio_free(pdata->busy_gpio); + + jz_nand_iounmap_resource(nand->mem, nand->base); platform_set_drvdata(pdev, NULL); kfree(nand);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7f681d0c9b..ac4fd756eda 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -9,6 +9,7 @@ */ #include
<linux/platform_device.h> +#include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/module.h>
@@ -18,6 +19,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> +#include <linux/omap-dma.h> #include <linux/io.h> #include <linux/slab.h>
@@ -123,7 +125,7 @@ struct omap_nand_info { int gpmc_cs; unsigned long phys_base; struct completion comp; - int dma_ch; + struct dma_chan *dma; int gpmc_irq; enum { OMAP_NAND_IO_READ = 0, /* read */
@@ -336,12 +338,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd, } /* - * omap_nand_dma_cb: callback on the completion of dma transfer - * @lch: logical channel - * @ch_satuts: channel status + * omap_nand_dma_callback: callback on the completion of dma transfer * @data: pointer to completion data structure */ -static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) +static void omap_nand_dma_callback(void *data) { complete((struct completion *) data); }
@@ -358,17 +358,13 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, { struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); + struct dma_async_tx_descriptor *tx; enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; - dma_addr_t dma_addr; - int ret; + struct scatterlist sg; unsigned long tim, limit; - - /* The fifo depth is 64 bytes max. - * But configure the FIFO-threahold to 32 to get a sync at each frame - * and frame length is 32 bytes. - */ - int buf_len = len >> 6; + unsigned n; + int ret; if (addr >= high_memory) { struct page *p1;
@@ -382,40 +378,33 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK); } - dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir); - if (dma_mapping_error(&info->pdev->dev, dma_addr)) { + sg_init_one(&sg, addr, len); + n = dma_map_sg(info->dma->device->dev, &sg, 1, dir); + if (n == 0) { dev_err(&info->pdev->dev, "Couldn't DMA map a %d byte buffer\n", len); goto out_copy; } - if (is_write) { - omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - info->phys_base, 0, 0); - omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - dma_addr, 0, 0); - omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, - 0x10, buf_len, OMAP_DMA_SYNC_FRAME, - OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC); - } else { - omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - info->phys_base, 0, 0); - omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - dma_addr, 0, 0); - omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, - 0x10, buf_len, OMAP_DMA_SYNC_FRAME, - OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); - } - /* configure and start prefetch transfer */ + tx = dmaengine_prep_slave_sg(info->dma, &sg, n, + is_write ?
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) + goto out_copy_unmap; + + tx->callback = omap_nand_dma_callback; + tx->callback_param = &info->comp; + dmaengine_submit(tx); + + /* configure and start prefetch transfer */ ret = gpmc_prefetch_enable(info->gpmc_cs, - PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); + PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); if (ret) /* PFPW engine is busy, use cpu copy method */ goto out_copy_unmap; init_completion(&info->comp); - - omap_start_dma(info->dma_ch); + dma_async_issue_pending(info->dma); /* setup and start DMA using dma_addr */ wait_for_completion(&info->comp);
@@ -427,11 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, /* disable and stop the PFPW engine */ gpmc_prefetch_reset(info->gpmc_cs); - dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); + dma_unmap_sg(info->dma->device->dev, &sg, 1, dir); return 0; out_copy_unmap: - dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); + dma_unmap_sg(info->dma->device->dev, &sg, 1, dir); out_copy: if (info->nand.options & NAND_BUSWIDTH_16) is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -1164,6 +1153,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) struct omap_nand_platform_data *pdata; int err; int i, offset; + dma_cap_mask_t mask; + unsigned sig; pdata = pdev->dev.platform_data; if (pdata == NULL) {
@@ -1244,18 +1235,30 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) break; case NAND_OMAP_PREFETCH_DMA: - err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", - omap_nand_dma_cb, &info->comp, &info->dma_ch); - if (err < 0) { - info->dma_ch = -1; - dev_err(&pdev->dev, "DMA request failed!\n"); + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + sig = OMAP24XX_DMA_GPMC; + info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig); + if (!info->dma) { + dev_err(&pdev->dev, "DMA engine request failed\n"); + err = -ENXIO; goto out_release_mem_region; } else { - omap_set_dma_dest_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - omap_set_dma_src_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - + struct dma_slave_config cfg; + + memset(&cfg, 0, sizeof(cfg)); + cfg.src_addr = info->phys_base; + cfg.dst_addr = info->phys_base; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = 16; + cfg.dst_maxburst = 16; + err = dmaengine_slave_config(info->dma, &cfg); + if (err) { + dev_err(&pdev->dev, "DMA engine slave config failed: %d\n", + err); + goto out_release_mem_region; + } info->nand.read_buf = omap_read_buf_dma_pref; info->nand.write_buf = omap_write_buf_dma_pref; }
@@ -1358,6 +1361,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) return 0; out_release_mem_region: + if (info->dma) + dma_release_channel(info->dma); release_mem_region(info->phys_base, NAND_IO_SIZE); out_free_info: kfree(info);
@@ -1373,8 +1378,8 @@ static int omap_nand_remove(struct platform_device *pdev) omap3_free_bch(&info->mtd); platform_set_drvdata(pdev, NULL); - if (info->dma_ch != -1) - omap_free_dma(info->dma_ch); + if (info->dma) + dma_release_channel(info->dma); if (info->gpmc_irq) free_irq(info->gpmc_irq, info);
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 513dc88a05c..fc5a868c436 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -183,6 +183,10 @@ static int __init orion_nand_probe(struct platform_device *pdev) return 0;
no_dev: + if (!IS_ERR(clk)) { + clk_disable_unprepare(clk); + clk_put(clk); + } platform_set_drvdata(pdev, NULL); iounmap(io_base); no_res:
@@ -214,7 +218,7 @@ static int __devexit orion_nand_remove(struct platform_device *pdev) #ifdef CONFIG_OF static struct of_device_id orion_nand_of_match_table[] = { - { .compatible = "mrvl,orion-nand", }, + { .compatible = "marvell,orion-nand", }, {}, }; #endif
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 738ee8dc16c..ea4b95b5451 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -29,7 +29,7 @@ config MTD_UBI_WL_THRESHOLD config MTD_UBI_BEB_RESERVE int "Percentage of reserved eraseblocks for bad eraseblocks handling" - default 1 + default 2 range 0 25 help If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index acec85deb6a..fb556787818 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -1026,7 +1026,7 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd, { int ubi_num; - dbg_gen("dettach MTD device"); + dbg_gen("detach MTD device"); err = get_user(ubi_num, (__user int32_t *)argp); if (err) { err = -EFAULT;
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 09d4f8d9d59..7c138030521 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -264,7 +264,7 @@ static struct dentry *dfs_rootdir; */ int ubi_debugfs_init(void) { - if (!IS_ENABLED(DEBUG_FS)) + if (!IS_ENABLED(CONFIG_DEBUG_FS)) return 0; dfs_rootdir = debugfs_create_dir("ubi", NULL);
@@ -284,7 +284,7 @@ int ubi_debugfs_init(void) */ void ubi_debugfs_exit(void) { - if (IS_ENABLED(DEBUG_FS)) + if (IS_ENABLED(CONFIG_DEBUG_FS)) debugfs_remove(dfs_rootdir); }
@@ -407,7 +407,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi) struct dentry *dent; struct ubi_debug_info *d = ubi->dbg; - if (!IS_ENABLED(DEBUG_FS)) + if (!IS_ENABLED(CONFIG_DEBUG_FS)) return 0; n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
@@ -477,6 +477,6 @@ out: */ void ubi_debugfs_exit_dev(struct ubi_device *ubi) { - if (IS_ENABLED(DEBUG_FS)) + if (IS_ENABLED(CONFIG_DEBUG_FS)) debugfs_remove_recursive(ubi->dbg->dfs_dir); }
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index f6a7d7ac4b9..8bbfb444b89 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -92,7 +92,30 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id) } /** - * ubi_calculate_rsvd_pool - calculate how many PEBs must be reserved for bad + * ubi_update_reserved - update bad eraseblock handling accounting data. + * @ubi: UBI device description object + * + * This function calculates the gap between current number of PEBs reserved for + * bad eraseblock handling and the required level of PEBs that must be + * reserved, and if necessary, reserves more PEBs to fill that gap, according + * to availability. Should be called with ubi->volumes_lock held. + */ +void ubi_update_reserved(struct ubi_device *ubi) +{ + int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs; + + if (need <= 0 || ubi->avail_pebs == 0) + return; + + need = min_t(int, need, ubi->avail_pebs); + ubi->avail_pebs -= need; + ubi->rsvd_pebs += need; + ubi->beb_rsvd_pebs += need; + ubi_msg("reserved more %d PEBs for bad PEB handling", need); +} + +/** + * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad * eraseblock handling.
* @ubi: UBI device description object */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index a1a81c9ea8c..84f66e3fa05 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -647,6 +647,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); int ubi_check_volume(struct ubi_device *ubi, int vol_id); +void ubi_update_reserved(struct ubi_device *ubi); void ubi_calculate_reserved(struct ubi_device *ubi); int ubi_check_pattern(const void *buf, uint8_t patt, int size);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 0669cff8ac3..9169e58c262 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -443,15 +443,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) spin_lock(&ubi->volumes_lock); ubi->rsvd_pebs -= reserved_pebs; ubi->avail_pebs += reserved_pebs; - i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs; - if (i > 0) { - i = ubi->avail_pebs >= i ? i : ubi->avail_pebs; - ubi->avail_pebs -= i; - ubi->rsvd_pebs += i; - ubi->beb_rsvd_pebs += i; - if (i > 0) - ubi_msg("reserve more %d PEBs", i); - } + ubi_update_reserved(ubi); ubi->vol_count -= 1; spin_unlock(&ubi->volumes_lock);
@@ -558,15 +550,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) spin_lock(&ubi->volumes_lock); ubi->rsvd_pebs += pebs; ubi->avail_pebs -= pebs; - pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs; - if (pebs > 0) { - pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs; - ubi->avail_pebs -= pebs; - ubi->rsvd_pebs += pebs; - ubi->beb_rsvd_pebs += pebs; - if (pebs > 0) - ubi_msg("reserve more %d PEBs", pebs); - } + ubi_update_reserved(ubi); for (i = 0; i < reserved_pebs; i++) new_mapping[i] = vol->eba_tbl[i]; kfree(vol->eba_tbl);
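
The omap2.c portion of this merge replaces the private OMAP DMA calls (omap_request_dma(), omap_set_dma_*_params(), omap_start_dma()) with the generic dmaengine slave API. The pattern is: request a DMA_SLAVE channel, describe the device FIFO with dmaengine_slave_config(), prepare a descriptor with dmaengine_prep_slave_sg(), then submit it, issue the pending work and wait for the completion callback. Below is a rough, self-contained sketch of that sequence, assuming a 3.6-era dmaengine provider; the NULL channel filter, the fifo_addr parameter and the example_* names are placeholders, whereas the real driver passes omap_dma_filter_fn with the OMAP24XX_DMA_GPMC request line.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>

/* Grab any slave-capable channel; a real driver narrows the choice with a
 * platform filter function and request-line number. */
static struct dma_chan *example_request_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, NULL, NULL);
}

static void example_dma_done(void *data)
{
	complete((struct completion *)data);	/* wake the waiting thread */
}

/* One memory-to-device transfer towards a 32-bit device FIFO at fifo_addr. */
static int example_dma_write(struct dma_chan *chan, dma_addr_t fifo_addr,
			     void *buf, size_t len)
{
	struct dma_slave_config cfg = {
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *tx;
	DECLARE_COMPLETION_ONSTACK(done);
	struct scatterlist sg;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE))
		return -ENOMEM;

	tx = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -EIO;
		goto unmap;
	}

	tx->callback = example_dma_done;
	tx->callback_param = &done;
	dmaengine_submit(tx);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* kick the engine */

	wait_for_completion(&done);
	ret = 0;
unmap:
	dma_unmap_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE);
	return ret;
}

Releasing the channel with dma_release_channel() on the error and remove paths, as the patch adds to omap_nand_probe() and omap_nand_remove(), completes the conversion.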
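
Similarly, the mtdsuper.c hunk tracks a VFS interface change: from v3.6 sget() takes the mount flags as its fourth argument and sets sb->s_flags itself, so callers drop the manual assignment. A small hypothetical mount helper showing the new call shape, assuming the 3.6-era VFS; example_mount and the NULL test callback are illustrative, not the driver's actual code.

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/err.h>

static struct dentry *example_mount(struct file_system_type *fs_type,
				    int flags, void *data,
				    int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *sb;
	int ret;

	/* flags is now passed to sget(), which initialises sb->s_flags;
	 * before v3.6 the caller had to assign sb->s_flags = flags itself. */
	sb = sget(fs_type, NULL, set_anon_super, flags, NULL);
	if (IS_ERR(sb))
		return ERR_CAST(sb);

	if (!sb->s_root) {
		/* Fresh superblock: fill it and mark it active. */
		ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
		if (ret < 0) {
			deactivate_locked_super(sb);
			return ERR_PTR(ret);
		}
		sb->s_flags |= MS_ACTIVE;
	}

	return dget(sb->s_root);
}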