Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/card/sdio_uart.c      |   4
-rw-r--r--  drivers/mmc/host/Kconfig          |   2
-rw-r--r--  drivers/mmc/host/mmc_spi.c        |   4
-rw-r--r--  drivers/mmc/host/mmci.c           | 348
-rw-r--r--  drivers/mmc/host/mmci.h           |  15
-rw-r--r--  drivers/mmc/host/msm_sdcc.c       |  40
-rw-r--r--  drivers/mmc/host/msm_sdcc.h       |   1
-rw-r--r--  drivers/mmc/host/of_mmc_spi.c     |  26
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c     |  36
-rw-r--r--  drivers/mmc/host/sdhci-of-core.c  |  15
10 files changed, 418 insertions(+), 73 deletions(-)
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c index a0716967b7c..c8c9edb3d7c 100644 --- a/drivers/mmc/card/sdio_uart.c +++ b/drivers/mmc/card/sdio_uart.c @@ -956,7 +956,7 @@ static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state) return 0; } -static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file) +static int sdio_uart_tiocmget(struct tty_struct *tty) { struct sdio_uart_port *port = tty->driver_data; int result; @@ -970,7 +970,7 @@ static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file) return result; } -static int sdio_uart_tiocmset(struct tty_struct *tty, struct file *file, +static int sdio_uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct sdio_uart_port *port = tty->driver_data; diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 398d7d54c52..1a21c6427a1 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -225,7 +225,7 @@ config MMC_OMAP config MMC_OMAP_HS tristate "TI OMAP High Speed Multimedia Card Interface support" - depends on ARCH_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4 + depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4 help This selects the TI OMAP High Speed Multimedia card Interface. If you have an OMAP2430 or OMAP3 board or OMAP4 board with a diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index fd877f633dd..2f7fc0c5146 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1516,21 +1516,17 @@ static int __devexit mmc_spi_remove(struct spi_device *spi) return 0; } -#if defined(CONFIG_OF) static struct of_device_id mmc_spi_of_match_table[] __devinitdata = { { .compatible = "mmc-spi-slot", }, {}, }; -#endif static struct spi_driver mmc_spi_driver = { .driver = { .name = "mmc_spi", .bus = &spi_bus_type, .owner = THIS_MODULE, -#if defined(CONFIG_OF) .of_match_table = mmc_spi_of_match_table, -#endif }, .probe = mmc_spi_probe, .remove = __devexit_p(mmc_spi_remove), diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 2d6de3e03e2..5bbb87d1025 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -2,7 +2,7 @@ * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver * * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. - * Copyright (C) 2010 ST-Ericsson AB. + * Copyright (C) 2010 ST-Ericsson SA * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -25,8 +25,10 @@ #include <linux/clk.h> #include <linux/scatterlist.h> #include <linux/gpio.h> -#include <linux/amba/mmci.h> #include <linux/regulator/consumer.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/amba/mmci.h> #include <asm/div64.h> #include <asm/io.h> @@ -142,9 +144,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) host->mrq = NULL; host->cmd = NULL; - if (mrq->data) - mrq->data->bytes_xfered = host->data_xfered; - /* * Need to drop the host lock here; mmc_request_done may call * back into the driver... @@ -189,6 +188,248 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); } +/* + * All the DMA operation mode stuff goes inside this ifdef. + * This assumes that you have a generic DMA device interface, + * no custom DMA interfaces are supported. 
+ */ +#ifdef CONFIG_DMA_ENGINE +static void __devinit mmci_dma_setup(struct mmci_host *host) +{ + struct mmci_platform_data *plat = host->plat; + const char *rxname, *txname; + dma_cap_mask_t mask; + + if (!plat || !plat->dma_filter) { + dev_info(mmc_dev(host->mmc), "no DMA platform data\n"); + return; + } + + /* Try to acquire a generic DMA engine slave channel */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* + * If only an RX channel is specified, the driver will + * attempt to use it bidirectionally, however if it is + * is specified but cannot be located, DMA will be disabled. + */ + if (plat->dma_rx_param) { + host->dma_rx_channel = dma_request_channel(mask, + plat->dma_filter, + plat->dma_rx_param); + /* E.g if no DMA hardware is present */ + if (!host->dma_rx_channel) + dev_err(mmc_dev(host->mmc), "no RX DMA channel\n"); + } + + if (plat->dma_tx_param) { + host->dma_tx_channel = dma_request_channel(mask, + plat->dma_filter, + plat->dma_tx_param); + if (!host->dma_tx_channel) + dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n"); + } else { + host->dma_tx_channel = host->dma_rx_channel; + } + + if (host->dma_rx_channel) + rxname = dma_chan_name(host->dma_rx_channel); + else + rxname = "none"; + + if (host->dma_tx_channel) + txname = dma_chan_name(host->dma_tx_channel); + else + txname = "none"; + + dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n", + rxname, txname); + + /* + * Limit the maximum segment size in any SG entry according to + * the parameters of the DMA engine device. + */ + if (host->dma_tx_channel) { + struct device *dev = host->dma_tx_channel->device->dev; + unsigned int max_seg_size = dma_get_max_seg_size(dev); + + if (max_seg_size < host->mmc->max_seg_size) + host->mmc->max_seg_size = max_seg_size; + } + if (host->dma_rx_channel) { + struct device *dev = host->dma_rx_channel->device->dev; + unsigned int max_seg_size = dma_get_max_seg_size(dev); + + if (max_seg_size < host->mmc->max_seg_size) + host->mmc->max_seg_size = max_seg_size; + } +} + +/* + * This is used in __devinit or __devexit so inline it + * so it can be discarded. + */ +static inline void mmci_dma_release(struct mmci_host *host) +{ + struct mmci_platform_data *plat = host->plat; + + if (host->dma_rx_channel) + dma_release_channel(host->dma_rx_channel); + if (host->dma_tx_channel && plat->dma_tx_param) + dma_release_channel(host->dma_tx_channel); + host->dma_rx_channel = host->dma_tx_channel = NULL; +} + +static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) +{ + struct dma_chan *chan = host->dma_current; + enum dma_data_direction dir; + u32 status; + int i; + + /* Wait up to 1ms for the DMA to complete */ + for (i = 0; ; i++) { + status = readl(host->base + MMCISTATUS); + if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) + break; + udelay(10); + } + + /* + * Check to see whether we still have some data left in the FIFO - + * this catches DMA controllers which are unable to monitor the + * DMALBREQ and DMALSREQ signals while allowing us to DMA to non- + * contiguous buffers. On TX, we'll get a FIFO underrun error. + */ + if (status & MCI_RXDATAAVLBLMASK) { + dmaengine_terminate_all(chan); + if (!data->error) + data->error = -EIO; + } + + if (data->flags & MMC_DATA_WRITE) { + dir = DMA_TO_DEVICE; + } else { + dir = DMA_FROM_DEVICE; + } + + dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); + + /* + * Use of DMA with scatter-gather is impossible. + * Give up with DMA and switch back to PIO mode. 
+ */ + if (status & MCI_RXDATAAVLBLMASK) { + dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n"); + mmci_dma_release(host); + } +} + +static void mmci_dma_data_error(struct mmci_host *host) +{ + dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); + dmaengine_terminate_all(host->dma_current); +} + +static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) +{ + struct variant_data *variant = host->variant; + struct dma_slave_config conf = { + .src_addr = host->phybase + MMCIFIFO, + .dst_addr = host->phybase + MMCIFIFO, + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ + .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ + }; + struct mmc_data *data = host->data; + struct dma_chan *chan; + struct dma_device *device; + struct dma_async_tx_descriptor *desc; + int nr_sg; + + host->dma_current = NULL; + + if (data->flags & MMC_DATA_READ) { + conf.direction = DMA_FROM_DEVICE; + chan = host->dma_rx_channel; + } else { + conf.direction = DMA_TO_DEVICE; + chan = host->dma_tx_channel; + } + + /* If there's no DMA channel, fall back to PIO */ + if (!chan) + return -EINVAL; + + /* If less than or equal to the fifo size, don't bother with DMA */ + if (host->size <= variant->fifosize) + return -EINVAL; + + device = chan->device; + nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction); + if (nr_sg == 0) + return -EINVAL; + + dmaengine_slave_config(chan, &conf); + desc = device->device_prep_slave_sg(chan, data->sg, nr_sg, + conf.direction, DMA_CTRL_ACK); + if (!desc) + goto unmap_exit; + + /* Okay, go for it. */ + host->dma_current = chan; + + dev_vdbg(mmc_dev(host->mmc), + "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", + data->sg_len, data->blksz, data->blocks, data->flags); + dmaengine_submit(desc); + dma_async_issue_pending(chan); + + datactrl |= MCI_DPSM_DMAENABLE; + + /* Trigger the DMA transfer */ + writel(datactrl, host->base + MMCIDATACTRL); + + /* + * Let the MMCI say when the data is ended and it's time + * to fire next DMA request. 
When that happens, MMCI will + * call mmci_data_end() + */ + writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, + host->base + MMCIMASK0); + return 0; + +unmap_exit: + dmaengine_terminate_all(chan); + dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction); + return -ENOMEM; +} +#else +/* Blank functions if the DMA engine is not available */ +static inline void mmci_dma_setup(struct mmci_host *host) +{ +} + +static inline void mmci_dma_release(struct mmci_host *host) +{ +} + +static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) +{ +} + +static inline void mmci_dma_data_error(struct mmci_host *host) +{ +} + +static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) +{ + return -ENOSYS; +} +#endif + static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) { struct variant_data *variant = host->variant; @@ -202,9 +443,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) host->data = data; host->size = data->blksz * data->blocks; - host->data_xfered = 0; - - mmci_init_sg(host, data); + data->bytes_xfered = 0; clks = (unsigned long long)data->timeout_ns * host->cclk; do_div(clks, 1000000000UL); @@ -219,15 +458,29 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) BUG_ON(1 << blksz_bits != data->blksz); datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; - if (data->flags & MMC_DATA_READ) { + + if (data->flags & MMC_DATA_READ) datactrl |= MCI_DPSM_DIRECTION; + + /* + * Attempt to use DMA operation mode, if this + * should fail, fall back to PIO mode + */ + if (!mmci_dma_start_data(host, datactrl)) + return; + + /* IRQ mode, map the SG list for CPU reading/writing */ + mmci_init_sg(host, data); + + if (data->flags & MMC_DATA_READ) { irqmask = MCI_RXFIFOHALFFULLMASK; /* - * If we have less than a FIFOSIZE of bytes to transfer, - * trigger a PIO interrupt as soon as any data is available. + * If we have less than the fifo 'half-full' threshold to + * transfer, trigger a PIO interrupt as soon as any data + * is available. */ - if (host->size < variant->fifosize) + if (host->size < variant->fifohalfsize) irqmask |= MCI_RXDATAAVLBLMASK; } else { /* @@ -283,49 +536,51 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { u32 remain, success; - /* Calculate how far we are into the transfer */ + /* Terminate the DMA transfer */ + if (dma_inprogress(host)) + mmci_dma_data_error(host); + + /* + * Calculate how far we are into the transfer. Note that + * the data counter gives the number of bytes transferred + * on the MMC bus, not on the host side. On reads, this + * can be as much as a FIFO-worth of data ahead. This + * matters for FIFO overruns only. 
+ */ remain = readl(host->base + MMCIDATACNT); success = data->blksz * data->blocks - remain; - dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status); + dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", + status, success); if (status & MCI_DATACRCFAIL) { /* Last block was not successful */ - host->data_xfered = round_down(success - 1, data->blksz); + success -= 1; data->error = -EILSEQ; } else if (status & MCI_DATATIMEOUT) { - host->data_xfered = round_down(success, data->blksz); data->error = -ETIMEDOUT; - } else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) { - host->data_xfered = round_down(success, data->blksz); + } else if (status & MCI_TXUNDERRUN) { + data->error = -EIO; + } else if (status & MCI_RXOVERRUN) { + if (success > host->variant->fifosize) + success -= host->variant->fifosize; + else + success = 0; data->error = -EIO; } - - /* - * We hit an error condition. Ensure that any data - * partially written to a page is properly coherent. - */ - if (data->flags & MMC_DATA_READ) { - struct sg_mapping_iter *sg_miter = &host->sg_miter; - unsigned long flags; - - local_irq_save(flags); - if (sg_miter_next(sg_miter)) { - flush_dcache_page(sg_miter->page); - sg_miter_stop(sg_miter); - } - local_irq_restore(flags); - } + data->bytes_xfered = round_down(success, data->blksz); } if (status & MCI_DATABLOCKEND) dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); if (status & MCI_DATAEND || data->error) { + if (dma_inprogress(host)) + mmci_dma_unmap(host, data); mmci_stop_data(host); if (!data->error) /* The error clause is handled above, success! */ - host->data_xfered += data->blksz * data->blocks; + data->bytes_xfered = data->blksz * data->blocks; if (!data->stop) { mmci_request_end(host, data->mrq); @@ -498,9 +753,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) if (remain) break; - if (status & MCI_RXACTIVE) - flush_dcache_page(sg_miter->page); - status = readl(base + MMCISTATUS); } while (1); @@ -509,10 +761,10 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) local_irq_restore(flags); /* - * If we're nearing the end of the read, switch to - * "any data available" mode. + * If we have less than the fifo 'half-full' threshold to transfer, + * trigger a PIO interrupt as soon as any data is available. 
*/ - if (status & MCI_RXACTIVE && host->size < variant->fifosize) + if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); /* @@ -713,7 +965,8 @@ static const struct mmc_host_ops mmci_ops = { .get_cd = mmci_get_cd, }; -static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) +static int __devinit mmci_probe(struct amba_device *dev, + const struct amba_id *id) { struct mmci_platform_data *plat = dev->dev.platform_data; struct variant_data *variant = id->data; @@ -776,6 +1029,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", host->mclk); } + host->phybase = dev->res.start; host->base = ioremap(dev->res.start, resource_size(&dev->res)); if (!host->base) { ret = -ENOMEM; @@ -903,9 +1157,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) amba_set_drvdata(dev, mmc); - dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n", - mmc_hostname(mmc), amba_part(dev), amba_rev(dev), - (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]); + dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n", + mmc_hostname(mmc), amba_part(dev), amba_manf(dev), + amba_rev(dev), (unsigned long long)dev->res.start, + dev->irq[0], dev->irq[1]); + + mmci_dma_setup(host); mmc_add_host(mmc); @@ -952,6 +1209,7 @@ static int __devexit mmci_remove(struct amba_device *dev) writel(0, host->base + MMCICOMMAND); writel(0, host->base + MMCIDATACTRL); + mmci_dma_release(host); free_irq(dev->irq[0], host); if (!host->singleirq) free_irq(dev->irq[1], host); diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index c1df7b82d36..ec9a7bc6d0d 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -148,8 +148,10 @@ struct clk; struct variant_data; +struct dma_chan; struct mmci_host { + phys_addr_t phybase; void __iomem *base; struct mmc_request *mrq; struct mmc_command *cmd; @@ -161,8 +163,6 @@ struct mmci_host { int gpio_cd_irq; bool singleirq; - unsigned int data_xfered; - spinlock_t lock; unsigned int mclk; @@ -181,5 +181,16 @@ struct mmci_host { struct sg_mapping_iter sg_miter; unsigned int size; struct regulator *vcc; + +#ifdef CONFIG_DMA_ENGINE + /* DMA stuff */ + struct dma_chan *dma_current; + struct dma_chan *dma_rx_channel; + struct dma_chan *dma_tx_channel; + +#define dma_inprogress(host) ((host)->dma_current) +#else +#define dma_inprogress(host) (0) +#endif }; diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index 67b08819556..a4c865a5286 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -36,6 +36,7 @@ #include <linux/io.h> #include <linux/memory.h> #include <linux/gfp.h> +#include <linux/gpio.h> #include <asm/cacheflush.h> #include <asm/div64.h> @@ -933,6 +934,38 @@ msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq) spin_unlock_irqrestore(&host->lock, flags); } +static void msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable) +{ + struct msm_mmc_gpio_data *curr; + int i, rc = 0; + + if (!host->plat->gpio_data && host->gpio_config_status == enable) + return; + + curr = host->plat->gpio_data; + for (i = 0; i < curr->size; i++) { + if (enable) { + rc = gpio_request(curr->gpio[i].no, + curr->gpio[i].name); + if (rc) { + pr_err("%s: gpio_request(%d, %s) failed %d\n", + mmc_hostname(host->mmc), + curr->gpio[i].no, + curr->gpio[i].name, rc); + goto free_gpios; + } + } else { + gpio_free(curr->gpio[i].no); + } + } + 
host->gpio_config_status = enable; + return; + +free_gpios: + for (; i >= 0; i--) + gpio_free(curr->gpio[i].no); +} + static void msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { @@ -945,6 +978,8 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) msmsdcc_enable_clocks(host); + spin_unlock_irqrestore(&host->lock, flags); + if (ios->clock) { if (ios->clock != host->clk_rate) { rc = clk_set_rate(host->clk, ios->clock); @@ -971,9 +1006,11 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_OFF: + msmsdcc_setup_gpio(host, false); break; case MMC_POWER_UP: pwr |= MCI_PWR_UP; + msmsdcc_setup_gpio(host, true); break; case MMC_POWER_ON: pwr |= MCI_PWR_ON; @@ -990,9 +1027,10 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) msmsdcc_writel(host, pwr, MMCIPOWER); } #if BUSCLK_PWRSAVE + spin_lock_irqsave(&host->lock, flags); msmsdcc_disable_clocks(host, 1); -#endif spin_unlock_irqrestore(&host->lock, flags); +#endif } static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable) diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h index 939557af266..42d7bbc977c 100644 --- a/drivers/mmc/host/msm_sdcc.h +++ b/drivers/mmc/host/msm_sdcc.h @@ -243,6 +243,7 @@ struct msmsdcc_host { unsigned int cmd_datactrl; struct mmc_command *cmd_cmd; u32 cmd_c; + bool gpio_config_status; bool prog_scan; bool prog_enable; diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 1247e5de9fa..5530def54e5 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c @@ -34,6 +34,7 @@ enum { struct of_mmc_spi { int gpios[NUM_GPIOS]; bool alow_gpios[NUM_GPIOS]; + int detect_irq; struct mmc_spi_platform_data pdata; }; @@ -61,6 +62,22 @@ static int of_mmc_spi_get_ro(struct device *dev) return of_mmc_spi_read_gpio(dev, WP_GPIO); } +static int of_mmc_spi_init(struct device *dev, + irqreturn_t (*irqhandler)(int, void *), void *mmc) +{ + struct of_mmc_spi *oms = to_of_mmc_spi(dev); + + return request_threaded_irq(oms->detect_irq, NULL, irqhandler, 0, + dev_name(dev), mmc); +} + +static void of_mmc_spi_exit(struct device *dev, void *mmc) +{ + struct of_mmc_spi *oms = to_of_mmc_spi(dev); + + free_irq(oms->detect_irq, mmc); +} + struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) { struct device *dev = &spi->dev; @@ -121,8 +138,13 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) if (gpio_is_valid(oms->gpios[WP_GPIO])) oms->pdata.get_ro = of_mmc_spi_get_ro; - /* We don't support interrupts yet, let's poll. 
*/ - oms->pdata.caps |= MMC_CAP_NEEDS_POLL; + oms->detect_irq = irq_of_parse_and_map(np, 0); + if (oms->detect_irq != NO_IRQ) { + oms->pdata.init = of_mmc_spi_init; + oms->pdata.exit = of_mmc_spi_exit; + } else { + oms->pdata.caps |= MMC_CAP_NEEDS_POLL; + } dev->platform_data = &oms->pdata; return dev->platform_data; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 078fdf11af0..158c0ee53b2 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -118,7 +118,7 @@ #define MMC_TIMEOUT_MS 20 #define OMAP_MMC_MASTER_CLOCK 96000000 -#define DRIVER_NAME "mmci-omap-hs" +#define DRIVER_NAME "omap_hsmmc" /* Timeouts for entering power saving states on inactivity, msec */ #define OMAP_MMC_DISABLED_TIMEOUT 100 @@ -260,7 +260,7 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on, return ret; } -static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on, +static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on, int vdd) { struct omap_hsmmc_host *host = @@ -316,6 +316,12 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on, return ret; } +static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on, + int vdd) +{ + return 0; +} + static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep, int vdd, int cardsleep) { @@ -326,7 +332,7 @@ static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep, return regulator_set_mode(host->vcc, mode); } -static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep, +static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep, int vdd, int cardsleep) { struct omap_hsmmc_host *host = @@ -365,6 +371,12 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep, return regulator_enable(host->vcc_aux); } +static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep, + int vdd, int cardsleep) +{ + return 0; +} + static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) { struct regulator *reg; @@ -379,10 +391,14 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) break; case OMAP_MMC2_DEVID: case OMAP_MMC3_DEVID: + case OMAP_MMC5_DEVID: /* Off-chip level shifting, or none */ - mmc_slot(host).set_power = omap_hsmmc_23_set_power; - mmc_slot(host).set_sleep = omap_hsmmc_23_set_sleep; + mmc_slot(host).set_power = omap_hsmmc_235_set_power; + mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep; break; + case OMAP_MMC4_DEVID: + mmc_slot(host).set_power = omap_hsmmc_4_set_power; + mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep; default: pr_err("MMC%d configuration not supported!\n", host->id); return -EINVAL; @@ -1555,7 +1571,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) break; } - if (host->id == OMAP_MMC1_DEVID) { + if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { /* Only MMC1 can interface at 3V without some flavor * of external transceiver; but they all handle 1.8V. 
*/ @@ -1647,7 +1663,7 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host) u32 hctl, capa, value; /* Only MMC1 supports 3.0V */ - if (host->id == OMAP_MMC1_DEVID) { + if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { hctl = SDVS30; capa = VS30 | VS18; } else { @@ -2101,14 +2117,14 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) /* we start off in DISABLED state */ host->dpm_state = DISABLED; - if (mmc_host_enable(host->mmc) != 0) { + if (clk_enable(host->iclk) != 0) { clk_put(host->iclk); clk_put(host->fclk); goto err1; } - if (clk_enable(host->iclk) != 0) { - mmc_host_disable(host->mmc); + if (mmc_host_enable(host->mmc) != 0) { + clk_disable(host->iclk); clk_put(host->iclk); clk_put(host->fclk); goto err1; diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c index dd84124f420..f9b611fc773 100644 --- a/drivers/mmc/host/sdhci-of-core.c +++ b/drivers/mmc/host/sdhci-of-core.c @@ -124,17 +124,20 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np) #endif } -static int __devinit sdhci_of_probe(struct platform_device *ofdev, - const struct of_device_id *match) +static int __devinit sdhci_of_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; - struct sdhci_of_data *sdhci_of_data = match->data; + struct sdhci_of_data *sdhci_of_data; struct sdhci_host *host; struct sdhci_of_host *of_host; const __be32 *clk; int size; int ret; + if (!ofdev->dev.of_match) + return -EINVAL; + sdhci_of_data = ofdev->dev.of_match->data; + if (!of_device_is_available(np)) return -ENODEV; @@ -217,7 +220,7 @@ static const struct of_device_id sdhci_of_match[] = { }; MODULE_DEVICE_TABLE(of, sdhci_of_match); -static struct of_platform_driver sdhci_of_driver = { +static struct platform_driver sdhci_of_driver = { .driver = { .name = "sdhci-of", .owner = THIS_MODULE, @@ -231,13 +234,13 @@ static struct of_platform_driver sdhci_of_driver = { static int __init sdhci_of_init(void) { - return of_register_platform_driver(&sdhci_of_driver); + return platform_driver_register(&sdhci_of_driver); } module_init(sdhci_of_init); static void __exit sdhci_of_exit(void) { - of_unregister_platform_driver(&sdhci_of_driver); + platform_driver_unregister(&sdhci_of_driver); } module_exit(sdhci_of_exit); |
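The mmci.c changes above teach the driver to pull its DMA configuration from struct mmci_platform_data: mmci_dma_setup() uses plat->dma_filter together with plat->dma_rx_param and plat->dma_tx_param to claim DMA_SLAVE channels via dma_request_channel(), and the driver falls back to the existing PIO/IRQ path whenever a channel is missing or mmci_dma_start_data() declines the transfer. As a rough, hedged sketch (not part of this patch), board code might wire those fields up as follows; my_dma_filter and the MY_MMCI_DMA_* identifiers are hypothetical placeholders for whatever filter function and request-line cookies the platform's DMA engine driver actually exports:

    /*
     * Illustrative sketch only: populating the mmci_platform_data DMA
     * hooks consumed by mmci_dma_setup().  The filter below accepts any
     * channel; a real board would use the filter function provided by
     * its DMA engine driver and its own request-line identifiers.
     */
    #include <linux/dmaengine.h>
    #include <linux/amba/mmci.h>

    /* Hypothetical request-line cookies understood by the DMA driver */
    #define MY_MMCI_DMA_RX	((void *) 4)
    #define MY_MMCI_DMA_TX	((void *) 5)

    static bool my_dma_filter(struct dma_chan *chan, void *param)
    {
    	/*
    	 * A real filter checks that 'chan' can serve the request signal
    	 * described by 'param'; this stub accepts the first channel
    	 * offered, purely for illustration.
    	 */
    	return true;
    }

    static struct mmci_platform_data mmci0_plat_data = {
    	/* With these set, mmci_dma_setup() requests DMA_SLAVE channels;
    	 * leaving them NULL keeps the driver in pure PIO/IRQ mode. */
    	.dma_filter	= my_dma_filter,
    	.dma_rx_param	= MY_MMCI_DMA_RX,
    	.dma_tx_param	= MY_MMCI_DMA_TX,
    };

If only dma_rx_param is supplied, mmci_dma_setup() reuses the RX channel for TX; and because mmci_dma_start_data() returns an error for transfers no larger than the FIFO or for scatterlists that cannot be mapped, DMA remains an optimization layered on top of the unchanged PIO path.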