author     Thomas Gleixner <tglx@linutronix.de>    2011-01-31 15:08:43 +0100
committer  Thomas Gleixner <tglx@linutronix.de>    2011-01-31 15:09:14 +0100
commit     51563cd53c4b1c1790fccd2e0af0e2b756589af9 (patch)
tree       d2fedfc654ab4fa011feaca262f95481a89e232a /drivers/mmc
parent     d123375425d7df4b6081a631fc1203fceafa59b2 (diff)
parent     8161239a8bcce9ad6b537c04a1fa3b5c68bae693 (diff)
Merge branch 'tip/rtmutex' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into core/locking
* git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace tip/rtmutex:
  rtmutex: Simplify PI algorithm and make highest prio task get lock
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/host/mmci.c      98
-rw-r--r--  drivers/mmc/host/mmci.h       5
-rw-r--r--  drivers/mmc/host/msm_sdcc.c  52
3 files changed, 43 insertions, 112 deletions
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 56302282566..2de12fe155d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -46,10 +46,6 @@ static unsigned int fmax = 515633;
  * is asserted (likewise for RX)
  * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
  * is asserted (likewise for RX)
- * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
- * and will not work at all.
- * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
- * using DMA.
  * @sdio: variant supports SDIO
  * @st_clkdiv: true if using a ST-specific clock divider algorithm
  */
@@ -59,8 +55,6 @@ struct variant_data {
         unsigned int            datalength_bits;
         unsigned int            fifosize;
         unsigned int            fifohalfsize;
-        bool                    broken_blockend;
-        bool                    broken_blockend_dma;
         bool                    sdio;
         bool                    st_clkdiv;
 };
@@ -76,7 +70,6 @@ static struct variant_data variant_u300 = {
         .fifohalfsize           = 8 * 4,
         .clkreg_enable          = 1 << 13, /* HWFCEN */
         .datalength_bits        = 16,
-        .broken_blockend_dma    = true,
         .sdio                   = true,
 };
 
@@ -86,7 +79,6 @@ static struct variant_data variant_ux500 = {
         .clkreg                 = MCI_CLK_ENABLE,
         .clkreg_enable          = 1 << 14, /* HWFCEN */
         .datalength_bits        = 24,
-        .broken_blockend        = true,
         .sdio                   = true,
         .st_clkdiv              = true,
 };
@@ -210,8 +202,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
         host->data = data;
         host->size = data->blksz * data->blocks;
         host->data_xfered = 0;
-        host->blockend = false;
-        host->dataend = false;
 
         mmci_init_sg(host, data);
 
@@ -288,21 +278,26 @@ static void
 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
               unsigned int status)
 {
-        struct variant_data *variant = host->variant;
-
         /* First check for errors */
         if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+                u32 remain, success;
+
+                /* Calculate how far we are into the transfer */
+                remain = readl(host->base + MMCIDATACNT) << 2;
+                success = data->blksz * data->blocks - remain;
+
                 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
-                if (status & MCI_DATACRCFAIL)
+                if (status & MCI_DATACRCFAIL) {
+                        /* Last block was not successful */
+                        host->data_xfered = ((success / data->blksz) - 1 * data->blksz);
                         data->error = -EILSEQ;
-                else if (status & MCI_DATATIMEOUT)
+                } else if (status & MCI_DATATIMEOUT) {
+                        host->data_xfered = success;
                         data->error = -ETIMEDOUT;
-                else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
+                } else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+                        host->data_xfered = success;
                         data->error = -EIO;
-
-                /* Force-complete the transaction */
-                host->blockend = true;
-                host->dataend = true;
+                }
 
                 /*
                  * We hit an error condition. Ensure that any data
@@ -321,61 +316,14 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
                 }
         }
 
-        /*
-         * On ARM variants in PIO mode, MCI_DATABLOCKEND
-         * is always sent first, and we increase the
-         * transfered number of bytes for that IRQ. Then
-         * MCI_DATAEND follows and we conclude the transaction.
-         *
-         * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
-         * doesn't seem to immediately clear from the status,
-         * so we can't use it keep count when only one irq is
-         * used because the irq will hit for other reasons, and
-         * then the flag is still up. So we use the MCI_DATAEND
-         * IRQ at the end of the entire transfer because
-         * MCI_DATABLOCKEND is broken.
-         *
-         * In the U300, the IRQs can arrive out-of-order,
-         * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
-         * so for this case we use the flags "blockend" and
-         * "dataend" to make sure both IRQs have arrived before
-         * concluding the transaction. (This does not apply
-         * to the Ux500 which doesn't fire MCI_DATABLOCKEND
-         * at all.) In DMA mode it suffers from the same problem
-         * as the Ux500.
-         */
-        if (status & MCI_DATABLOCKEND) {
-                /*
-                 * Just being a little over-cautious, we do not
-                 * use this progressive update if the hardware blockend
-                 * flag is unreliable: since it can stay high between
-                 * IRQs it will corrupt the transfer counter.
-                 */
-                if (!variant->broken_blockend)
-                        host->data_xfered += data->blksz;
-                host->blockend = true;
-        }
-
-        if (status & MCI_DATAEND)
-                host->dataend = true;
+        if (status & MCI_DATABLOCKEND)
+                dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
 
-        /*
-         * On variants with broken blockend we shall only wait for dataend,
-         * on others we must sync with the blockend signal since they can
-         * appear out-of-order.
-         */
-        if (host->dataend && (host->blockend || variant->broken_blockend)) {
+        if (status & MCI_DATAEND) {
                 mmci_stop_data(host);
 
-                /* Reset these flags */
-                host->blockend = false;
-                host->dataend = false;
-
-                /*
-                 * Variants with broken blockend flags need to handle the
-                 * end of the entire transfer here.
-                 */
-                if (variant->broken_blockend && !data->error)
+                if (!data->error)
+                        /* The error clause is handled above, success! */
                         host->data_xfered += data->blksz * data->blocks;
 
                 if (!data->stop) {
@@ -770,7 +718,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
         struct variant_data *variant = id->data;
         struct mmci_host *host;
         struct mmc_host *mmc;
-        unsigned int mask;
         int ret;
 
         /* must have platform data */
@@ -951,12 +898,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
                         goto irq0_free;
         }
 
-        mask = MCI_IRQENABLE;
-        /* Don't use the datablockend flag if it's broken */
-        if (variant->broken_blockend)
-                mask &= ~MCI_DATABLOCKEND;
-
-        writel(mask, host->base + MMCIMASK0);
+        writel(MCI_IRQENABLE, host->base + MMCIMASK0);
 
         amba_set_drvdata(dev, mmc);
 
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index df06f01aac8..c1df7b82d36 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -137,7 +137,7 @@
 #define MCI_IRQENABLE  \
         (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|    \
         MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|      \
-        MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
+        MCI_CMDRESPENDMASK|MCI_CMDSENTMASK)
 
 /* These interrupts are directed to IRQ1 when two IRQ lines are available */
 #define MCI_IRQ1MASK \
@@ -177,9 +177,6 @@ struct mmci_host {
         struct timer_list       timer;
         unsigned int            oldstat;
 
-        bool                    blockend;
-        bool                    dataend;
-
         /* pio stuff */
         struct sg_mapping_iter  sg_miter;
         unsigned int            size;
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 5decfd0bd61..153ab977a01 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -383,14 +383,30 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
         host->curr.user_pages = 0;
 
         box = &nc->cmd[0];
-        for (i = 0; i < host->dma.num_ents; i++) {
-                box->cmd = CMD_MODE_BOX;
 
-                /* Initialize sg dma address */
-                sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
-                                        + sg->offset;
+        /* location of command block must be 64 bit aligned */
+        BUG_ON(host->dma.cmd_busaddr & 0x07);
 
-                if (i == (host->dma.num_ents - 1))
+        nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
+        host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
+                               DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
+        host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
+
+        n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
+                       host->dma.num_ents, host->dma.dir);
+        if (n == 0) {
+                printk(KERN_ERR "%s: Unable to map in all sg elements\n",
+                       mmc_hostname(host->mmc));
+                host->dma.sg = NULL;
+                host->dma.num_ents = 0;
+                return -ENOMEM;
+        }
+
+        for_each_sg(host->dma.sg, sg, n, i) {
+
+                box->cmd = CMD_MODE_BOX;
+
+                if (i == n - 1)
                         box->cmd |= CMD_LC;
                 rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
                         (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -418,27 +434,6 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
                         box->cmd |= CMD_DST_CRCI(crci);
                 }
                 box++;
-                sg++;
-        }
-
-        /* location of command block must be 64 bit aligned */
-        BUG_ON(host->dma.cmd_busaddr & 0x07);
-
-        nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
-        host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
-                        DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
-        host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
-
-        n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
-                        host->dma.num_ents, host->dma.dir);
-/* dsb inside dma_map_sg will write nc out to mem as well */
-
-        if (n != host->dma.num_ents) {
-                printk(KERN_ERR "%s: Unable to map in all sg elements\n",
-                        mmc_hostname(host->mmc));
-                host->dma.sg = NULL;
-                host->dma.num_ents = 0;
-                return -ENOMEM;
         }
 
         return 0;
@@ -1331,9 +1326,6 @@ msmsdcc_probe(struct platform_device *pdev)
         if (host->timer.function)
                 pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
 
-#if BUSCLK_PWRSAVE
-        msmsdcc_disable_clocks(host, 1);
-#endif
         return 0;
  cmd_irq_free:
         free_irq(cmd_irqres->start, host);