Diffstat (limited to 'drivers')

-rw-r--r--  drivers/dma/amba-pl08x.c     | 52
-rw-r--r--  drivers/dma/at_hdmac.c       | 15
-rw-r--r--  drivers/dma/at_hdmac_regs.h  | 21
-rw-r--r--  drivers/dma/coh901318.c      |  2
-rw-r--r--  drivers/dma/coh901318_lli.c  |  4
-rw-r--r--  drivers/dma/dw_dmac.c        | 26
-rw-r--r--  drivers/dma/imx-dma.c        | 12
-rw-r--r--  drivers/dma/imx-sdma.c       | 68
-rw-r--r--  drivers/dma/intel_mid_dma.c  |  8
-rw-r--r--  drivers/dma/mxs-dma.c        |  6
-rw-r--r--  drivers/dma/pch_dma.c        |  2
-rw-r--r--  drivers/dma/pl330.c          |  1
-rw-r--r--  drivers/dma/ste_dma40.c      |  2

13 files changed, 143 insertions(+), 76 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 3d704abd791..49ecbbb8932 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -95,10 +95,14 @@ static struct amba_driver pl08x_amba_driver;
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  * @channels: the number of channels available in this variant
  * @dualmaster: whether this version supports dual AHB masters or not.
+ * @nomadik: whether the channels have Nomadik security extension bits
+ *    that need to be checked for permission before use and some registers are
+ *    missing
  */
 struct vendor_data {
     u8 channels;
     bool dualmaster;
+    bool nomadik;
 };
 
 /*
@@ -385,7 +389,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 
         spin_lock_irqsave(&ch->lock, flags);
 
-        if (!ch->serving) {
+        if (!ch->locked && !ch->serving) {
             ch->serving = virt_chan;
             ch->signal = -1;
             spin_unlock_irqrestore(&ch->lock, flags);
@@ -1324,7 +1328,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
     int ret, tmp;
 
     dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
-            __func__, sgl->length, plchan->name);
+            __func__, sg_dma_len(sgl), plchan->name);
 
     txd = pl08x_get_txd(plchan, flags);
     if (!txd) {
@@ -1378,11 +1382,11 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 
         dsg->len = sg_dma_len(sg);
         if (direction == DMA_MEM_TO_DEV) {
-            dsg->src_addr = sg_phys(sg);
+            dsg->src_addr = sg_dma_address(sg);
             dsg->dst_addr = slave_addr;
         } else {
             dsg->src_addr = slave_addr;
-            dsg->dst_addr = sg_phys(sg);
+            dsg->dst_addr = sg_dma_address(sg);
         }
     }
 
@@ -1484,6 +1488,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
  */
 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 {
+    /* The Nomadik variant does not have the config register */
+    if (pl08x->vd->nomadik)
+        return;
     writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
 }
 
@@ -1616,7 +1623,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
             __func__, err);
         writel(err, pl08x->base + PL080_ERR_CLEAR);
     }
-    tc = readl(pl08x->base + PL080_INT_STATUS);
+    tc = readl(pl08x->base + PL080_TC_STATUS);
     if (tc)
         writel(tc, pl08x->base + PL080_TC_CLEAR);
 
@@ -1773,8 +1780,10 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
         spin_lock_irqsave(&ch->lock, flags);
         virt_chan = ch->serving;
 
-        seq_printf(s, "%d\t\t%s\n",
-               ch->id, virt_chan ? virt_chan->name : "(none)");
+        seq_printf(s, "%d\t\t%s%s\n",
+               ch->id,
+               virt_chan ? virt_chan->name : "(none)",
+               ch->locked ? " LOCKED" : "");
 
         spin_unlock_irqrestore(&ch->lock, flags);
     }
@@ -1918,7 +1927,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
     }
 
     /* Initialize physical channels */
-    pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
+    pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
             GFP_KERNEL);
     if (!pl08x->phy_chans) {
         dev_err(&adev->dev, "%s failed to allocate "
@@ -1933,8 +1942,23 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
         ch->id = i;
         ch->base = pl08x->base + PL080_Cx_BASE(i);
         spin_lock_init(&ch->lock);
-        ch->serving = NULL;
         ch->signal = -1;
+
+        /*
+         * Nomadik variants can have channels that are locked
+         * down for the secure world only. Lock up these channels
+         * by perpetually serving a dummy virtual channel.
+         */
+        if (vd->nomadik) {
+            u32 val;
+
+            val = readl(ch->base + PL080_CH_CONFIG);
+            if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
+                dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
+                ch->locked = true;
+            }
+        }
+
         dev_dbg(&adev->dev, "physical channel %d is %s\n",
             i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
     }
@@ -2017,6 +2041,12 @@ static struct vendor_data vendor_pl080 = {
     .dualmaster = true,
 };
 
+static struct vendor_data vendor_nomadik = {
+    .channels = 8,
+    .dualmaster = true,
+    .nomadik = true,
+};
+
 static struct vendor_data vendor_pl081 = {
     .channels = 2,
     .dualmaster = false,
@@ -2037,9 +2067,9 @@ static struct amba_id pl08x_ids[] = {
     },
     /* Nomadik 8815 PL080 variant */
     {
-        .id    = 0x00280880,
+        .id    = 0x00280080,
         .mask  = 0x00ffffff,
-        .data  = &vendor_pl080,
+        .data  = &vendor_nomadik,
     },
     { 0, 0 },
 };
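Note: the recurring change in this series is replacing sg_phys(sg) and sg->length with sg_dma_address(sg) and sg_dma_len(sg). After dma_map_sg() the DMA address and length of an entry can differ from the CPU-visible physical address and length (for example behind an IOMMU, or when adjacent entries are merged), so only the sg_dma_* accessors are valid for programming the controller. A minimal sketch of that pattern follows; my_hw_prog() and my_map_and_program() are hypothetical names, not pl08x code.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: program one hardware descriptor. */
static void my_hw_prog(struct device *dev, dma_addr_t addr, unsigned int len)
{
    dev_dbg(dev, "desc: %#llx + %u\n", (unsigned long long)addr, len);
}

static int my_map_and_program(struct device *dev, struct scatterlist *sgl,
                              int nents)
{
    struct scatterlist *sg;
    int i, mapped;

    mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
    if (!mapped)
        return -ENOMEM;

    /* Iterate over the mapped count and use only the sg_dma_* accessors. */
    for_each_sg(sgl, sg, mapped, i)
        my_hw_prog(dev, sg_dma_address(sg), sg_dma_len(sg));

    /* The buffers stay mapped until the transfer completes. */
    return 0;
}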
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index bf0d7e4e345..7292aa87b2d 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -39,7 +39,6 @@
  */
 #define ATC_DEFAULT_CFG     (ATC_FIFOCFG_HALFFIFO)
-#define ATC_DEFAULT_CTRLA   (0)
 #define ATC_DEFAULT_CTRLB   (ATC_SIF(AT_DMA_MEM_IF) \
                 |ATC_DIF(AT_DMA_MEM_IF))
 
@@ -574,7 +573,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
         return NULL;
     }
 
-    ctrla =   ATC_DEFAULT_CTRLA;
     ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
         | ATC_SRC_ADDR_MODE_INCR
         | ATC_DST_ADDR_MODE_INCR
@@ -585,13 +583,13 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
      * of the most common optimization.
      */
     if (!((src | dest  | len) & 3)) {
-        ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
+        ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
         src_width = dst_width = 2;
     } else if (!((src | dest | len) & 1)) {
-        ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
+        ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
         src_width = dst_width = 1;
     } else {
-        ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
+        ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
         src_width = dst_width = 0;
     }
 
@@ -668,7 +666,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
         return NULL;
     }
 
-    ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
+    ctrla = ATC_SCSIZE(sconfig->src_maxburst)
+        | ATC_DCSIZE(sconfig->dst_maxburst);
     ctrlb = ATC_IEN;
 
     switch (direction) {
@@ -796,12 +795,12 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
         enum dma_transfer_direction direction)
 {
     struct at_dma_chan      *atchan = to_at_dma_chan(chan);
-    struct at_dma_slave     *atslave = chan->private;
     struct dma_slave_config *sconfig = &atchan->dma_sconfig;
     u32                     ctrla;
 
     /* prepare common CRTLA value */
-    ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
+    ctrla = ATC_SCSIZE(sconfig->src_maxburst)
+        | ATC_DCSIZE(sconfig->dst_maxburst)
         | ATC_DST_WIDTH(reg_width)
         | ATC_SRC_WIDTH(reg_width)
         | period_len >> reg_width;
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 897a8bcaec9..8a6c8e8b294 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -87,7 +87,26 @@
 /* Bitfields in CTRLA */
 #define ATC_BTSIZE_MAX      0xFFFFUL    /* Maximum Buffer Transfer Size */
 #define ATC_BTSIZE(x)       (ATC_BTSIZE_MAX & (x))  /* Buffer Transfer Size */
-/* Chunck Tranfer size definitions are in at_hdmac.h */
+#define ATC_SCSIZE_MASK     (0x7 << 16) /* Source Chunk Transfer Size */
+#define     ATC_SCSIZE(x)       (ATC_SCSIZE_MASK & ((x) << 16))
+#define     ATC_SCSIZE_1        (0x0 << 16)
+#define     ATC_SCSIZE_4        (0x1 << 16)
+#define     ATC_SCSIZE_8        (0x2 << 16)
+#define     ATC_SCSIZE_16       (0x3 << 16)
+#define     ATC_SCSIZE_32       (0x4 << 16)
+#define     ATC_SCSIZE_64       (0x5 << 16)
+#define     ATC_SCSIZE_128      (0x6 << 16)
+#define     ATC_SCSIZE_256      (0x7 << 16)
+#define ATC_DCSIZE_MASK     (0x7 << 20) /* Destination Chunk Transfer Size */
+#define     ATC_DCSIZE(x)       (ATC_DCSIZE_MASK & ((x) << 20))
+#define     ATC_DCSIZE_1        (0x0 << 20)
+#define     ATC_DCSIZE_4        (0x1 << 20)
+#define     ATC_DCSIZE_8        (0x2 << 20)
+#define     ATC_DCSIZE_16       (0x3 << 20)
+#define     ATC_DCSIZE_32       (0x4 << 20)
+#define     ATC_DCSIZE_64       (0x5 << 20)
+#define     ATC_DCSIZE_128      (0x6 << 20)
+#define     ATC_DCSIZE_256      (0x7 << 20)
 #define ATC_SRC_WIDTH_MASK  (0x3 << 24) /* Source Single Transfer Size */
 #define     ATC_SRC_WIDTH(x)    ((x) << 24)
 #define     ATC_SRC_WIDTH_BYTE  (0x0 << 24)
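Note: with ATC_DEFAULT_CTRLA and atslave->ctrla gone, at_hdmac derives the chunk (burst) size from the generic struct dma_slave_config through the new ATC_SCSIZE()/ATC_DCSIZE() fields. From the client side the burst is requested with dmaengine_slave_config(); a hedged sketch of that call, where my_setup_slave() and the FIFO address are hypothetical and the channel is assumed to be already requested:

#include <linux/dmaengine.h>

static int my_setup_slave(struct dma_chan *chan, dma_addr_t fifo_addr)
{
    struct dma_slave_config cfg = {
        .direction      = DMA_MEM_TO_DEV,
        .dst_addr       = fifo_addr,
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .src_maxburst   = 16,   /* requested burst length; the engine   */
        .dst_maxburst   = 16,   /* driver encodes it into its CTRLA bits */
    };

    return dmaengine_slave_config(chan, &cfg);
}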
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 750925f9638..e67b4e06a91 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1033,7 +1033,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
     if (!sgl)
         goto out;
-    if (sgl->length == 0)
+    if (sg_dma_len(sgl) == 0)
         goto out;
 
     spin_lock_irqsave(&cohc->lock, flg);
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 6c0e2d4c668..780e0429b38 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -270,10 +270,10 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 
         if (dir == DMA_MEM_TO_DEV)
             /* increment source address */
-            src = sg_phys(sg);
+            src = sg_dma_address(sg);
         else
             /* increment destination address */
-            dst = sg_phys(sg);
+            dst = sg_dma_address(sg);
 
         bytes_to_transfer = sg_dma_len(sg);
 
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 7439079f5ee..e23dc82d43a 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/of.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -742,7 +743,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
             struct dw_desc  *desc;
             u32             len, dlen, mem;
 
-            mem = sg_phys(sg);
+            mem = sg_dma_address(sg);
             len = sg_dma_len(sg);
 
             if (!((mem | len) & 7))
@@ -809,7 +810,7 @@ slave_sg_todev_fill_desc:
             struct dw_desc  *desc;
             u32             len, dlen, mem;
 
-            mem = sg_phys(sg);
+            mem = sg_dma_address(sg);
             len = sg_dma_len(sg);
 
             if (!((mem | len) & 7))
@@ -1429,7 +1430,7 @@ static int __init dw_probe(struct platform_device *pdev)
         err = PTR_ERR(dw->clk);
         goto err_clk;
     }
-    clk_enable(dw->clk);
+    clk_prepare_enable(dw->clk);
 
     /* force dma off, just in case */
     dw_dma_off(dw);
@@ -1510,7 +1511,7 @@ static int __init dw_probe(struct platform_device *pdev)
     return 0;
 
 err_irq:
-    clk_disable(dw->clk);
+    clk_disable_unprepare(dw->clk);
     clk_put(dw->clk);
 err_clk:
     iounmap(dw->regs);
@@ -1540,7 +1541,7 @@ static int __exit dw_remove(struct platform_device *pdev)
         channel_clear_bit(dw, CH_EN, dwc->mask);
     }
 
-    clk_disable(dw->clk);
+    clk_disable_unprepare(dw->clk);
     clk_put(dw->clk);
 
     iounmap(dw->regs);
@@ -1559,7 +1560,7 @@ static void dw_shutdown(struct platform_device *pdev)
     struct dw_dma   *dw = platform_get_drvdata(pdev);
 
     dw_dma_off(platform_get_drvdata(pdev));
-    clk_disable(dw->clk);
+    clk_disable_unprepare(dw->clk);
 }
 
 static int dw_suspend_noirq(struct device *dev)
@@ -1568,7 +1569,7 @@ static int dw_suspend_noirq(struct device *dev)
     struct dw_dma   *dw = platform_get_drvdata(pdev);
 
     dw_dma_off(platform_get_drvdata(pdev));
-    clk_disable(dw->clk);
+    clk_disable_unprepare(dw->clk);
 
     return 0;
 }
@@ -1578,7 +1579,7 @@ static int dw_resume_noirq(struct device *dev)
     struct platform_device *pdev = to_platform_device(dev);
     struct dw_dma   *dw = platform_get_drvdata(pdev);
 
-    clk_enable(dw->clk);
+    clk_prepare_enable(dw->clk);
     dma_writel(dw, CFG, DW_CFG_DMA_EN);
     return 0;
 }
@@ -1592,12 +1593,21 @@ static const struct dev_pm_ops dw_dev_pm_ops = {
     .poweroff_noirq = dw_suspend_noirq,
 };
 
+#ifdef CONFIG_OF
+static const struct of_device_id dw_dma_id_table[] = {
+    { .compatible = "snps,dma-spear1340" },
+    {}
+};
+MODULE_DEVICE_TABLE(of, dw_dma_id_table);
+#endif
+
 static struct platform_driver dw_driver = {
     .remove     = __exit_p(dw_remove),
     .shutdown   = dw_shutdown,
     .driver = {
         .name   = "dw_dmac",
         .pm     = &dw_dev_pm_ops,
+        .of_match_table = of_match_ptr(dw_dma_id_table),
     },
 };
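Note: dw_dmac switches from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), since the common clock framework requires clk_prepare() to run in sleepable context before a clock may be enabled, and it gains an of_device_id table wrapped in of_match_ptr() for device-tree probing. A hedged sketch of the same two patterns in a stand-alone platform driver; every name and the compatible string below are hypothetical.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static struct clk *my_dma_clk;  /* sketch: single static instance */

static int my_dma_probe(struct platform_device *pdev)
{
    int ret;

    my_dma_clk = clk_get(&pdev->dev, NULL);
    if (IS_ERR(my_dma_clk))
        return PTR_ERR(my_dma_clk);

    /* prepare + enable in one step; pairs with clk_disable_unprepare() */
    ret = clk_prepare_enable(my_dma_clk);
    if (ret)
        clk_put(my_dma_clk);
    return ret;
}

static int my_dma_remove(struct platform_device *pdev)
{
    clk_disable_unprepare(my_dma_clk);
    clk_put(my_dma_clk);
    return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id my_dma_of_match[] = {
    { .compatible = "vendor,my-dma" },  /* hypothetical compatible string */
    { }
};
MODULE_DEVICE_TABLE(of, my_dma_of_match);
#endif

static struct platform_driver my_dma_driver = {
    .probe  = my_dma_probe,
    .remove = my_dma_remove,
    .driver = {
        .name           = "my-dma",
        .of_match_table = of_match_ptr(my_dma_of_match),
    },
};
module_platform_driver(my_dma_driver);
MODULE_LICENSE("GPL");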
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index bb787d8e152..fcfeb3cd8d3 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -227,7 +227,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
     struct scatterlist *sg = d->sg;
     unsigned long now;
 
-    now = min(d->len, sg->length);
+    now = min(d->len, sg_dma_len(sg));
     if (d->len != IMX_DMA_LENGTH_LOOP)
         d->len -= now;
 
@@ -763,16 +763,16 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
     desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 
     for_each_sg(sgl, sg, sg_len, i) {
-        dma_length += sg->length;
+        dma_length += sg_dma_len(sg);
     }
 
     switch (imxdmac->word_size) {
     case DMA_SLAVE_BUSWIDTH_4_BYTES:
-        if (sgl->length & 3 || sgl->dma_address & 3)
+        if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
             return NULL;
         break;
     case DMA_SLAVE_BUSWIDTH_2_BYTES:
-        if (sgl->length & 1 || sgl->dma_address & 1)
+        if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
             return NULL;
         break;
     case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -831,13 +831,13 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
         imxdmac->sg_list[i].page_link = 0;
         imxdmac->sg_list[i].offset = 0;
         imxdmac->sg_list[i].dma_address = dma_addr;
-        imxdmac->sg_list[i].length = period_len;
+        sg_dma_len(&imxdmac->sg_list[i]) = period_len;
         dma_addr += period_len;
     }
 
     /* close the loop */
     imxdmac->sg_list[periods].offset = 0;
-    imxdmac->sg_list[periods].length = 0;
+    sg_dma_len(&imxdmac->sg_list[periods]) = 0;
     imxdmac->sg_list[periods].page_link =
         ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
 
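Note: imx-dma builds its own scatterlist for cyclic (audio-style) transfers, and now assigns the per-period length through sg_dma_len(), which works as an lvalue whether or not the architecture keeps a separate dma_length field. A rough sketch of building such a period list for a buffer that is already DMA-mapped; the names are hypothetical and error handling is trimmed.

#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *my_build_cyclic_sg(dma_addr_t buf_dma,
                                              size_t period_len,
                                              unsigned int periods)
{
    struct scatterlist *sg;
    unsigned int i;

    sg = kcalloc(periods + 1, sizeof(*sg), GFP_KERNEL);
    if (!sg)
        return NULL;
    sg_init_table(sg, periods + 1);

    for (i = 0; i < periods; i++) {
        sg_dma_address(&sg[i]) = buf_dma + i * period_len;
        sg_dma_len(&sg[i]) = period_len;
    }

    /* Terminator entry chains back to the head to close the loop. */
    sg_dma_len(&sg[periods]) = 0;
    sg[periods].page_link = ((unsigned long)sg | 0x01) & ~0x02;

    return sg;
}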
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d3e38e28bb6..a472a29d849 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,7 +24,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
-#include <linux/wait.h>
+#include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/spinlock.h>
@@ -271,6 +271,7 @@ struct sdma_channel {
     enum dma_status         status;
     unsigned int            chn_count;
     unsigned int            chn_real_count;
+    struct tasklet_struct   tasklet;
 };
 
 #define IMX_DMA_SG_LOOP BIT(0)
@@ -323,7 +324,7 @@ struct sdma_engine {
     dma_addr_t              context_phys;
     struct dma_device       dma_device;
     struct clk              *clk;
-    struct mutex            channel_0_lock;
+    spinlock_t              channel_0_lock;
     struct sdma_script_start_addrs  *script_addrs;
 };
 
@@ -401,19 +402,27 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 }
 
 /*
- * sdma_run_channel - run a channel and wait till it's done
+ * sdma_run_channel0 - run a channel and wait till it's done
  */
-static int sdma_run_channel(struct sdma_channel *sdmac)
+static int sdma_run_channel0(struct sdma_engine *sdma)
 {
-    struct sdma_engine *sdma = sdmac->sdma;
-    int channel = sdmac->channel;
     int ret;
+    unsigned long timeout = 500;
 
-    init_completion(&sdmac->done);
+    sdma_enable_channel(sdma, 0);
 
-    sdma_enable_channel(sdma, channel);
+    while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
+        if (timeout-- <= 0)
+            break;
+        udelay(1);
+    }
 
-    ret = wait_for_completion_timeout(&sdmac->done, HZ);
+    if (ret) {
+        /* Clear the interrupt status */
+        writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
+    } else {
+        dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
+    }
 
     return ret ? 0 : -ETIMEDOUT;
 }
@@ -425,17 +434,17 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
     void *buf_virt;
     dma_addr_t buf_phys;
     int ret;
-
-    mutex_lock(&sdma->channel_0_lock);
+    unsigned long flags;
 
     buf_virt = dma_alloc_coherent(NULL,
             size,
             &buf_phys, GFP_KERNEL);
     if (!buf_virt) {
-        ret = -ENOMEM;
-        goto err_out;
+        return -ENOMEM;
     }
 
+    spin_lock_irqsave(&sdma->channel_0_lock, flags);
+
     bd0->mode.command = C0_SETPM;
     bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
     bd0->mode.count = size / 2;
@@ -444,12 +453,11 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 
     memcpy(buf_virt, buf, size);
 
-    ret = sdma_run_channel(&sdma->channel[0]);
+    ret = sdma_run_channel0(sdma);
 
-    dma_free_coherent(NULL, size, buf_virt, buf_phys);
+    spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
-err_out:
-    mutex_unlock(&sdma->channel_0_lock);
+    dma_free_coherent(NULL, size, buf_virt, buf_phys);
 
     return ret;
 }
@@ -534,13 +542,11 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
         sdmac->desc.callback(sdmac->desc.callback_param);
 }
 
-static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
+static void sdma_tasklet(unsigned long data)
 {
-    complete(&sdmac->done);
+    struct sdma_channel *sdmac = (struct sdma_channel *) data;
 
-    /* not interested in channel 0 interrupts */
-    if (sdmac->channel == 0)
-        return;
+    complete(&sdmac->done);
 
     if (sdmac->flags & IMX_DMA_SG_LOOP)
         sdma_handle_channel_loop(sdmac);
@@ -554,13 +560,15 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
     unsigned long stat;
 
     stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+    /* not interested in channel 0 interrupts */
+    stat &= ~1;
     writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
 
     while (stat) {
         int channel = fls(stat) - 1;
         struct sdma_channel *sdmac = &sdma->channel[channel];
 
-        mxc_sdma_handle_channel(sdmac);
+        tasklet_schedule(&sdmac->tasklet);
 
         __clear_bit(channel, &stat);
     }
@@ -659,6 +667,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
     struct sdma_context_data *context = sdma->context;
     struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
     int ret;
+    unsigned long flags;
 
     if (sdmac->direction == DMA_DEV_TO_MEM) {
         load_address = sdmac->pc_from_device;
@@ -676,7 +685,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
     dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
     dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
 
-    mutex_lock(&sdma->channel_0_lock);
+    spin_lock_irqsave(&sdma->channel_0_lock, flags);
 
     memset(context, 0, sizeof(*context));
     context->channel_state.pc = load_address;
@@ -695,10 +704,9 @@ static int sdma_load_context(struct sdma_channel *sdmac)
     bd0->mode.count = sizeof(*context) / 4;
     bd0->buffer_addr = sdma->context_phys;
     bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+    ret = sdma_run_channel0(sdma);
 
-    ret = sdma_run_channel(&sdma->channel[0]);
-
-    mutex_unlock(&sdma->channel_0_lock);
+    spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
     return ret;
 }
@@ -938,7 +946,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
         bd->buffer_addr = sg->dma_address;
 
-        count = sg->length;
+        count = sg_dma_len(sg);
 
         if (count > 0xffff) {
             dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
@@ -1297,7 +1305,7 @@ static int __init sdma_probe(struct platform_device *pdev)
     if (!sdma)
         return -ENOMEM;
 
-    mutex_init(&sdma->channel_0_lock);
+    spin_lock_init(&sdma->channel_0_lock);
 
     sdma->dev = &pdev->dev;
 
@@ -1359,6 +1367,8 @@ static int __init sdma_probe(struct platform_device *pdev)
         dma_cookie_init(&sdmac->chan);
         sdmac->channel = i;
+        tasklet_init(&sdmac->tasklet, sdma_tasklet,
+                 (unsigned long) sdmac);
         /*
          * Add the channel to the DMAC list. Do not add channel 0 though
          * because we need it internally in the SDMA driver. This also means
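Note: imx-sdma stops blocking on a completion under a mutex while channel 0 runs and instead polls the channel-0 interrupt bit under a spinlock, while per-channel completion work moves out of the hard IRQ handler into a tasklet. A hedged sketch of the tasklet half of that split; the names are hypothetical and hardware acknowledgement is omitted.

#include <linux/interrupt.h>

struct my_chan {
    struct tasklet_struct tasklet;
    /* ... channel state ... */
};

static void my_chan_tasklet(unsigned long data)
{
    struct my_chan *c = (struct my_chan *)data;

    /*
     * Runs in softirq context after the hard IRQ handler returns:
     * a safe place for descriptor completion callbacks.
     */
    (void)c;
}

static void my_chan_init(struct my_chan *c)
{
    tasklet_init(&c->tasklet, my_chan_tasklet, (unsigned long)c);
}

/* Registered elsewhere with request_irq(); dev_id points at the channel. */
static irqreturn_t my_irq(int irq, void *dev_id)
{
    struct my_chan *c = dev_id;

    /* Acknowledge the hardware here, then defer the heavy lifting. */
    tasklet_schedule(&c->tasklet);
    return IRQ_HANDLED;
}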
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index c900ca7aaec..222e907bfaa 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -394,11 +394,11 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
             }
         }
         /*Populate CTL_HI values*/
-        ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+        ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
                             desc->width,
                             midc->dma->block_size);
         /*Populate SAR and DAR values*/
-        sg_phy_addr = sg_phys(sg);
+        sg_phy_addr = sg_dma_address(sg);
         if (desc->dirn == DMA_MEM_TO_DEV) {
             lli_bloc_desc->sar  = sg_phy_addr;
             lli_bloc_desc->dar  = mids->dma_slave.dst_addr;
@@ -747,7 +747,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
         txd = intel_mid_dma_prep_memcpy(chan,
                     mids->dma_slave.dst_addr,
                     mids->dma_slave.src_addr,
-                    sgl->length,
+                    sg_dma_len(sgl),
                     flags);
         return txd;
     } else {
@@ -759,7 +759,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
     pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
             sg_len, direction, flags);
 
-    txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+    txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
     if (NULL == txd) {
         pr_err("MDMA: Prep memcpy failed\n");
         return NULL;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 655d4ce6ed0..3db3a48d3f0 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -415,9 +415,9 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
         ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
     } else {
         for_each_sg(sgl, sg, sg_len, i) {
-            if (sg->length > MAX_XFER_BYTES) {
+            if (sg_dma_len(sg) > MAX_XFER_BYTES) {
                 dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
-                        sg->length, MAX_XFER_BYTES);
+                        sg_dma_len(sg), MAX_XFER_BYTES);
                 goto err_out;
             }
 
@@ -425,7 +425,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 
             ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
             ccw->bufaddr = sg->dma_address;
-            ccw->xfer_bytes = sg->length;
+            ccw->xfer_bytes = sg_dma_len(sg);
 
             ccw->bits = 0;
             ccw->bits |= CCW_CHAIN;
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 65c0495a6d4..987ab5cd261 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -621,7 +621,7 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
             goto err_desc_get;
 
         desc->regs.dev_addr = reg;
-        desc->regs.mem_addr = sg_phys(sg);
+        desc->regs.mem_addr = sg_dma_address(sg);
         desc->regs.size = sg_dma_len(sg);
         desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
 
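Note: the intel_mid_dma, mxs-dma and pch_dma hunks are the same sg_dma_len()/sg_dma_address() conversion; mxs-dma additionally validates each mapped entry against its per-descriptor limit before building descriptors. A small sketch of that kind of validation loop; MY_MAX_XFER_BYTES and my_check_sg() are hypothetical, not the mxs-dma code.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#define MY_MAX_XFER_BYTES 0xff00U   /* hypothetical per-descriptor limit */

static int my_check_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
    struct scatterlist *sg;
    int i;

    for_each_sg(sgl, sg, nents, i) {
        if (sg_dma_len(sg) > MY_MAX_XFER_BYTES) {
            dev_err(dev, "sg entry too long: %u > %u\n",
                sg_dma_len(sg), MY_MAX_XFER_BYTES);
            return -EINVAL;
        }
    }
    return 0;
}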
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fa3fb21e60b..cbcc28e79be 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -21,7 +21,6 @@
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/interrupt.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl330.h>
 #include <linux/pm_runtime.h>
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2ed1ac3513f..000d309602b 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2362,7 +2362,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
     }
 
     sg[periods].offset = 0;
-    sg[periods].length = 0;
+    sg_dma_len(&sg[periods]) = 0;
     sg[periods].page_link =
         ((unsigned long)sg | 0x01) & ~0x02;
 