Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig               |   2
-rw-r--r--  drivers/dma/coh901318.c           |   2
-rw-r--r--  drivers/dma/cppi41.c              |  16
-rw-r--r--  drivers/dma/dmatest.c             |   4
-rw-r--r--  drivers/dma/dw/core.c             | 147
-rw-r--r--  drivers/dma/dw/internal.h         |  61
-rw-r--r--  drivers/dma/dw/pci.c              |   8
-rw-r--r--  drivers/dma/dw/platform.c         |  92
-rw-r--r--  drivers/dma/dw/regs.h             |  41
-rw-r--r--  drivers/dma/edma.c                |   2
-rw-r--r--  drivers/dma/imx-sdma.c            |  35
-rw-r--r--  drivers/dma/mmp_tdma.c            |  19
-rw-r--r--  drivers/dma/mv_xor.c              | 268
-rw-r--r--  drivers/dma/mv_xor.h              |  62
-rw-r--r--  drivers/dma/pl330.c               |  19
-rw-r--r--  drivers/dma/sh/rcar-audmapp.c     |  15
-rw-r--r--  drivers/dma/sun6i-dma.c           |  23
-rw-r--r--  drivers/dma/xilinx/xilinx_vdma.c  |   1
18 files changed, 346 insertions(+), 471 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9b1ea0ef59a..96216717780 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -270,7 +270,7 @@ config IMX_SDMA
select DMA_ENGINE
help
Support the i.MX SDMA engine. This engine is integrated into
- Freescale i.MX25/31/35/51/53 chips.
+ Freescale i.MX25/31/35/51/53/6 chips.
config IMX_DMA
tristate "i.MX DMA support"
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 3c6716e0b78..e88588d8ecd 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2156,7 +2156,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(chan);
}
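The hunk above replaces the open-coded device_control() call with the dmaengine_terminate_all() helper. For reference, a minimal paraphrase of what that helper amounted to in this era of the dmaengine API (from <linux/dmaengine.h>, not part of this patch):

	static inline int dmaengine_terminate_all(struct dma_chan *chan)
	{
		/* dispatch DMA_TERMINATE_ALL through the channel's control op */
		if (chan->device->device_control)
			return chan->device->device_control(chan,
							    DMA_TERMINATE_ALL, 0);
		return -ENOSYS;
	}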
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 8f8b0b60887..a58eec3b2ca 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -938,7 +938,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
if (!glue_info)
return -EINVAL;
- cdd = kzalloc(sizeof(*cdd), GFP_KERNEL);
+ cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL);
if (!cdd)
return -ENOMEM;
@@ -959,10 +959,8 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->qmgr_mem = of_iomap(dev->of_node, 3);
if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
- !cdd->qmgr_mem) {
- ret = -ENXIO;
- goto err_remap;
- }
+ !cdd->qmgr_mem)
+ return -ENXIO;
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
@@ -989,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
- ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
+ ret = devm_request_irq(&pdev->dev, irq, glue_info->isr, IRQF_SHARED,
dev_name(dev), cdd);
if (ret)
goto err_irq;
@@ -1009,7 +1007,6 @@ static int cppi41_dma_probe(struct platform_device *pdev)
err_of:
dma_async_device_unregister(&cdd->ddev);
err_dma_reg:
- free_irq(irq, cdd);
err_irq:
cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
cleanup_chans(cdd);
@@ -1023,8 +1020,6 @@ err_get_sync:
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
iounmap(cdd->qmgr_mem);
-err_remap:
- kfree(cdd);
return ret;
}
@@ -1036,7 +1031,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&cdd->ddev);
cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
- free_irq(cdd->irq, cdd);
+ devm_free_irq(&pdev->dev, cdd->irq, cdd);
cleanup_chans(cdd);
deinit_cppi41(&pdev->dev, cdd);
iounmap(cdd->usbss_mem);
@@ -1045,7 +1040,6 @@ static int cppi41_dma_remove(struct platform_device *pdev)
iounmap(cdd->qmgr_mem);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(cdd);
return 0;
}
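The cppi41 hunks are a straight conversion to managed (devm_*) resources: the kzalloc()/free_irq() pairs and the err_remap label disappear because the driver core releases devm allocations automatically on probe failure and on unbind. A generic sketch of the pattern, with hypothetical foo_* names:

	struct foo_priv { int dummy; };

	static irqreturn_t foo_isr(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *priv;
		int irq, ret;

		/* freed automatically on probe failure or device unbind */
		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		/* no matching free_irq() needed in remove() or error paths */
		ret = devm_request_irq(&pdev->dev, irq, foo_isr, IRQF_SHARED,
				       dev_name(&pdev->dev), priv);
		if (ret)
			return ret;

		return 0;
	}

Note that cppi41_dma_remove() still calls devm_free_irq() explicitly: the interrupt must be released before cleanup_chans() runs, earlier than devm's automatic teardown would get to it.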
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e27cec25c59..a8d7809e2f4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -688,14 +688,14 @@ static int dmatest_func(void *data)
runtime = ktime_us_delta(ktime_get(), ktime);
ret = 0;
+err_dstbuf:
for (i = 0; thread->dsts[i]; i++)
kfree(thread->dsts[i]);
-err_dstbuf:
kfree(thread->dsts);
err_dsts:
+err_srcbuf:
for (i = 0; thread->srcs[i]; i++)
kfree(thread->srcs[i]);
-err_srcbuf:
kfree(thread->srcs);
err_srcs:
kfree(pq_coefs);
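Moving the err_dstbuf/err_srcbuf labels above the kfree loops means a failure now frees every element of the NULL-terminated dsts[]/srcs[] arrays rather than only the array pointers, so partially allocated buffers no longer leak on the error path. Schematically (a sketch, not the patch itself):

	for (i = 0; i < cnt; i++) {
		bufs[i] = kmalloc(len, GFP_KERNEL);
		if (!bufs[i])
			goto err_buf;	/* bufs[0..i-1] must still be freed */
	}
	...
	err_buf:
		for (i = 0; bufs[i]; i++)	/* label now sits above this loop */
			kfree(bufs[i]);
		kfree(bufs);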
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 1af731b83b3..24472217041 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -11,7 +11,6 @@
*/
#include <linux/bitops.h>
-#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -37,24 +36,6 @@
* support descriptor writeback.
*/
-static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
-{
- return dwc->request_line == (typeof(dwc->request_line))~0;
-}
-
-static inline void dwc_set_masters(struct dw_dma_chan *dwc)
-{
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- struct dw_dma_slave *dws = dwc->chan.private;
- unsigned char mmax = dw->nr_masters - 1;
-
- if (!is_request_line_unset(dwc))
- return;
-
- dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
- dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
-}
-
#define DWC_DEFAULT_CTLLO(_chan) ({ \
struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
@@ -155,13 +136,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
*/
BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
- cfghi = dws->cfg_hi;
- cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+ cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
} else {
- if (dwc->direction == DMA_MEM_TO_DEV)
- cfghi = DWC_CFGH_DST_PER(dwc->request_line);
- else if (dwc->direction == DMA_DEV_TO_MEM)
- cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
+ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
}
channel_writel(dwc, CFG_LO, cfglo);
@@ -939,6 +918,26 @@ err_desc_get:
return NULL;
}
+bool dw_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma_slave *dws = param;
+
+ if (!dws || dws->dma_dev != chan->device->dev)
+ return false;
+
+ /* We have to copy data since dws can be temporary storage */
+
+ dwc->src_id = dws->src_id;
+ dwc->dst_id = dws->dst_id;
+
+ dwc->src_master = dws->src_master;
+ dwc->dst_master = dws->dst_master;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(dw_dma_filter);
+
/*
* Fix sconfig's burst size according to dw_dmac. We need to convert them as:
* 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
@@ -967,10 +966,6 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
dwc->direction = sconfig->direction;
- /* Take the request line from slave_id member */
- if (is_request_line_unset(dwc))
- dwc->request_line = sconfig->slave_id;
-
convert_burst(&dwc->dma_sconfig.src_maxburst);
convert_burst(&dwc->dma_sconfig.dst_maxburst);
@@ -1099,6 +1094,31 @@ static void dwc_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&dwc->lock, flags);
}
+/*----------------------------------------------------------------------*/
+
+static void dw_dma_off(struct dw_dma *dw)
+{
+ int i;
+
+ dma_writel(dw, CFG, 0);
+
+ channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
+ while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
+ cpu_relax();
+
+ for (i = 0; i < dw->dma.chancnt; i++)
+ dw->chan[i].initialized = false;
+}
+
+static void dw_dma_on(struct dw_dma *dw)
+{
+ dma_writel(dw, CFG, DW_CFG_DMA_EN);
+}
+
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
@@ -1123,7 +1143,10 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
- dwc_set_masters(dwc);
+ /* Enable controller here if needed */
+ if (!dw->in_use)
+ dw_dma_on(dw);
+ dw->in_use |= dwc->mask;
spin_lock_irqsave(&dwc->lock, flags);
i = dwc->descs_allocated;
@@ -1182,7 +1205,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
dwc->initialized = false;
- dwc->request_line = ~0;
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1190,6 +1212,11 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_unlock_irqrestore(&dwc->lock, flags);
+ /* Disable controller in case it was the last user */
+ dw->in_use &= ~dwc->mask;
+ if (!dw->in_use)
+ dw_dma_off(dw);
+
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
@@ -1460,24 +1487,6 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
/*----------------------------------------------------------------------*/
-static void dw_dma_off(struct dw_dma *dw)
-{
- int i;
-
- dma_writel(dw, CFG, 0);
-
- channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
- channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
- channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
- channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
-
- while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
- cpu_relax();
-
- for (i = 0; i < dw->dma.chancnt; i++)
- dw->chan[i].initialized = false;
-}
-
int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
{
struct dw_dma *dw;
@@ -1495,13 +1504,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
dw->regs = chip->regs;
chip->dw = dw;
- dw->clk = devm_clk_get(chip->dev, "hclk");
- if (IS_ERR(dw->clk))
- return PTR_ERR(dw->clk);
- err = clk_prepare_enable(dw->clk);
- if (err)
- return err;
-
dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
autocfg = dw_params >> DW_PARAMS_EN & 0x1;
@@ -1604,7 +1606,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
channel_clear_bit(dw, CH_EN, dwc->mask);
dwc->direction = DMA_TRANS_NONE;
- dwc->request_line = ~0;
/* Hardware configuration */
if (autocfg) {
@@ -1659,8 +1660,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
dw->dma.device_tx_status = dwc_tx_status;
dw->dma.device_issue_pending = dwc_issue_pending;
- dma_writel(dw, CFG, DW_CFG_DMA_EN);
-
err = dma_async_device_register(&dw->dma);
if (err)
goto err_dma_register;
@@ -1673,7 +1672,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
err_dma_register:
free_irq(chip->irq, dw);
err_pdata:
- clk_disable_unprepare(dw->clk);
return err;
}
EXPORT_SYMBOL_GPL(dw_dma_probe);
@@ -1695,46 +1693,27 @@ int dw_dma_remove(struct dw_dma_chip *chip)
channel_clear_bit(dw, CH_EN, dwc->mask);
}
- clk_disable_unprepare(dw->clk);
-
return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_remove);
-void dw_dma_shutdown(struct dw_dma_chip *chip)
-{
- struct dw_dma *dw = chip->dw;
-
- dw_dma_off(dw);
- clk_disable_unprepare(dw->clk);
-}
-EXPORT_SYMBOL_GPL(dw_dma_shutdown);
-
-#ifdef CONFIG_PM_SLEEP
-
-int dw_dma_suspend(struct dw_dma_chip *chip)
+int dw_dma_disable(struct dw_dma_chip *chip)
{
struct dw_dma *dw = chip->dw;
dw_dma_off(dw);
- clk_disable_unprepare(dw->clk);
-
return 0;
}
-EXPORT_SYMBOL_GPL(dw_dma_suspend);
+EXPORT_SYMBOL_GPL(dw_dma_disable);
-int dw_dma_resume(struct dw_dma_chip *chip)
+int dw_dma_enable(struct dw_dma_chip *chip)
{
struct dw_dma *dw = chip->dw;
- clk_prepare_enable(dw->clk);
- dma_writel(dw, CFG, DW_CFG_DMA_EN);
-
+ dw_dma_on(dw);
return 0;
}
-EXPORT_SYMBOL_GPL(dw_dma_resume);
-
-#endif /* CONFIG_PM_SLEEP */
+EXPORT_SYMBOL_GPL(dw_dma_enable);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
index 32667f9e0dd..41439732ff6 100644
--- a/drivers/dma/dw/internal.h
+++ b/drivers/dma/dw/internal.h
@@ -8,63 +8,16 @@
* published by the Free Software Foundation.
*/
-#ifndef _DW_DMAC_INTERNAL_H
-#define _DW_DMAC_INTERNAL_H
+#ifndef _DMA_DW_INTERNAL_H
+#define _DMA_DW_INTERNAL_H
-#include <linux/device.h>
-#include <linux/dw_dmac.h>
+#include <linux/dma/dw.h>
#include "regs.h"
-/**
- * struct dw_dma_chip - representation of DesignWare DMA controller hardware
- * @dev: struct device of the DMA controller
- * @irq: irq line
- * @regs: memory mapped I/O space
- * @dw: struct dw_dma that is filed by dw_dma_probe()
- */
-struct dw_dma_chip {
- struct device *dev;
- int irq;
- void __iomem *regs;
- struct dw_dma *dw;
-};
-
-/* Export to the platform drivers */
-int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata);
-int dw_dma_remove(struct dw_dma_chip *chip);
-
-void dw_dma_shutdown(struct dw_dma_chip *chip);
-
-#ifdef CONFIG_PM_SLEEP
-
-int dw_dma_suspend(struct dw_dma_chip *chip);
-int dw_dma_resume(struct dw_dma_chip *chip);
-
-#endif /* CONFIG_PM_SLEEP */
+int dw_dma_disable(struct dw_dma_chip *chip);
+int dw_dma_enable(struct dw_dma_chip *chip);
-/**
- * dwc_get_dms - get destination master
- * @slave: pointer to the custom slave configuration
- *
- * Returns destination master in the custom slave configuration if defined, or
- * default value otherwise.
- */
-static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
-{
- return slave ? slave->dst_master : 0;
-}
-
-/**
- * dwc_get_sms - get source master
- * @slave: pointer to the custom slave configuration
- *
- * Returns source master in the custom slave configuration if defined, or
- * default value otherwise.
- */
-static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
-{
- return slave ? slave->src_master : 1;
-}
+extern bool dw_dma_filter(struct dma_chan *chan, void *param);
-#endif /* _DW_DMAC_INTERNAL_H */
+#endif /* _DMA_DW_INTERNAL_H */
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 39e30c3c7a9..b144706b3d8 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -82,7 +82,7 @@ static int dw_pci_suspend_late(struct device *dev)
struct pci_dev *pci = to_pci_dev(dev);
struct dw_dma_chip *chip = pci_get_drvdata(pci);
- return dw_dma_suspend(chip);
+ return dw_dma_disable(chip);
};
static int dw_pci_resume_early(struct device *dev)
@@ -90,7 +90,7 @@ static int dw_pci_resume_early(struct device *dev)
struct pci_dev *pci = to_pci_dev(dev);
struct dw_dma_chip *chip = pci_get_drvdata(pci);
- return dw_dma_resume(chip);
+ return dw_dma_enable(chip);
};
#endif /* CONFIG_PM_SLEEP */
@@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata },
{ PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata },
+ /* Braswell */
+ { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_pdata },
+ { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_pdata },
+
/* Haswell */
{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_pdata },
{ }
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index c5b339af6be..a630161473a 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -25,72 +25,49 @@
#include "internal.h"
-struct dw_dma_of_filter_args {
- struct dw_dma *dw;
- unsigned int req;
- unsigned int src;
- unsigned int dst;
-};
-
-static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma_of_filter_args *fargs = param;
-
- /* Ensure the device matches our channel */
- if (chan->device != &fargs->dw->dma)
- return false;
-
- dwc->request_line = fargs->req;
- dwc->src_master = fargs->src;
- dwc->dst_master = fargs->dst;
-
- return true;
-}
-
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct dw_dma *dw = ofdma->of_dma_data;
- struct dw_dma_of_filter_args fargs = {
- .dw = dw,
+ struct dw_dma_slave slave = {
+ .dma_dev = dw->dma.dev,
};
dma_cap_mask_t cap;
if (dma_spec->args_count != 3)
return NULL;
- fargs.req = dma_spec->args[0];
- fargs.src = dma_spec->args[1];
- fargs.dst = dma_spec->args[2];
+ slave.src_id = dma_spec->args[0];
+ slave.dst_id = dma_spec->args[0];
+ slave.src_master = dma_spec->args[1];
+ slave.dst_master = dma_spec->args[2];
- if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
- fargs.src >= dw->nr_masters ||
- fargs.dst >= dw->nr_masters))
+ if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
+ slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
+ slave.src_master >= dw->nr_masters ||
+ slave.dst_master >= dw->nr_masters))
return NULL;
dma_cap_zero(cap);
dma_cap_set(DMA_SLAVE, cap);
/* TODO: there should be a simpler way to do this */
- return dma_request_channel(cap, dw_dma_of_filter, &fargs);
+ return dma_request_channel(cap, dw_dma_filter, &slave);
}
#ifdef CONFIG_ACPI
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct acpi_dma_spec *dma_spec = param;
+ struct dw_dma_slave slave = {
+ .dma_dev = dma_spec->dev,
+ .src_id = dma_spec->slave_id,
+ .dst_id = dma_spec->slave_id,
+ .src_master = 1,
+ .dst_master = 0,
+ };
- if (chan->device->dev != dma_spec->dev ||
- chan->chan_id != dma_spec->chan_id)
- return false;
-
- dwc->request_line = dma_spec->slave_id;
- dwc->src_master = dwc_get_sms(NULL);
- dwc->dst_master = dwc_get_dms(NULL);
-
- return true;
+ return dw_dma_filter(chan, &slave);
}
static void dw_dma_acpi_controller_register(struct dw_dma *dw)
@@ -201,10 +178,17 @@ static int dw_probe(struct platform_device *pdev)
chip->dev = dev;
- err = dw_dma_probe(chip, pdata);
+ chip->clk = devm_clk_get(chip->dev, "hclk");
+ if (IS_ERR(chip->clk))
+ return PTR_ERR(chip->clk);
+ err = clk_prepare_enable(chip->clk);
if (err)
return err;
+ err = dw_dma_probe(chip, pdata);
+ if (err)
+ goto err_dw_dma_probe;
+
platform_set_drvdata(pdev, chip);
if (pdev->dev.of_node) {
@@ -219,6 +203,10 @@ static int dw_probe(struct platform_device *pdev)
dw_dma_acpi_controller_register(chip->dw);
return 0;
+
+err_dw_dma_probe:
+ clk_disable_unprepare(chip->clk);
+ return err;
}
static int dw_remove(struct platform_device *pdev)
@@ -228,14 +216,18 @@ static int dw_remove(struct platform_device *pdev)
if (pdev->dev.of_node)
of_dma_controller_free(pdev->dev.of_node);
- return dw_dma_remove(chip);
+ dw_dma_remove(chip);
+ clk_disable_unprepare(chip->clk);
+
+ return 0;
}
static void dw_shutdown(struct platform_device *pdev)
{
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
- dw_dma_shutdown(chip);
+ dw_dma_disable(chip);
+ clk_disable_unprepare(chip->clk);
}
#ifdef CONFIG_OF
@@ -261,7 +253,10 @@ static int dw_suspend_late(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
- return dw_dma_suspend(chip);
+ dw_dma_disable(chip);
+ clk_disable_unprepare(chip->clk);
+
+ return 0;
}
static int dw_resume_early(struct device *dev)
@@ -269,7 +264,8 @@ static int dw_resume_early(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
- return dw_dma_resume(chip);
+ clk_prepare_enable(chip->clk);
+ return dw_dma_enable(chip);
}
#endif /* CONFIG_PM_SLEEP */
@@ -281,7 +277,7 @@ static const struct dev_pm_ops dw_dev_pm_ops = {
static struct platform_driver dw_driver = {
.probe = dw_probe,
.remove = dw_remove,
- .shutdown = dw_shutdown,
+ .shutdown = dw_shutdown,
.driver = {
.name = "dw_dmac",
.pm = &dw_dev_pm_ops,
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index bb98d3e91e8..848e232f7cc 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -11,7 +11,6 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
-#include <linux/dw_dmac.h>
#define DW_DMA_MAX_NR_CHANNELS 8
#define DW_DMA_MAX_NR_REQUESTS 16
@@ -132,6 +131,18 @@ struct dw_dma_regs {
/* Bitfields in DWC_PARAMS */
#define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */
+/* bursts size */
+enum dw_dma_msize {
+ DW_DMA_MSIZE_1,
+ DW_DMA_MSIZE_4,
+ DW_DMA_MSIZE_8,
+ DW_DMA_MSIZE_16,
+ DW_DMA_MSIZE_32,
+ DW_DMA_MSIZE_64,
+ DW_DMA_MSIZE_128,
+ DW_DMA_MSIZE_256,
+};
+
/* Bitfields in CTL_LO */
#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
@@ -161,20 +172,35 @@ struct dw_dma_regs {
#define DWC_CTLH_DONE 0x00001000
#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
-/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+/* Bitfields in CFG_LO */
#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */
#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */
#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */
+#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */
+#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12)
+#define DWC_CFGL_LOCK_CH_XACT (2 << 12)
+#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */
+#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14)
+#define DWC_CFGL_LOCK_BUS_XACT (2 << 14)
+#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */
+#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */
+#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
+#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
#define DWC_CFGL_MAX_BURST(x) ((x) << 20)
#define DWC_CFGL_RELOAD_SAR (1 << 30)
#define DWC_CFGL_RELOAD_DAR (1 << 31)
-/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
+/* Bitfields in CFG_HI */
+#define DWC_CFGH_FCMODE (1 << 0)
+#define DWC_CFGH_FIFO_MODE (1 << 1)
+#define DWC_CFGH_PROTCTL(x) ((x) << 2)
#define DWC_CFGH_DS_UPD_EN (1 << 5)
#define DWC_CFGH_SS_UPD_EN (1 << 6)
+#define DWC_CFGH_SRC_PER(x) ((x) << 7)
+#define DWC_CFGH_DST_PER(x) ((x) << 11)
/* Bitfields in SGR */
#define DWC_SGR_SGI(x) ((x) << 0)
@@ -221,9 +247,10 @@ struct dw_dma_chan {
bool nollp;
/* custom slave configuration */
- unsigned int request_line;
- unsigned char src_master;
- unsigned char dst_master;
+ u8 src_id;
+ u8 dst_id;
+ u8 src_master;
+ u8 dst_master;
/* configuration passed via DMA_SLAVE_CONFIG */
struct dma_slave_config dma_sconfig;
@@ -250,11 +277,11 @@ struct dw_dma {
void __iomem *regs;
struct dma_pool *desc_pool;
struct tasklet_struct tasklet;
- struct clk *clk;
/* channels */
struct dw_dma_chan *chan;
u8 all_chan_mask;
+ u8 in_use;
/* hardware configuration */
unsigned char nr_masters;
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 7b65633f495..123f578d6dd 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -288,7 +288,7 @@ static int edma_slave_config(struct edma_chan *echan,
static int edma_dma_pause(struct edma_chan *echan)
{
/* Pause/Resume only allowed with cyclic mode */
- if (!echan->edesc->cyclic)
+ if (!echan->edesc || !echan->edesc->cyclic)
return -EINVAL;
edma_pause(echan->ch_num);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f7626e37d0b..88afc48c2ca 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1334,7 +1334,7 @@ err_firmware:
release_firmware(fw);
}
-static int __init sdma_get_firmware(struct sdma_engine *sdma,
+static int sdma_get_firmware(struct sdma_engine *sdma,
const char *fw_name)
{
int ret;
@@ -1448,7 +1448,7 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
return dma_request_channel(mask, sdma_filter_fn, &data);
}
-static int __init sdma_probe(struct platform_device *pdev)
+static int sdma_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(sdma_dt_ids, &pdev->dev);
@@ -1603,6 +1603,8 @@ static int __init sdma_probe(struct platform_device *pdev)
sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
dma_set_max_seg_size(sdma->dma_device.dev, 65535);
+ platform_set_drvdata(pdev, sdma);
+
ret = dma_async_device_register(&sdma->dma_device);
if (ret) {
dev_err(&pdev->dev, "unable to register\n");
@@ -1640,7 +1642,27 @@ err_irq:
static int sdma_remove(struct platform_device *pdev)
{
- return -EBUSY;
+ struct sdma_engine *sdma = platform_get_drvdata(pdev);
+ struct resource *iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0);
+ int i;
+
+ dma_async_device_unregister(&sdma->dma_device);
+ kfree(sdma->script_addrs);
+ free_irq(irq, sdma);
+ iounmap(sdma->regs);
+ release_mem_region(iores->start, resource_size(iores));
+ /* Kill the tasklet */
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ struct sdma_channel *sdmac = &sdma->channel[i];
+
+ tasklet_kill(&sdmac->tasklet);
+ }
+ kfree(sdma);
+
+ platform_set_drvdata(pdev, NULL);
+ dev_info(&pdev->dev, "Removed...\n");
+ return 0;
}
static struct platform_driver sdma_driver = {
@@ -1650,13 +1672,10 @@ static struct platform_driver sdma_driver = {
},
.id_table = sdma_devtypes,
.remove = sdma_remove,
+ .probe = sdma_probe,
};
-static int __init sdma_module_init(void)
-{
- return platform_driver_probe(&sdma_driver, sdma_probe);
-}
-module_init(sdma_module_init);
+module_platform_driver(sdma_driver);
MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX SDMA driver");
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 6ad30e2c503..c6bd015b716 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -148,10 +148,16 @@ static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
tdmac->reg_base + TDCR);
}
+static void mmp_tdma_enable_irq(struct mmp_tdma_chan *tdmac, bool enable)
+{
+ if (enable)
+ writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
+ else
+ writel(0, tdmac->reg_base + TDIMR);
+}
+
static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
{
- /* enable irq */
- writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
/* enable dma chan */
writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
tdmac->reg_base + TDCR);
@@ -163,9 +169,6 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
tdmac->reg_base + TDCR);
- /* disable irq */
- writel(0, tdmac->reg_base + TDIMR);
-
tdmac->status = DMA_COMPLETE;
}
@@ -434,6 +437,10 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
i++;
}
+ /* enable interrupt */
+ if (flags & DMA_PREP_INTERRUPT)
+ mmp_tdma_enable_irq(tdmac, true);
+
tdmac->buf_len = buf_len;
tdmac->period_len = period_len;
tdmac->pos = 0;
@@ -455,6 +462,8 @@ static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
mmp_tdma_disable_chan(tdmac);
+ /* disable interrupt */
+ mmp_tdma_enable_irq(tdmac, false);
break;
case DMA_PAUSE:
mmp_tdma_pause_chan(tdmac);
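With the interrupt enable factored out of channel start, the completion IRQ is now armed only when the client actually asked for callbacks via DMA_PREP_INTERRUPT, and disarmed again on DMA_TERMINATE_ALL. On the client side that flag is passed when preparing the transfer; a sketch with assumed buffer variables:

	/* ask for completion callbacks on a cyclic (audio-style) transfer */
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);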
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 394cbc5c93e..769d35c3a82 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -45,19 +45,18 @@ static void mv_xor_issue_pending(struct dma_chan *chan);
#define mv_chan_to_devp(chan) \
((chan)->dmadev.dev)
-static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
+static void mv_desc_init(struct mv_xor_desc_slot *desc,
+ dma_addr_t addr, u32 byte_count,
+ enum dma_ctrl_flags flags)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
- hw_desc->status = (1 << 31);
+ hw_desc->status = XOR_DESC_DMA_OWNED;
hw_desc->phy_next_desc = 0;
- hw_desc->desc_command = (1 << 31);
-}
-
-static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
- u32 byte_count)
-{
- struct mv_xor_desc *hw_desc = desc->hw_desc;
+ /* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
+ hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
+ XOR_DESC_EOD_INT_EN : 0;
+ hw_desc->phy_dest_addr = addr;
hw_desc->byte_count = byte_count;
}
@@ -75,20 +74,6 @@ static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
hw_desc->phy_next_desc = 0;
}
-static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
- dma_addr_t addr)
-{
- struct mv_xor_desc *hw_desc = desc->hw_desc;
- hw_desc->phy_dest_addr = addr;
-}
-
-static int mv_chan_memset_slot_count(size_t len)
-{
- return 1;
-}
-
-#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
-
static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
int index, dma_addr_t addr)
{
@@ -123,17 +108,12 @@ static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
return intr_cause;
}
-static int mv_is_err_intr(u32 intr_cause)
-{
- if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
- return 1;
-
- return 0;
-}
-
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
- u32 val = ~(1 << (chan->idx * 16));
+ u32 val;
+
+ val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
+ val = ~(val << (chan->idx * 16));
dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
@@ -144,17 +124,6 @@ static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
-static int mv_can_chain(struct mv_xor_desc_slot *desc)
-{
- struct mv_xor_desc_slot *chain_old_tail = list_entry(
- desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
-
- if (chain_old_tail->type != desc->type)
- return 0;
-
- return 1;
-}
-
static void mv_set_mode(struct mv_xor_chan *chan,
enum dma_transaction_type type)
{
@@ -206,11 +175,6 @@ static char mv_chan_is_busy(struct mv_xor_chan *chan)
return (state == 1) ? 1 : 0;
}
-static int mv_chan_xor_slot_count(size_t len, int src_cnt)
-{
- return 1;
-}
-
/**
* mv_xor_free_slots - flags descriptor slots for reuse
* @slot: Slot to free
@@ -222,7 +186,7 @@ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
__func__, __LINE__, slot);
- slot->slots_per_op = 0;
+ slot->slot_used = 0;
}
@@ -236,13 +200,11 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
{
dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
__func__, __LINE__, sw_desc);
- if (sw_desc->type != mv_chan->current_type)
- mv_set_mode(mv_chan, sw_desc->type);
/* set the hardware chain */
mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
- mv_chan->pending += sw_desc->slot_cnt;
+ mv_chan->pending++;
mv_xor_issue_pending(&mv_chan->dmachan);
}
@@ -263,8 +225,6 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
desc->async_tx.callback_param);
dma_descriptor_unmap(&desc->async_tx);
- if (desc->group_head)
- desc->group_head = NULL;
}
/* run dependent operations */
@@ -381,19 +341,16 @@ static void mv_xor_tasklet(unsigned long data)
}
static struct mv_xor_desc_slot *
-mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
- int slots_per_op)
+mv_xor_alloc_slot(struct mv_xor_chan *mv_chan)
{
- struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
- LIST_HEAD(chain);
- int slots_found, retry = 0;
+ struct mv_xor_desc_slot *iter, *_iter;
+ int retry = 0;
/* start search from the last allocated descriptor
 * if a free slot cannot be found, start searching
* from the beginning of the list
*/
retry:
- slots_found = 0;
if (retry == 0)
iter = mv_chan->last_used;
else
@@ -403,55 +360,29 @@ retry:
list_for_each_entry_safe_continue(
iter, _iter, &mv_chan->all_slots, slot_node) {
+
prefetch(_iter);
prefetch(&_iter->async_tx);
- if (iter->slots_per_op) {
+ if (iter->slot_used) {
/* give up after finding the first busy slot
* on the second pass through the list
*/
if (retry)
break;
-
- slots_found = 0;
continue;
}
- /* start the allocation if the slot is correctly aligned */
- if (!slots_found++)
- alloc_start = iter;
-
- if (slots_found == num_slots) {
- struct mv_xor_desc_slot *alloc_tail = NULL;
- struct mv_xor_desc_slot *last_used = NULL;
- iter = alloc_start;
- while (num_slots) {
- int i;
-
- /* pre-ack all but the last descriptor */
- async_tx_ack(&iter->async_tx);
-
- list_add_tail(&iter->chain_node, &chain);
- alloc_tail = iter;
- iter->async_tx.cookie = 0;
- iter->slot_cnt = num_slots;
- iter->xor_check_result = NULL;
- for (i = 0; i < slots_per_op; i++) {
- iter->slots_per_op = slots_per_op - i;
- last_used = iter;
- iter = list_entry(iter->slot_node.next,
- struct mv_xor_desc_slot,
- slot_node);
- }
- num_slots -= slots_per_op;
- }
- alloc_tail->group_head = alloc_start;
- alloc_tail->async_tx.cookie = -EBUSY;
- list_splice(&chain, &alloc_tail->tx_list);
- mv_chan->last_used = last_used;
- mv_desc_clear_next_desc(alloc_start);
- mv_desc_clear_next_desc(alloc_tail);
- return alloc_tail;
- }
+ /* pre-ack descriptor */
+ async_tx_ack(&iter->async_tx);
+
+ iter->slot_used = 1;
+ INIT_LIST_HEAD(&iter->chain_node);
+ iter->async_tx.cookie = -EBUSY;
+ mv_chan->last_used = iter;
+ mv_desc_clear_next_desc(iter);
+
+ return iter;
+
}
if (!retry++)
goto retry;
@@ -468,7 +399,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
- struct mv_xor_desc_slot *grp_start, *old_chain_tail;
+ struct mv_xor_desc_slot *old_chain_tail;
dma_cookie_t cookie;
int new_hw_chain = 1;
@@ -476,30 +407,24 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
"%s sw_desc %p: async_tx %p\n",
__func__, sw_desc, &sw_desc->async_tx);
- grp_start = sw_desc->group_head;
-
spin_lock_bh(&mv_chan->lock);
cookie = dma_cookie_assign(tx);
if (list_empty(&mv_chan->chain))
- list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
+ list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
else {
new_hw_chain = 0;
old_chain_tail = list_entry(mv_chan->chain.prev,
struct mv_xor_desc_slot,
chain_node);
- list_splice_init(&grp_start->tx_list,
- &old_chain_tail->chain_node);
-
- if (!mv_can_chain(grp_start))
- goto submit_done;
+ list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
&old_chain_tail->async_tx.phys);
/* fix up the hardware chain */
- mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
+ mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);
/* if the channel is not busy */
if (!mv_chan_is_busy(mv_chan)) {
@@ -514,9 +439,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
}
if (new_hw_chain)
- mv_xor_start_new_chain(mv_chan, grp_start);
+ mv_xor_start_new_chain(mv_chan, sw_desc);
-submit_done:
spin_unlock_bh(&mv_chan->lock);
return cookie;
@@ -537,8 +461,9 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
while (idx < num_descs_in_pool) {
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot) {
- printk(KERN_INFO "MV XOR Channel only initialized"
- " %d descriptor slots", idx);
+ dev_info(mv_chan_to_devp(mv_chan),
+ "channel only initialized %d descriptor slots",
+ idx);
break;
}
virt_desc = mv_chan->dma_desc_pool_virt;
@@ -548,7 +473,6 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
- INIT_LIST_HEAD(&slot->tx_list);
dma_desc = mv_chan->dma_desc_pool;
slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
slot->idx = idx++;
@@ -572,51 +496,11 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
}
static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
- size_t len, unsigned long flags)
-{
- struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
- struct mv_xor_desc_slot *sw_desc, *grp_start;
- int slot_cnt;
-
- dev_dbg(mv_chan_to_devp(mv_chan),
- "%s dest: %pad src %pad len: %u flags: %ld\n",
- __func__, &dest, &src, len, flags);
- if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
- return NULL;
-
- BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
-
- spin_lock_bh(&mv_chan->lock);
- slot_cnt = mv_chan_memcpy_slot_count(len);
- sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
- if (sw_desc) {
- sw_desc->type = DMA_MEMCPY;
- sw_desc->async_tx.flags = flags;
- grp_start = sw_desc->group_head;
- mv_desc_init(grp_start, flags);
- mv_desc_set_byte_count(grp_start, len);
- mv_desc_set_dest_addr(sw_desc->group_head, dest);
- mv_desc_set_src_addr(grp_start, 0, src);
- sw_desc->unmap_src_cnt = 1;
- sw_desc->unmap_len = len;
- }
- spin_unlock_bh(&mv_chan->lock);
-
- dev_dbg(mv_chan_to_devp(mv_chan),
- "%s sw_desc %p async_tx %p\n",
- __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
-
- return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
- struct mv_xor_desc_slot *sw_desc, *grp_start;
- int slot_cnt;
+ struct mv_xor_desc_slot *sw_desc;
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
@@ -628,20 +512,13 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
__func__, src_cnt, len, &dest, flags);
spin_lock_bh(&mv_chan->lock);
- slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
- sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
+ sw_desc = mv_xor_alloc_slot(mv_chan);
if (sw_desc) {
sw_desc->type = DMA_XOR;
sw_desc->async_tx.flags = flags;
- grp_start = sw_desc->group_head;
- mv_desc_init(grp_start, flags);
- /* the byte count field is the same as in memcpy desc*/
- mv_desc_set_byte_count(grp_start, len);
- mv_desc_set_dest_addr(sw_desc->group_head, dest);
- sw_desc->unmap_src_cnt = src_cnt;
- sw_desc->unmap_len = len;
+ mv_desc_init(sw_desc, dest, len, flags);
while (src_cnt--)
- mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
+ mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
}
spin_unlock_bh(&mv_chan->lock);
dev_dbg(mv_chan_to_devp(mv_chan),
@@ -650,6 +527,35 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
return sw_desc ? &sw_desc->async_tx : NULL;
}
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ /*
+ * A MEMCPY operation is identical to an XOR operation with only
+ * a single source address.
+ */
+ return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
+}
+
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
+{
+ struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+ dma_addr_t src, dest;
+ size_t len;
+
+ src = mv_chan->dummy_src_addr;
+ dest = mv_chan->dummy_dst_addr;
+ len = MV_XOR_MIN_BYTE_COUNT;
+
+ /*
+ * We implement the DMA_INTERRUPT operation as a minimum sized
+ * XOR operation with a single dummy source address.
+ */
+ return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
+}
+
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
@@ -735,18 +641,16 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
u32 intr_cause)
{
- if (intr_cause & (1 << 4)) {
- dev_dbg(mv_chan_to_devp(chan),
- "ignore this error\n");
- return;
+ if (intr_cause & XOR_INT_ERR_DECODE) {
+ dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
+ return;
}
- dev_err(mv_chan_to_devp(chan),
- "error on chan %d. intr cause 0x%08x\n",
+ dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
chan->idx, intr_cause);
mv_dump_xor_regs(chan);
- BUG();
+ WARN_ON(1);
}
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
@@ -756,7 +660,7 @@ static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
- if (mv_is_err_intr(intr_cause))
+ if (intr_cause & XOR_INTR_ERRORS)
mv_xor_err_interrupt_handler(chan, intr_cause);
tasklet_schedule(&chan->irq_tasklet);
@@ -995,6 +899,10 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
dma_free_coherent(dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+ dma_unmap_single(dev, mv_chan->dummy_src_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, mv_chan->dummy_dst_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
device_node) {
@@ -1024,6 +932,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_dev = &mv_chan->dmadev;
+ /*
+ * These source and destination dummy buffers are used to implement
+ * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
+ * Hence, we only need to map the buffers at initialization-time.
+ */
+ mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
+ mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+ mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
+ mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+
/* allocate coherent memory for hardware descriptors
* note: writecombine gives slightly better performance, but
* requires that we explicitly flush the writes
@@ -1048,6 +966,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_dev->dev = &pdev->dev;
/* set prep routines based on capability */
+ if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
@@ -1070,7 +990,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan_unmask_interrupts(mv_chan);
- mv_set_mode(mv_chan, DMA_MEMCPY);
+ mv_set_mode(mv_chan, DMA_XOR);
spin_lock_init(&mv_chan->lock);
INIT_LIST_HEAD(&mv_chan->chain);
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index d0749229c87..78edc7e4456 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -23,17 +23,22 @@
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
-#define USE_TIMER
#define MV_XOR_POOL_SIZE PAGE_SIZE
#define MV_XOR_SLOT_SIZE 64
#define MV_XOR_THRESHOLD 1
#define MV_XOR_MAX_CHANNELS 2
+#define MV_XOR_MIN_BYTE_COUNT SZ_128
+#define MV_XOR_MAX_BYTE_COUNT (SZ_16M - 1)
+
/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
#define XOR_DESCRIPTOR_SWAP BIT(14)
+#define XOR_DESC_DMA_OWNED BIT(31)
+#define XOR_DESC_EOD_INT_EN BIT(31)
+
#define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4))
@@ -48,7 +53,24 @@
#define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40)
#define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50)
#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60)
-#define XOR_INTR_MASK_VALUE 0x3F5
+
+#define XOR_INT_END_OF_DESC BIT(0)
+#define XOR_INT_END_OF_CHAIN BIT(1)
+#define XOR_INT_STOPPED BIT(2)
+#define XOR_INT_PAUSED BIT(3)
+#define XOR_INT_ERR_DECODE BIT(4)
+#define XOR_INT_ERR_RDPROT BIT(5)
+#define XOR_INT_ERR_WRPROT BIT(6)
+#define XOR_INT_ERR_OWN BIT(7)
+#define XOR_INT_ERR_PAR BIT(8)
+#define XOR_INT_ERR_MBUS BIT(9)
+
+#define XOR_INTR_ERRORS (XOR_INT_ERR_DECODE | XOR_INT_ERR_RDPROT | \
+ XOR_INT_ERR_WRPROT | XOR_INT_ERR_OWN | \
+ XOR_INT_ERR_PAR | XOR_INT_ERR_MBUS)
+
+#define XOR_INTR_MASK_VALUE (XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | \
+ XOR_INT_STOPPED | XOR_INTR_ERRORS)
#define WINDOW_BASE(w) (0x50 + ((w) << 2))
#define WINDOW_SIZE(w) (0x70 + ((w) << 2))
@@ -97,10 +119,9 @@ struct mv_xor_chan {
struct list_head all_slots;
int slots_allocated;
struct tasklet_struct irq_tasklet;
-#ifdef USE_TIMER
- unsigned long cleanup_time;
- u32 current_on_last_cleanup;
-#endif
+ char dummy_src[MV_XOR_MIN_BYTE_COUNT];
+ char dummy_dst[MV_XOR_MIN_BYTE_COUNT];
+ dma_addr_t dummy_src_addr, dummy_dst_addr;
};
/**
@@ -110,16 +131,10 @@ struct mv_xor_chan {
* @completed_node: node on the mv_xor_chan.completed_slots list
* @hw_desc: virtual address of the hardware descriptor chain
* @phys: hardware address of the hardware descriptor chain
- * @group_head: first operation in a transaction
- * @slot_cnt: total slots used in an transaction (group of operations)
- * @slots_per_op: number of slots per operation
+ * @slot_used: slot in use or not
* @idx: pool index
- * @unmap_src_cnt: number of xor sources
- * @unmap_len: transaction bytecount
* @tx_list: list of slots that make up a multi-descriptor transaction
* @async_tx: support for the async_tx api
- * @xor_check_result: result of zero sum
- * @crc32_result: result crc calculation
*/
struct mv_xor_desc_slot {
struct list_head slot_node;
@@ -127,23 +142,9 @@ struct mv_xor_desc_slot {
struct list_head completed_node;
enum dma_transaction_type type;
void *hw_desc;
- struct mv_xor_desc_slot *group_head;
- u16 slot_cnt;
- u16 slots_per_op;
+ u16 slot_used;
u16 idx;
- u16 unmap_src_cnt;
- u32 value;
- size_t unmap_len;
- struct list_head tx_list;
struct dma_async_tx_descriptor async_tx;
- union {
- u32 *xor_check_result;
- u32 *crc32_result;
- };
-#ifdef USE_TIMER
- unsigned long arrival_time;
- struct timer_list timeout;
-#endif
};
/*
@@ -189,9 +190,4 @@ struct mv_xor_desc {
#define mv_hw_desc_slot_idx(hw_desc, idx) \
((void *)(((unsigned long)hw_desc) + ((idx) << 5)))
-#define MV_XOR_MIN_BYTE_COUNT (128)
-#define XOR_MAX_BYTE_COUNT ((16 * 1024 * 1024) - 1)
-#define MV_XOR_MAX_BYTE_COUNT XOR_MAX_BYTE_COUNT
-
-
#endif
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index d5149aacd2f..4839bfa74a1 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1367,17 +1367,10 @@ static int pl330_submit_req(struct pl330_thread *thrd,
struct pl330_dmac *pl330 = thrd->dmac;
struct _xfer_spec xs;
unsigned long flags;
- void __iomem *regs;
unsigned idx;
u32 ccr;
int ret = 0;
- /* No Req or Unacquired Channel or DMAC */
- if (!desc || !thrd || thrd->free)
- return -EINVAL;
-
- regs = thrd->dmac->base;
-
if (pl330->state == DYING
|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
@@ -2755,8 +2748,10 @@ probe_err3:
list_del(&pch->chan.device_node);
/* Flush the channel */
- pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
- pl330_free_chan_resources(&pch->chan);
+ if (pch->thread) {
+ pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+ pl330_free_chan_resources(&pch->chan);
+ }
}
probe_err2:
pl330_del(pl330);
@@ -2782,8 +2777,10 @@ static int pl330_remove(struct amba_device *adev)
list_del(&pch->chan.device_node);
/* Flush the channel */
- pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
- pl330_free_chan_resources(&pch->chan);
+ if (pch->thread) {
+ pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+ pl330_free_chan_resources(&pch->chan);
+ }
}
pl330_del(pl330);
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
index dabbf0aba2e..80fd2aeb487 100644
--- a/drivers/dma/sh/rcar-audmapp.c
+++ b/drivers/dma/sh/rcar-audmapp.c
@@ -117,7 +117,7 @@ static void audmapp_start_xfer(struct shdma_chan *schan,
audmapp_write(auchan, chcr, PDMACHCR);
}
-static void audmapp_get_config(struct audmapp_chan *auchan, int slave_id,
+static int audmapp_get_config(struct audmapp_chan *auchan, int slave_id,
u32 *chcr, dma_addr_t *dst)
{
struct audmapp_device *audev = to_dev(auchan);
@@ -131,20 +131,22 @@ static void audmapp_get_config(struct audmapp_chan *auchan, int slave_id,
if (!pdata) { /* DT */
*chcr = ((u32)slave_id) << 16;
auchan->shdma_chan.slave_id = (slave_id) >> 8;
- return;
+ return 0;
}
/* non-DT */
if (slave_id >= AUDMAPP_SLAVE_NUMBER)
- return;
+ return -ENXIO;
for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
if (cfg->slave_id == slave_id) {
*chcr = cfg->chcr;
*dst = cfg->dst;
- break;
+ return 0;
}
+
+ return -ENXIO;
}
static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
@@ -153,8 +155,11 @@ static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
struct audmapp_chan *auchan = to_chan(schan);
u32 chcr;
dma_addr_t dst;
+ int ret;
- audmapp_get_config(auchan, slave_id, &chcr, &dst);
+ ret = audmapp_get_config(auchan, slave_id, &chcr, &dst);
+ if (ret < 0)
+ return ret;
if (try)
return 0;
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 1f92a56fd2b..3aa10b32825 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -862,7 +862,6 @@ static int sun6i_dma_probe(struct platform_device *pdev)
{
struct sun6i_dma_dev *sdc;
struct resource *res;
- struct clk *mux, *pll6;
int ret, i;
sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
@@ -886,28 +885,6 @@ static int sun6i_dma_probe(struct platform_device *pdev)
return PTR_ERR(sdc->clk);
}
- mux = clk_get(NULL, "ahb1_mux");
- if (IS_ERR(mux)) {
- dev_err(&pdev->dev, "Couldn't get AHB1 Mux\n");
- return PTR_ERR(mux);
- }
-
- pll6 = clk_get(NULL, "pll6");
- if (IS_ERR(pll6)) {
- dev_err(&pdev->dev, "Couldn't get PLL6\n");
- clk_put(mux);
- return PTR_ERR(pll6);
- }
-
- ret = clk_set_parent(mux, pll6);
- clk_put(pll6);
- clk_put(mux);
-
- if (ret) {
- dev_err(&pdev->dev, "Couldn't reparent AHB1 on PLL6\n");
- return ret;
- }
-
sdc->rstc = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(sdc->rstc)) {
dev_err(&pdev->dev, "No reset controller specified\n");
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 42a13e8d460..a6e64767186 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -1365,7 +1365,6 @@ static const struct of_device_id xilinx_vdma_of_ids[] = {
static struct platform_driver xilinx_vdma_driver = {
.driver = {
.name = "xilinx-vdma",
- .owner = THIS_MODULE,
.of_match_table = xilinx_vdma_of_ids,
},
.probe = xilinx_vdma_probe,
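Deleting .owner here is safe because platform_driver_register() became a macro that fills in the module owner on the driver core's behalf (around v3.16); paraphrased from <linux/platform_device.h>:

	/* the core supplies the module owner itself */
	#define platform_driver_register(drv) \
		__platform_driver_register(drv, THIS_MODULE)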