Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig              |  27
-rw-r--r--  drivers/dma/Makefile             |   1
-rw-r--r--  drivers/dma/amba-pl08x.c         |  43
-rw-r--r--  drivers/dma/at_hdmac.c           | 103
-rw-r--r--  drivers/dma/at_hdmac_regs.h      |   1
-rw-r--r--  drivers/dma/coh901318.c          |  12
-rw-r--r--  drivers/dma/coh901318_lli.c      |  23
-rw-r--r--  drivers/dma/coh901318_lli.h      |   4
-rw-r--r--  drivers/dma/dmaengine.c          |   4
-rw-r--r--  drivers/dma/dmatest.c            |  46
-rw-r--r--  drivers/dma/dw_dmac.c            |  83
-rw-r--r--  drivers/dma/dw_dmac_regs.h       |   1
-rw-r--r--  drivers/dma/ep93xx_dma.c         |  90
-rw-r--r--  drivers/dma/fsldma.c             |   4
-rw-r--r--  drivers/dma/imx-dma.c            |  10
-rw-r--r--  drivers/dma/imx-sdma.c           |  27
-rw-r--r--  drivers/dma/intel_mid_dma.c      |  39
-rw-r--r--  drivers/dma/intel_mid_dma_regs.h |   4
-rw-r--r--  drivers/dma/iop-adma.c           |  16
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c      |  29
-rw-r--r--  drivers/dma/mpc512x_dma.c        |  12
-rw-r--r--  drivers/dma/mv_xor.c             |  11
-rw-r--r--  drivers/dma/mxs-dma.c            |  59
-rw-r--r--  drivers/dma/pch_dma.c            |  20
-rw-r--r--  drivers/dma/pl330.c              | 132
-rw-r--r--  drivers/dma/shdma.c              |  72
-rw-r--r--  drivers/dma/sirf-dma.c           | 707
-rw-r--r--  drivers/dma/ste_dma40.c          | 441
-rw-r--r--  drivers/dma/ste_dma40_ll.h       |  11
-rw-r--r--  drivers/dma/timb_dma.c           |  30
-rw-r--r--  drivers/dma/txx9dmac.c           |  12
31 files changed, 1608 insertions(+), 466 deletions(-)
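
Most of the per-driver changes below are a mechanical conversion of the slave-DMA API from enum dma_data_direction (DMA_TO_DEVICE/DMA_FROM_DEVICE) to the dedicated enum dma_transfer_direction. A minimal sketch of the renamed values and the updated prep_slave_sg callback signature, assuming the dmaengine.h definitions of this kernel generation (illustrative only, not part of the patch):

/* Illustrative sketch, not part of the patch: the transfer-direction enum
 * that replaces DMA_TO_DEVICE/DMA_FROM_DEVICE in the slave-DMA callbacks. */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};

/* Every converted driver's prep_slave_sg hook now takes the new type: */
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags);
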
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5a99bb3f255..f1a274994bb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,7 +124,7 @@ config MV_XOR
config MX3_IPU
bool "MX3x Image Processing Unit support"
- depends on SOC_IMX31 || SOC_IMX35
+ depends on ARCH_MXC
select DMA_ENGINE
default y
help
@@ -187,6 +187,13 @@ config TIMB_DMA
help
Enable support for the Timberdale FPGA DMA engine.
+config SIRF_DMA
+ tristate "CSR SiRFprimaII DMA support"
+ depends on ARCH_PRIMA2
+ select DMA_ENGINE
+ help
+ Enable support for the CSR SiRFprimaII DMA engine.
+
config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
bool
@@ -201,26 +208,26 @@ config PL330_DMA
platform_data for a dma-pl330 device.
config PCH_DMA
- tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
+ tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
depends on PCI && X86
select DMA_ENGINE
help
Enable support for Intel EG20T PCH DMA engine.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7213 and ML7223.
- ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
- for MP(Media Phone) use.
- ML7213/ML7223 is companion chip for Intel Atom E6xx series.
- ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+ This driver also can be used for LAPIS Semiconductor IOH(Input/
+ Output Hub), ML7213, ML7223 and ML7831.
+ ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
+ for MP(Media Phone) use and ML7831 IOH is for general purpose use.
+ ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+ ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
config IMX_SDMA
tristate "i.MX SDMA support"
- depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
+ depends on ARCH_MXC
select DMA_ENGINE
help
Support the i.MX SDMA engine. This engine is integrated into
- Freescale i.MX25/31/35/51 chips.
+ Freescale i.MX25/31/35/51/53 chips.
config IMX_DMA
tristate "i.MX DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 30cf3b1f0c5..009a222e828 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b7cbd1ab1db..8a281584458 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -854,8 +854,10 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
int ret;
/* Check if we already have a channel */
- if (plchan->phychan)
- return 0;
+ if (plchan->phychan) {
+ ch = plchan->phychan;
+ goto got_channel;
+ }
ch = pl08x_get_phy_channel(pl08x, plchan);
if (!ch) {
@@ -880,21 +882,22 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
return -EBUSY;
}
ch->signal = ret;
-
- /* Assign the flow control signal to this channel */
- if (txd->direction == DMA_TO_DEVICE)
- txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
- else if (txd->direction == DMA_FROM_DEVICE)
- txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
}
+ plchan->phychan = ch;
dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
ch->id,
ch->signal,
plchan->name);
+got_channel:
+ /* Assign the flow control signal to this channel */
+ if (txd->direction == DMA_MEM_TO_DEV)
+ txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+ else if (txd->direction == DMA_DEV_TO_MEM)
+ txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
plchan->phychan_hold++;
- plchan->phychan = ch;
return 0;
}
@@ -1102,10 +1105,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
/* Transfer direction */
plchan->runtime_direction = config->direction;
- if (config->direction == DMA_TO_DEVICE) {
+ if (config->direction == DMA_MEM_TO_DEV) {
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
- } else if (config->direction == DMA_FROM_DEVICE) {
+ } else if (config->direction == DMA_DEV_TO_MEM) {
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
} else {
@@ -1136,7 +1139,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
- if (plchan->runtime_direction == DMA_FROM_DEVICE) {
+ if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
plchan->src_addr = config->src_addr;
plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
pl08x_select_bus(plchan->cd->periph_buses,
@@ -1152,7 +1155,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
"configured channel %s (%s) for %s, data width %d, "
"maxburst %d words, LE, CCTL=0x%08x\n",
dma_chan_name(chan), plchan->name,
- (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+ (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
addr_width,
maxburst,
cctl);
@@ -1322,7 +1325,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -1354,10 +1357,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
*/
txd->direction = direction;
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
txd->cctl = plchan->dst_cctl;
slave_addr = plchan->dst_addr;
- } else if (direction == DMA_FROM_DEVICE) {
+ } else if (direction == DMA_DEV_TO_MEM) {
txd->cctl = plchan->src_cctl;
slave_addr = plchan->src_addr;
} else {
@@ -1368,10 +1371,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
}
if (plchan->cd->device_fc)
- tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+ tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
PL080_FLOW_PER2MEM_PER;
else
- tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+ tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
PL080_FLOW_PER2MEM;
txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1387,7 +1390,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
list_add_tail(&dsg->node, &txd->dsg_list);
dsg->len = sg_dma_len(sg);
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
dsg->src_addr = sg_phys(sg);
dsg->dst_addr = slave_addr;
} else {
@@ -2054,6 +2057,8 @@ static struct amba_id pl08x_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl08x_ids);
+
static struct amba_driver pl08x_amba_driver = {
.drv.name = DRIVER_NAME,
.id_table = pl08x_ids,
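
With the rename applied above, a slave client of pl08x (or any other converted controller) describes its transfers with the new direction names. A hedged client-side sketch, assuming the generic dmaengine_slave_config() helper; the function name and FIFO address are hypothetical placeholders (illustrative only, not part of the patch):

/* Illustrative client-side sketch, not part of the patch. */
static int example_configure_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,	/* was DMA_TO_DEVICE */
		.dst_addr	= fifo_addr,		/* peripheral FIFO, caller-supplied */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	return dmaengine_slave_config(chan, &cfg);
}
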
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fcfa0a8b5c5..97f87b29b9f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -23,6 +23,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "at_hdmac_regs.h"
@@ -660,7 +662,7 @@ err_desc_get:
*/
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
@@ -678,7 +680,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
sg_len,
- direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+ direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
flags);
if (unlikely(!atslave || !sg_len)) {
@@ -692,7 +694,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrlb = ATC_IEN;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
ctrla |= ATC_DST_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
@@ -725,7 +727,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
total_len += len;
}
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
ctrla |= ATC_SRC_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
@@ -787,7 +789,7 @@ err_desc_get:
*/
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
if (period_len > (ATC_BTSIZE_MAX << reg_width))
goto err_out;
@@ -795,7 +797,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
goto err_out;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto err_out;
- if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
goto err_out;
return 0;
@@ -810,7 +812,7 @@ err_out:
static int
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
unsigned int period_index, dma_addr_t buf_addr,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
u32 ctrla;
unsigned int reg_width = atslave->reg_width;
@@ -822,7 +824,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
| period_len >> reg_width;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
desc->lli.saddr = buf_addr + (period_len * period_index);
desc->lli.daddr = atslave->tx_reg;
desc->lli.ctrla = ctrla;
@@ -833,7 +835,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
| ATC_DIF(AT_DMA_PER_IF);
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
desc->lli.saddr = atslave->rx_reg;
desc->lli.daddr = buf_addr + (period_len * period_index);
desc->lli.ctrla = ctrla;
@@ -861,7 +863,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
*/
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_slave *atslave = chan->private;
@@ -872,7 +874,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
unsigned int i;
dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
- direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+ direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
buf_addr,
periods, buf_len, period_len);
@@ -1175,6 +1177,56 @@ static void atc_free_chan_resources(struct dma_chan *chan)
/*-- Module Management -----------------------------------------------*/
+/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
+static struct at_dma_platform_data at91sam9rl_config = {
+ .nr_channels = 2,
+};
+static struct at_dma_platform_data at91sam9g45_config = {
+ .nr_channels = 8,
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_dma_dt_ids[] = {
+ {
+ .compatible = "atmel,at91sam9rl-dma",
+ .data = &at91sam9rl_config,
+ }, {
+ .compatible = "atmel,at91sam9g45-dma",
+ .data = &at91sam9g45_config,
+ }, {
+ /* sentinel */
+ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
+#endif
+
+static const struct platform_device_id atdma_devtypes[] = {
+ {
+ .name = "at91sam9rl_dma",
+ .driver_data = (unsigned long) &at91sam9rl_config,
+ }, {
+ .name = "at91sam9g45_dma",
+ .driver_data = (unsigned long) &at91sam9g45_config,
+ }, {
+ /* sentinel */
+ }
+};
+
+static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
+ struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
+ if (match == NULL)
+ return NULL;
+ return match->data;
+ }
+ return (struct at_dma_platform_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
/**
* at_dma_off - disable DMA controller
* @atdma: the Atmel HDAMC device
@@ -1193,18 +1245,23 @@ static void at_dma_off(struct at_dma *atdma)
static int __init at_dma_probe(struct platform_device *pdev)
{
- struct at_dma_platform_data *pdata;
struct resource *io;
struct at_dma *atdma;
size_t size;
int irq;
int err;
int i;
+ struct at_dma_platform_data *plat_dat;
- /* get DMA Controller parameters from platform */
- pdata = pdev->dev.platform_data;
- if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
- return -EINVAL;
+ /* setup platform data for each SoC */
+ dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
+ dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
+ dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
+
+ /* get DMA parameters from controller type */
+ plat_dat = at_dma_get_driver_data(pdev);
+ if (!plat_dat)
+ return -ENODEV;
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io)
@@ -1215,14 +1272,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
return irq;
size = sizeof(struct at_dma);
- size += pdata->nr_channels * sizeof(struct at_dma_chan);
+ size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
atdma = kzalloc(size, GFP_KERNEL);
if (!atdma)
return -ENOMEM;
- /* discover transaction capabilites from the platform data */
- atdma->dma_common.cap_mask = pdata->cap_mask;
- atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
+ /* discover transaction capabilities */
+ atdma->dma_common.cap_mask = plat_dat->cap_mask;
+ atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
size = resource_size(io);
if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
@@ -1268,7 +1325,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* initialize channels related values */
INIT_LIST_HEAD(&atdma->dma_common.channels);
- for (i = 0; i < pdata->nr_channels; i++) {
+ for (i = 0; i < plat_dat->nr_channels; i++) {
struct at_dma_chan *atchan = &atdma->chan[i];
atchan->chan_common.device = &atdma->dma_common;
@@ -1313,7 +1370,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
- pdata->nr_channels);
+ plat_dat->nr_channels);
dma_async_device_register(&atdma->dma_common);
@@ -1495,9 +1552,11 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = {
static struct platform_driver at_dma_driver = {
.remove = __exit_p(at_dma_remove),
.shutdown = at_dma_shutdown,
+ .id_table = atdma_devtypes,
.driver = {
.name = "at_hdmac",
.pm = &at_dma_dev_pm_ops,
+ .of_match_table = of_match_ptr(atmel_dma_dt_ids),
},
};
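
at_hdmac can now be bound either through the OF match table or through the platform_device_id table added above. A hedged sketch of how a hypothetical non-DT board file would select the controller variant purely by device name; the register base and IRQ are placeholders, not taken from any real board (illustrative only, not part of the patch):

/* Hypothetical board-file sketch: the controller variant is chosen by
 * matching the device name against atdma_devtypes[]. */
static struct resource at_hdmac_resources[] = {
	{
		.start	= 0xffffec00,			/* placeholder register base */
		.end	= 0xffffec00 + 0x200 - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 21,				/* placeholder IRQ number */
		.end	= 21,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at_hdmac_device = {
	.name		= "at91sam9g45_dma",
	.id		= -1,
	.resource	= at_hdmac_resources,
	.num_resources	= ARRAY_SIZE(at_hdmac_resources),
};
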
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index aa4c9aebab7..dcaedfc181c 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -251,6 +251,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
/**
* struct at_dma - internal representation of an Atmel HDMA Controller
* @chan_common: common dmaengine dma_device object members
+ * @atdma_devtype: identifier of DMA controller compatibility
* @ch_regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 4234f416ef1..d65a718c0f9 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -39,7 +39,7 @@ struct coh901318_desc {
struct scatterlist *sg;
unsigned int sg_len;
struct coh901318_lli *lli;
- enum dma_data_direction dir;
+ enum dma_transfer_direction dir;
unsigned long flags;
u32 head_config;
u32 head_ctrl;
@@ -1034,7 +1034,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
static struct dma_async_tx_descriptor *
coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -1077,7 +1077,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrl_last |= cohc->runtime_ctrl;
ctrl |= cohc->runtime_ctrl;
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
@@ -1085,7 +1085,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrl_chained |= tx_flags;
ctrl_last |= tx_flags;
ctrl |= tx_flags;
- } else if (direction == DMA_FROM_DEVICE) {
+ } else if (direction == DMA_DEV_TO_MEM) {
u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
@@ -1274,11 +1274,11 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
int i = 0;
/* We only support mem to per or per to mem transfers */
- if (config->direction == DMA_FROM_DEVICE) {
+ if (config->direction == DMA_DEV_TO_MEM) {
addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
- } else if (config->direction == DMA_TO_DEVICE) {
+ } else if (config->direction == DMA_MEM_TO_DEV) {
addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 9f7e0e6a7ee..6c0e2d4c668 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -7,11 +7,10 @@
* Author: Per Friden <per.friden@stericsson.com>
*/
-#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
-#include <linux/dmapool.h>
#include <linux/memory.h>
#include <linux/gfp.h>
+#include <linux/dmapool.h>
#include <mach/coh901318.h>
#include "coh901318_lli.h"
@@ -177,18 +176,18 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t buf, unsigned int size,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
- enum dma_data_direction dir)
+ enum dma_transfer_direction dir)
{
int s = size;
dma_addr_t src;
dma_addr_t dst;
- if (dir == DMA_TO_DEVICE) {
+ if (dir == DMA_MEM_TO_DEV) {
src = buf;
dst = dev_addr;
- } else if (dir == DMA_FROM_DEVICE) {
+ } else if (dir == DMA_DEV_TO_MEM) {
src = dev_addr;
dst = buf;
@@ -215,9 +214,9 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
lli = coh901318_lli_next(lli);
- if (dir == DMA_TO_DEVICE)
+ if (dir == DMA_MEM_TO_DEV)
src += block_size;
- else if (dir == DMA_FROM_DEVICE)
+ else if (dir == DMA_DEV_TO_MEM)
dst += block_size;
}
@@ -234,7 +233,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
struct scatterlist *sgl, unsigned int nents,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
u32 ctrl_last,
- enum dma_data_direction dir, u32 ctrl_irq_mask)
+ enum dma_transfer_direction dir, u32 ctrl_irq_mask)
{
int i;
struct scatterlist *sg;
@@ -249,9 +248,9 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
spin_lock(&pool->lock);
- if (dir == DMA_TO_DEVICE)
+ if (dir == DMA_MEM_TO_DEV)
dst = dev_addr;
- else if (dir == DMA_FROM_DEVICE)
+ else if (dir == DMA_DEV_TO_MEM)
src = dev_addr;
else
goto err;
@@ -269,7 +268,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
ctrl_sg = ctrl ? ctrl : ctrl_last;
- if (dir == DMA_TO_DEVICE)
+ if (dir == DMA_MEM_TO_DEV)
/* increment source address */
src = sg_phys(sg);
else
@@ -293,7 +292,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
lli->src_addr = src;
lli->dst_addr = dst;
- if (dir == DMA_FROM_DEVICE)
+ if (dir == DMA_DEV_TO_MEM)
dst += elem_size;
else
src += elem_size;
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318_lli.h
index 7a5c80990e9..abff3714fdd 100644
--- a/drivers/dma/coh901318_lli.h
+++ b/drivers/dma/coh901318_lli.h
@@ -97,7 +97,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t buf, unsigned int size,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
- enum dma_data_direction dir);
+ enum dma_transfer_direction dir);
/**
* coh901318_lli_fill_single() - Prepares the lli:s for dma scatter list transfer
@@ -119,6 +119,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
struct scatterlist *sg, unsigned int nents,
dma_addr_t dev_addr, u32 ctrl_chained,
u32 ctrl, u32 ctrl_last,
- enum dma_data_direction dir, u32 ctrl_irq_mask);
+ enum dma_transfer_direction dir, u32 ctrl_irq_mask);
#endif /* COH901318_LLI_H */
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b48967b499d..a6c6051ec85 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_interrupt);
BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
!device->device_prep_dma_sg);
- BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
- !device->device_prep_slave_sg);
BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
!device->device_prep_dma_cyclic);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_control);
+ BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
+ !device->device_prep_interleaved_dma);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index eb1d8641cf5..2b8661b54ea 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
return error_count;
}
-static void dmatest_callback(void *completion)
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+ bool done;
+ wait_queue_head_t *wait;
+};
+
+static void dmatest_callback(void *arg)
{
- complete(completion);
+ struct dmatest_done *done = arg;
+
+ done->done = true;
+ wake_up_all(done->wait);
}
/*
@@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)
*/
static int dmatest_func(void *data)
{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
struct dmatest_thread *thread = data;
+ struct dmatest_done done = { .wait = &done_wait };
struct dma_chan *chan;
const char *thread_name;
unsigned int src_off, dst_off, len;
@@ -252,7 +263,7 @@ static int dmatest_func(void *data)
int i;
thread_name = current->comm;
- set_freezable_with_signal();
+ set_freezable();
ret = -ENOMEM;
@@ -306,9 +317,6 @@ static int dmatest_func(void *data)
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
- struct completion cmp;
- unsigned long start, tmo, end = 0 /* compiler... */;
- bool reload = true;
u8 align = 0;
total_tests++;
@@ -391,9 +399,9 @@ static int dmatest_func(void *data)
continue;
}
- init_completion(&cmp);
+ done.done = false;
tx->callback = dmatest_callback;
- tx->callback_param = &cmp;
+ tx->callback_param = &done;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
@@ -407,20 +415,20 @@ static int dmatest_func(void *data)
}
dma_async_issue_pending(chan);
- do {
- start = jiffies;
- if (reload)
- end = start + msecs_to_jiffies(timeout);
- else if (end <= start)
- end = start + 1;
- tmo = wait_for_completion_interruptible_timeout(&cmp,
- end - start);
- reload = try_to_freeze();
- } while (tmo == -ERESTARTSYS);
+ wait_event_freezable_timeout(done_wait, done.done,
+ msecs_to_jiffies(timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
- if (tmo == 0) {
+ if (!done.done) {
+ /*
+ * We're leaving the timed out dma operation with
+ * dangling pointer to done_wait. To make this
+ * correct, we'll need to allocate wait_done for
+ * each test iteration and perform "who's gonna
+ * free it this time?" dancing. For now, just
+ * leave it dangling.
+ */
pr_warning("%s: #%u: test timed out\n",
thread_name, total_tests - 1);
failed_tests++;
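
The dmatest change above swaps the struct completion for a plain flag plus an on-stack wait queue, so the test thread can sleep in wait_event_freezable_timeout() and be frozen cleanly. A self-contained sketch of the same pattern, with hypothetical helper names (illustrative only, not part of the patch):

/* Illustrative sketch of the "poor man's completion" pattern; the
 * example_callback()/example_wait() names are hypothetical. */
struct example_done {
	bool done;
	wait_queue_head_t *wait;
};

static void example_callback(void *arg)
{
	struct example_done *d = arg;

	d->done = true;
	wake_up_all(d->wait);
}

static int example_wait(struct dma_async_tx_descriptor *tx,
			struct dma_chan *chan, unsigned long timeout_ms)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct example_done d = { .wait = &wq };

	tx->callback = example_callback;
	tx->callback_param = &d;
	tx->tx_submit(tx);
	dma_async_issue_pending(chan);

	wait_event_freezable_timeout(wq, d.done, msecs_to_jiffies(timeout_ms));
	return d.done ? 0 : -ETIMEDOUT;
}
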
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 9bfd6d36071..9b592b02b5f 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -166,6 +166,38 @@ dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
return cookie;
}
+static void dwc_initialize(struct dw_dma_chan *dwc)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_dma_slave *dws = dwc->chan.private;
+ u32 cfghi = DWC_CFGH_FIFO_MODE;
+ u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+ if (dwc->initialized == true)
+ return;
+
+ if (dws) {
+ /*
+ * We need controller-specific data to set up slave
+ * transfers.
+ */
+ BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+
+ cfghi = dws->cfg_hi;
+ cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+ }
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+
+ /* Enable interrupts */
+ channel_set_bit(dw, MASK.XFER, dwc->mask);
+ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+ dwc->initialized = true;
+}
+
/*----------------------------------------------------------------------*/
/* Called with dwc->lock held and bh disabled */
@@ -189,6 +221,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
return;
}
+ dwc_initialize(dwc);
+
channel_writel(dwc, LLP, first->txd.phys);
channel_writel(dwc, CTL_LO,
DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
@@ -696,7 +730,7 @@ err_desc_get:
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
@@ -720,7 +754,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
prev = first = NULL;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
@@ -777,7 +811,7 @@ slave_sg_todev_fill_desc:
goto slave_sg_todev_fill_desc;
}
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_INC
@@ -959,10 +993,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc;
- struct dw_dma_slave *dws;
int i;
- u32 cfghi;
- u32 cfglo;
unsigned long flags;
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -975,26 +1006,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dwc->completed = chan->cookie = 1;
- cfghi = DWC_CFGH_FIFO_MODE;
- cfglo = 0;
-
- dws = chan->private;
- if (dws) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
- cfghi = dws->cfg_hi;
- cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
- }
-
- cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
-
- channel_writel(dwc, CFG_LO, cfglo);
- channel_writel(dwc, CFG_HI, cfghi);
-
/*
* NOTE: some controllers may have additional features that we
* need to initialize here, like "scatter-gather" (which
@@ -1026,11 +1037,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
i = ++dwc->descs_allocated;
}
- /* Enable interrupts */
- channel_set_bit(dw, MASK.XFER, dwc->mask);
- channel_set_bit(dw, MASK.BLOCK, dwc->mask);
- channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(chan),
@@ -1058,6 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
+ dwc->initialized = false;
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1165,7 +1172,7 @@ EXPORT_SYMBOL(dw_dma_cyclic_stop);
*/
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
dma_addr_t buf_addr, size_t buf_len, size_t period_len,
- enum dma_data_direction direction)
+ enum dma_transfer_direction direction)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_cyclic_desc *cdesc;
@@ -1206,7 +1213,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
goto out_err;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto out_err;
- if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
goto out_err;
retval = ERR_PTR(-ENOMEM);
@@ -1228,7 +1235,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
goto out_err_desc_get;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
desc->lli.dar = dws->tx_reg;
desc->lli.sar = buf_addr + (period_len * i);
desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1239,7 +1246,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
| DWC_CTLL_FC(dws->fc)
| DWC_CTLL_INT_EN);
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
desc->lli.dar = buf_addr + (period_len * i);
desc->lli.sar = dws->rx_reg;
desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1335,6 +1342,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
static void dw_dma_off(struct dw_dma *dw)
{
+ int i;
+
dma_writel(dw, CFG, 0);
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
@@ -1345,6 +1354,9 @@ static void dw_dma_off(struct dw_dma *dw)
while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
cpu_relax();
+
+ for (i = 0; i < dw->dma.chancnt; i++)
+ dw->chan[i].initialized = false;
}
static int __init dw_probe(struct platform_device *pdev)
@@ -1533,6 +1545,7 @@ static int dw_suspend_noirq(struct device *dev)
dw_dma_off(platform_get_drvdata(pdev));
clk_disable(dw->clk);
+
return 0;
}
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index c3419518d70..5eef6946a36 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -140,6 +140,7 @@ struct dw_dma_chan {
u8 mask;
u8 priority;
bool paused;
+ bool initialized;
spinlock_t lock;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index b47e2b803fa..59e7a965772 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -246,6 +246,9 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
+ if (list_empty(&edmac->active))
+ return NULL;
+
return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}
@@ -263,16 +266,22 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
*/
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
+ struct ep93xx_dma_desc *desc;
+
list_rotate_left(&edmac->active);
if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
return true;
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc)
+ return false;
+
/*
* If txd.cookie is set it means that we are back in the first
* descriptor in the chain and hence done with it.
*/
- return !ep93xx_dma_get_active(edmac)->txd.cookie;
+ return !desc->txd.cookie;
}
/*
@@ -327,10 +336,16 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+ struct ep93xx_dma_desc *desc;
u32 bus_addr;
- if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
+ return;
+ }
+
+ if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
bus_addr = desc->src_addr;
else
bus_addr = desc->dst_addr;
@@ -443,7 +458,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
control = (5 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_NO_HDSK;
- if (data->direction == DMA_TO_DEVICE) {
+ if (data->direction == DMA_MEM_TO_DEV) {
control |= M2M_CONTROL_DAH;
control |= M2M_CONTROL_TM_TX;
control |= M2M_CONTROL_RSS_SSPTX;
@@ -459,11 +474,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
* This IDE part is totally untested. Values below are taken
* from the EP93xx Users's Guide and might not be correct.
*/
- control |= M2M_CONTROL_NO_HDSK;
- control |= M2M_CONTROL_RSS_IDE;
- control |= M2M_CONTROL_PW_16;
-
- if (data->direction == DMA_TO_DEVICE) {
+ if (data->direction == DMA_MEM_TO_DEV) {
/* Worst case from the UG */
control = (3 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_DAH;
@@ -473,6 +484,10 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
control |= M2M_CONTROL_SAH;
control |= M2M_CONTROL_TM_RX;
}
+
+ control |= M2M_CONTROL_NO_HDSK;
+ control |= M2M_CONTROL_RSS_IDE;
+ control |= M2M_CONTROL_PW_16;
break;
default:
@@ -491,7 +506,13 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+ struct ep93xx_dma_desc *desc;
+
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
+ return;
+ }
if (edmac->buffer == 0) {
writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
@@ -669,24 +690,30 @@ static void ep93xx_dma_tasklet(unsigned long data)
{
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
struct ep93xx_dma_desc *desc, *d;
- dma_async_tx_callback callback;
- void *callback_param;
+ dma_async_tx_callback callback = NULL;
+ void *callback_param = NULL;
LIST_HEAD(list);
spin_lock_irq(&edmac->lock);
+ /*
+ * If dma_terminate_all() was called before we get to run, the active
+ * list has become empty. If that happens we aren't supposed to do
+ * anything more than call ep93xx_dma_advance_work().
+ */
desc = ep93xx_dma_get_active(edmac);
- if (desc->complete) {
- edmac->last_completed = desc->txd.cookie;
- list_splice_init(&edmac->active, &list);
+ if (desc) {
+ if (desc->complete) {
+ edmac->last_completed = desc->txd.cookie;
+ list_splice_init(&edmac->active, &list);
+ }
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
}
spin_unlock_irq(&edmac->lock);
/* Pick up the next descriptor from the queue */
ep93xx_dma_advance_work(edmac);
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
-
/* Now we can release all the chained descriptors */
list_for_each_entry_safe(desc, d, &list, node) {
/*
@@ -706,13 +733,22 @@ static void ep93xx_dma_tasklet(unsigned long data)
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
struct ep93xx_dma_chan *edmac = dev_id;
+ struct ep93xx_dma_desc *desc;
irqreturn_t ret = IRQ_HANDLED;
spin_lock(&edmac->lock);
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac),
+ "got interrupt while active list is empty\n");
+ spin_unlock(&edmac->lock);
+ return IRQ_NONE;
+ }
+
switch (edmac->edma->hw_interrupt(edmac)) {
case INTERRUPT_DONE:
- ep93xx_dma_get_active(edmac)->complete = true;
+ desc->complete = true;
tasklet_schedule(&edmac->tasklet);
break;
@@ -803,8 +839,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
switch (data->port) {
case EP93XX_DMA_SSP:
case EP93XX_DMA_IDE:
- if (data->direction != DMA_TO_DEVICE &&
- data->direction != DMA_FROM_DEVICE)
+ if (data->direction != DMA_MEM_TO_DEV &&
+ data->direction != DMA_DEV_TO_MEM)
return -EINVAL;
break;
default:
@@ -952,7 +988,7 @@ fail:
*/
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction dir,
+ unsigned int sg_len, enum dma_transfer_direction dir,
unsigned long flags)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
@@ -988,7 +1024,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
goto fail;
}
- if (dir == DMA_TO_DEVICE) {
+ if (dir == DMA_MEM_TO_DEV) {
desc->src_addr = sg_dma_address(sg);
desc->dst_addr = edmac->runtime_addr;
} else {
@@ -1032,7 +1068,7 @@ fail:
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_data_direction dir)
+ enum dma_transfer_direction dir)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
struct ep93xx_dma_desc *desc, *first;
@@ -1065,7 +1101,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
goto fail;
}
- if (dir == DMA_TO_DEVICE) {
+ if (dir == DMA_MEM_TO_DEV) {
desc->src_addr = dma_addr + offset;
desc->dst_addr = edmac->runtime_addr;
} else {
@@ -1133,12 +1169,12 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
return -EINVAL;
switch (config->direction) {
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
width = config->src_addr_width;
addr = config->src_addr;
break;
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
width = config->dst_addr_width;
addr = config->dst_addr;
break;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 8a781540590..b98070c33ca 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -772,7 +772,7 @@ fail:
*/
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
/*
* This operation is not supported on the Freescale DMA controller
@@ -819,7 +819,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
return -ENXIO;
/* we set the controller burst size depending on direction */
- if (config->direction == DMA_TO_DEVICE)
+ if (config->direction == DMA_MEM_TO_DEV)
size = config->dst_addr_width * config->dst_maxburst;
else
size = config->src_addr_width * config->src_maxburst;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 4be55f9bb6c..e4383ee2c9a 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -107,7 +107,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
imx_dma_disable(imxdmac->imxdma_channel);
return 0;
case DMA_SLAVE_CONFIG:
- if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+ if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
imxdmac->per_address = dmaengine_cfg->src_addr;
imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
imxdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -224,7 +224,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
@@ -241,7 +241,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
dma_length += sg->length;
}
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
dmamode = DMA_MODE_READ;
else
dmamode = DMA_MODE_WRITE;
@@ -271,7 +271,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
@@ -317,7 +317,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
imxdmac->sg_list[periods].page_link =
((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
dmamode = DMA_MODE_READ;
else
dmamode = DMA_MODE_WRITE;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f993955a640..a8af379680c 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -247,7 +247,7 @@ struct sdma_engine;
struct sdma_channel {
struct sdma_engine *sdma;
unsigned int channel;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
enum sdma_peripheral_type peripheral_type;
unsigned int event_id0;
unsigned int event_id1;
@@ -268,6 +268,8 @@ struct sdma_channel {
struct dma_async_tx_descriptor desc;
dma_cookie_t last_completed;
enum dma_status status;
+ unsigned int chn_count;
+ unsigned int chn_real_count;
};
#define IMX_DMA_SG_LOOP (1 << 0)
@@ -503,6 +505,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
struct sdma_buffer_descriptor *bd;
int i, error = 0;
+ sdmac->chn_real_count = 0;
/*
* non loop mode. Iterate over all descriptors, collect
* errors and call callback function
@@ -512,6 +515,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
if (bd->mode.status & (BD_DONE | BD_RROR))
error = -EIO;
+ sdmac->chn_real_count += bd->mode.count;
}
if (error)
@@ -519,9 +523,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
else
sdmac->status = DMA_SUCCESS;
+ sdmac->last_completed = sdmac->desc.cookie;
if (sdmac->desc.callback)
sdmac->desc.callback(sdmac->desc.callback_param);
- sdmac->last_completed = sdmac->desc.cookie;
}
static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
@@ -650,7 +654,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
int ret;
- if (sdmac->direction == DMA_FROM_DEVICE) {
+ if (sdmac->direction == DMA_DEV_TO_MEM) {
load_address = sdmac->pc_from_device;
} else {
load_address = sdmac->pc_to_device;
@@ -832,17 +836,18 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
+ unsigned long flags;
struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
struct sdma_engine *sdma = sdmac->sdma;
dma_cookie_t cookie;
- spin_lock_irq(&sdmac->lock);
+ spin_lock_irqsave(&sdmac->lock, flags);
cookie = sdma_assign_cookie(sdmac);
sdma_enable_channel(sdma, sdmac->channel);
- spin_unlock_irq(&sdmac->lock);
+ spin_unlock_irqrestore(&sdmac->lock, flags);
return cookie;
}
@@ -911,7 +916,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -941,6 +946,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
goto err_out;
}
+ sdmac->chn_count = 0;
for_each_sg(sgl, sg, sg_len, i) {
struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
int param;
@@ -957,6 +963,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
}
bd->mode.count = count;
+ sdmac->chn_count += count;
if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
ret = -EINVAL;
@@ -1008,7 +1015,7 @@ err_out:
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
@@ -1093,7 +1100,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
sdma_disable_channel(sdmac);
return 0;
case DMA_SLAVE_CONFIG:
- if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+ if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
sdmac->per_address = dmaengine_cfg->src_addr;
sdmac->watermark_level = dmaengine_cfg->src_maxburst;
sdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -1102,6 +1109,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
sdmac->word_size = dmaengine_cfg->dst_addr_width;
}
+ sdmac->direction = dmaengine_cfg->direction;
return sdma_config_channel(sdmac);
default:
return -ENOSYS;
@@ -1119,7 +1127,8 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
last_used = chan->cookie;
- dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
+ dma_set_tx_state(txstate, sdmac->last_completed, last_used,
+ sdmac->chn_count - sdmac->chn_real_count);
return sdmac->status;
}
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 19a0c64d45d..74f70aadf9e 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
* callbacks but must be called with the lock held.
*/
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
- struct intel_mid_dma_desc *desc)
+ struct intel_mid_dma_desc *desc)
+ __releases(&midc->lock) __acquires(&midc->lock)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
dma_async_tx_callback callback_txd = NULL;
@@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
pci_pool_destroy(desc->lli_pool);
+ desc->lli = NULL;
}
list_move(&desc->desc_node, &midc->free_list);
midc->busy = false;
@@ -395,10 +397,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
midc->dma->block_size);
/*Populate SAR and DAR values*/
sg_phy_addr = sg_phys(sg);
- if (desc->dirn == DMA_TO_DEVICE) {
+ if (desc->dirn == DMA_MEM_TO_DEV) {
lli_bloc_desc->sar = sg_phy_addr;
lli_bloc_desc->dar = mids->dma_slave.dst_addr;
- } else if (desc->dirn == DMA_FROM_DEVICE) {
+ } else if (desc->dirn == DMA_DEV_TO_MEM) {
lli_bloc_desc->sar = mids->dma_slave.src_addr;
lli_bloc_desc->dar = sg_phy_addr;
}
@@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) {
+ spin_lock_bh(&midc->lock);
midc_scan_descriptors(to_middma_device(chan->device), midc);
+ spin_unlock_bh(&midc->lock);
last_complete = midc->completed;
last_used = chan->cookie;
@@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
pci_pool_destroy(desc->lli_pool);
+ desc->lli = NULL;
}
list_move(&desc->desc_node, &midc->free_list);
}
@@ -632,13 +637,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
if (midc->dma->pimr_mask) {
cfg_hi.cfgx.protctl = 0x0; /*default value*/
cfg_hi.cfgx.fifo_mode = 1;
- if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+ if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
cfg_hi.cfgx.src_per = 0;
if (mids->device_instance == 0)
cfg_hi.cfgx.dst_per = 3;
if (mids->device_instance == 1)
cfg_hi.cfgx.dst_per = 1;
- } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+ } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
if (mids->device_instance == 0)
cfg_hi.cfgx.src_per = 2;
if (mids->device_instance == 1)
@@ -682,11 +687,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 0;
} else {
- if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+ if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 2;
ctl_lo.ctlx.tt_fc = 1;
- } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+ } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
ctl_lo.ctlx.sinc = 2;
ctl_lo.ctlx.dinc = 0;
ctl_lo.ctlx.tt_fc = 2;
@@ -732,7 +737,7 @@ err_desc_get:
*/
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct intel_mid_dma_chan *midc = NULL;
@@ -868,7 +873,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
pm_runtime_get_sync(&mid->pdev->dev);
if (mid->state == SUSPENDED) {
- if (dma_resume(mid->pdev)) {
+ if (dma_resume(&mid->pdev->dev)) {
pr_err("ERR_MDMA: resume failed");
return -EFAULT;
}
@@ -1099,7 +1104,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
LNW_PERIPHRAL_MASK_SIZE);
if (dma->mask_reg == NULL) {
pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_ioremap;
}
} else
dma->mask_reg = NULL;
@@ -1196,6 +1202,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
err_engine:
free_irq(pdev->irq, dma);
err_irq:
+ if (dma->mask_reg)
+ iounmap(dma->mask_reg);
+err_ioremap:
pci_pool_destroy(dma->dma_pool);
err_dma_pool:
pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
@@ -1337,8 +1346,9 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
*
* This function is called by OS when a power event occurs
*/
-int dma_suspend(struct pci_dev *pci, pm_message_t state)
+static int dma_suspend(struct device *dev)
{
+ struct pci_dev *pci = to_pci_dev(dev);
int i;
struct middma_device *device = pci_get_drvdata(pci);
pr_debug("MDMA: dma_suspend called\n");
@@ -1362,8 +1372,9 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
*
* This function is called by OS when a power event occurs
*/
-int dma_resume(struct pci_dev *pci)
+int dma_resume(struct device *dev)
{
+ struct pci_dev *pci = to_pci_dev(dev);
int ret;
struct middma_device *device = pci_get_drvdata(pci);
@@ -1429,6 +1440,8 @@ static const struct dev_pm_ops intel_mid_dma_pm = {
.runtime_suspend = dma_runtime_suspend,
.runtime_resume = dma_runtime_resume,
.runtime_idle = dma_runtime_idle,
+ .suspend = dma_suspend,
+ .resume = dma_resume,
};
static struct pci_driver intel_mid_dma_pci_driver = {
@@ -1437,8 +1450,6 @@ static struct pci_driver intel_mid_dma_pci_driver = {
.probe = intel_mid_dma_probe,
.remove = __devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
- .suspend = dma_suspend,
- .resume = dma_resume,
.driver = {
.pm = &intel_mid_dma_pm,
},
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index aea5ee88ce0..c83d35b97bd 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -262,7 +262,7 @@ struct intel_mid_dma_desc {
unsigned int lli_length;
unsigned int current_lli;
dma_addr_t next;
- enum dma_data_direction dirn;
+ enum dma_transfer_direction dirn;
enum dma_status status;
enum dma_slave_buswidth width; /*width of DMA txn*/
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
@@ -296,6 +296,6 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
}
-int dma_resume(struct pci_dev *pci);
+int dma_resume(struct device *dev);
#endif /*__INTEL_MID_DMAC_REGS_H__*/
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e03f811a83d..04be90b645b 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1735,8 +1735,6 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
spin_unlock_bh(&iop_chan->lock);
}
-MODULE_ALIAS("platform:iop-adma");
-
static struct platform_driver iop_adma_driver = {
.probe = iop_adma_probe,
.remove = __devexit_p(iop_adma_remove),
@@ -1746,19 +1744,9 @@ static struct platform_driver iop_adma_driver = {
},
};
-static int __init iop_adma_init (void)
-{
- return platform_driver_register(&iop_adma_driver);
-}
-
-static void __exit iop_adma_exit (void)
-{
- platform_driver_unregister(&iop_adma_driver);
- return;
-}
-module_exit(iop_adma_exit);
-module_init(iop_adma_init);
+module_platform_driver(iop_adma_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iop-adma");
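
The hand-rolled init/exit pair in iop-adma (and in mpc512x_dma below) is replaced with module_platform_driver(). Roughly what that macro expands to for this driver, as a paraphrased sketch rather than the literal kernel macro text:

/* Paraphrased expansion sketch of module_platform_driver(iop_adma_driver). */
static int __init iop_adma_driver_init(void)
{
	return platform_driver_register(&iop_adma_driver);
}
module_init(iop_adma_driver_init);

static void __exit iop_adma_driver_exit(void)
{
	platform_driver_unregister(&iop_adma_driver);
}
module_exit(iop_adma_driver_exit);
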
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 0e5ef33f90a..6212b16e8cf 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -312,7 +312,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
case IPU_PIX_FMT_RGB565:
params->ip.bpp = 2;
params->ip.pfs = 4;
- params->ip.npb = 7;
+ params->ip.npb = 15;
params->ip.sat = 2; /* SAT = 32-bit access */
params->ip.ofs0 = 0; /* Red bit offset */
params->ip.ofs1 = 5; /* Green bit offset */
@@ -422,12 +422,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
params->pp.nsb = 1;
}
-static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
- uint16_t burst_pixels)
-{
- params->pp.npb = burst_pixels - 1;
-}
-
static void ipu_ch_param_set_buffer(union chan_param_mem *params,
dma_addr_t buf0, dma_addr_t buf1)
{
@@ -690,23 +684,6 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
ipu_ch_param_set_rotation(&params, rot_mode);
- /* Some channels (rotation) have restriction on burst length */
- switch (channel) {
- case IDMAC_IC_7: /* Hangs with burst 8, 16, other values
- invalid - Table 44-30 */
-/*
- ipu_ch_param_set_burst_size(&params, 8);
- */
- break;
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- /* In original code only IPU_PIX_FMT_RGB565 was setting burst */
- ipu_ch_param_set_burst_size(&params, 16);
- break;
- case IDMAC_IC_0:
- default:
- break;
- }
spin_lock_irqsave(&ipu->lock, flags);
@@ -1364,7 +1341,7 @@ static void ipu_gc_tasklet(unsigned long arg)
/* Allocate and initialise a transfer descriptor. */
static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long tx_flags)
+ enum dma_transfer_direction direction, unsigned long tx_flags)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac_tx_desc *desc = NULL;
@@ -1376,7 +1353,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
chan->chan_id != IDMAC_IC_7)
return NULL;
- if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
+ if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
return NULL;
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 8ba4edc6185..4d6d4cf6694 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -835,17 +835,7 @@ static struct platform_driver mpc_dma_driver = {
},
};
-static int __init mpc_dma_init(void)
-{
- return platform_driver_register(&mpc_dma_driver);
-}
-module_init(mpc_dma_init);
-
-static void __exit mpc_dma_exit(void)
-{
- platform_driver_unregister(&mpc_dma_driver);
-}
-module_exit(mpc_dma_exit);
+module_platform_driver(mpc_dma_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 9a353c2216d..e779b434af4 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1250,7 +1250,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
void __iomem *base = msp->xor_base;
u32 win_enable = 0;
@@ -1264,7 +1264,7 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
writel((cs->base & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -1290,7 +1290,7 @@ static struct platform_driver mv_xor_driver = {
static int mv_xor_shared_probe(struct platform_device *pdev)
{
- struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
+ const struct mbus_dram_target_info *dram;
struct mv_xor_shared_private *msp;
struct resource *res;
@@ -1323,8 +1323,9 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (msd != NULL && msd->dram != NULL)
- mv_xor_conf_mbus_windows(msp, msd->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_xor_conf_mbus_windows(msp, dram);
return 0;
}
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index b4588bdd98b..b06cd4ca626 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -44,7 +44,6 @@
#define HW_APBHX_CTRL0 0x000
#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
-#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8
#define BP_APBH_CTRL0_RESET_CHANNEL 16
#define HW_APBHX_CTRL1 0x010
#define HW_APBHX_CTRL2 0x020
@@ -111,6 +110,7 @@ struct mxs_dma_chan {
int chan_irq;
struct mxs_dma_ccw *ccw;
dma_addr_t ccw_phys;
+ int desc_count;
dma_cookie_t last_completed;
enum dma_status status;
unsigned int flags;
@@ -130,23 +130,6 @@ struct mxs_dma_engine {
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
};
-static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
-{
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
- int chan_id = mxs_chan->chan.chan_id;
- int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
-
- /* enable apbh channel clock */
- if (dma_is_apbh()) {
- if (apbh_is_old())
- writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
- mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
- else
- writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
- }
-}
-
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -165,9 +148,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
- /* clkgate needs to be enabled before writing other registers */
- mxs_dma_clkgate(mxs_chan, 1);
-
/* set cmd_addr up */
writel(mxs_chan->ccw_phys,
mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
@@ -178,9 +158,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
- /* disable apbh channel clock */
- mxs_dma_clkgate(mxs_chan, 0);
-
mxs_chan->status = DMA_SUCCESS;
}
@@ -268,7 +245,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
/*
* When both completion and error of termination bits set at the
* same time, we do not take it as an error. IOW, it only becomes
- * an error we need to handler here in case of ether it's (1) an bus
+ * an error we need to handle here in case it is either (1) a bus
* error or (2) a termination error with no completion.
*/
stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
@@ -334,14 +311,11 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
goto err_irq;
}
- ret = clk_enable(mxs_dma->clk);
+ ret = clk_prepare_enable(mxs_dma->clk);
if (ret)
goto err_clk;
- /* clkgate needs to be enabled for reset to finish */
- mxs_dma_clkgate(mxs_chan, 1);
mxs_dma_reset_chan(mxs_chan);
- mxs_dma_clkgate(mxs_chan, 0);
dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -372,12 +346,12 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
mxs_chan->ccw, mxs_chan->ccw_phys);
- clk_disable(mxs_dma->clk);
+ clk_disable_unprepare(mxs_dma->clk);
}
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long append)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
@@ -386,7 +360,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct scatterlist *sg;
int i, j;
u32 *pio;
- static int idx;
+ int idx = append ? mxs_chan->desc_count : 0;
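+	/*
+	 * Track the descriptor index per channel: start from 0 for a new
+	 * transfer, or continue from this channel's last descriptor when
+	 * appending.
+	 */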
if (mxs_chan->status == DMA_IN_PROGRESS && !append)
return NULL;
@@ -417,7 +391,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
idx = 0;
}
- if (direction == DMA_NONE) {
+ if (direction == DMA_TRANS_NONE) {
ccw = &mxs_chan->ccw[idx++];
pio = (u32 *) sgl;
@@ -450,7 +424,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits |= CCW_CHAIN;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
- ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+ ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
COMMAND);
@@ -462,6 +436,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
}
}
}
+ mxs_chan->desc_count = idx;
return &mxs_chan->desc;
@@ -472,7 +447,7 @@ err_out:
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -515,7 +490,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
- ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+ ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
dma_addr += period_len;
@@ -523,6 +498,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
i++;
}
+ mxs_chan->desc_count = i;
return &mxs_chan->desc;
@@ -539,8 +515,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
- mxs_dma_disable_chan(mxs_chan);
mxs_dma_reset_chan(mxs_chan);
+ mxs_dma_disable_chan(mxs_chan);
break;
case DMA_PAUSE:
mxs_dma_pause_chan(mxs_chan);
@@ -578,9 +554,9 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
int ret;
- ret = clk_enable(mxs_dma->clk);
+ ret = clk_prepare_enable(mxs_dma->clk);
if (ret)
- goto err_out;
+ return ret;
ret = mxs_reset_block(mxs_dma->base);
if (ret)
@@ -604,11 +580,8 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
- clk_disable(mxs_dma->clk);
-
- return 0;
-
err_out:
+ clk_disable_unprepare(mxs_dma->clk);
return ret;
}
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index a6d0e3dbed0..823f58179f9 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1,7 +1,7 @@
/*
* Topcliff PCH DMA controller driver
* Copyright (c) 2010 Intel Corporation
- * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -99,7 +99,7 @@ struct pch_dma_desc {
struct pch_dma_chan {
struct dma_chan chan;
void __iomem *membase;
- enum dma_data_direction dir;
+ enum dma_transfer_direction dir;
struct tasklet_struct tasklet;
unsigned long err_status;
@@ -224,7 +224,7 @@ static void pdc_set_dir(struct dma_chan *chan)
mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * chan->chan_id));
val &= mask_mode;
- if (pd_chan->dir == DMA_TO_DEVICE)
+ if (pd_chan->dir == DMA_MEM_TO_DEV)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
DMA_CTL0_DIR_SHIFT_BITS);
else
@@ -242,7 +242,7 @@ static void pdc_set_dir(struct dma_chan *chan)
mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * ch));
val &= mask_mode;
- if (pd_chan->dir == DMA_TO_DEVICE)
+ if (pd_chan->dir == DMA_MEM_TO_DEV)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
DMA_CTL0_DIR_SHIFT_BITS);
else
@@ -607,7 +607,7 @@ static void pd_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
struct pch_dma_slave *pd_slave = chan->private;
@@ -623,9 +623,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
return NULL;
}
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
reg = pd_slave->rx_reg;
- else if (direction == DMA_TO_DEVICE)
+ else if (direction == DMA_MEM_TO_DEV)
reg = pd_slave->tx_reg;
else
return NULL;
@@ -1018,6 +1018,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
+#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
+#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815
DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
@@ -1030,6 +1032,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
{ 0, },
};
@@ -1057,7 +1061,7 @@ static void __exit pch_dma_exit(void)
module_init(pch_dma_init);
module_exit(pch_dma_exit);
-MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
+MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
"DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 571041477ab..b8ec03ee8e2 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -19,6 +19,7 @@
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
+#include <linux/of.h>
#define NR_DEFAULT_DESC 16
@@ -116,6 +117,9 @@ struct dma_pl330_desc {
struct dma_pl330_chan *pchan;
};
+/* forward declaration */
+static struct amba_driver pl330_driver;
+
static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
@@ -267,6 +271,32 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
tasklet_schedule(&pch->task);
}
+bool pl330_filter(struct dma_chan *chan, void *param)
+{
+ u8 *peri_id;
+
+ if (chan->device->dev->driver != &pl330_driver.drv)
+ return false;
+
+#ifdef CONFIG_OF
+ if (chan->device->dev->of_node) {
+ const __be32 *prop_value;
+ phandle phandle;
+ struct device_node *node;
+
+ prop_value = ((struct property *)param)->value;
+ phandle = be32_to_cpup(prop_value++);
+ node = of_find_node_by_phandle(phandle);
+ return ((chan->private == node) &&
+ (chan->chan_id == be32_to_cpup(prop_value)));
+ }
+#endif
+
+ peri_id = chan->private;
+ return *peri_id == (unsigned)param;
+}
+EXPORT_SYMBOL(pl330_filter);
+
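+/*
+ * Illustrative usage sketch (not part of the driver): a client would
+ * typically pass pl330_filter to dma_request_channel() to grab a PL330
+ * channel, e.g.:
+ *
+ *	dma_cap_mask_t mask;
+ *
+ *	dma_cap_zero(mask);
+ *	dma_cap_set(DMA_SLAVE, mask);
+ *	chan = dma_request_channel(mask, pl330_filter, (void *)peri_id);
+ *
+ * where peri_id is the platform-specific peripheral request number that
+ * is matched against the u8 id in chan->private above.
+ */
+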
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -320,14 +350,14 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
case DMA_SLAVE_CONFIG:
slave_config = (struct dma_slave_config *)arg;
- if (slave_config->direction == DMA_TO_DEVICE) {
+ if (slave_config->direction == DMA_MEM_TO_DEV) {
if (slave_config->dst_addr)
pch->fifo_addr = slave_config->dst_addr;
if (slave_config->dst_addr_width)
pch->burst_sz = __ffs(slave_config->dst_addr_width);
if (slave_config->dst_maxburst)
pch->burst_len = slave_config->dst_maxburst;
- } else if (slave_config->direction == DMA_FROM_DEVICE) {
+ } else if (slave_config->direction == DMA_DEV_TO_MEM) {
if (slave_config->src_addr)
pch->fifo_addr = slave_config->src_addr;
if (slave_config->src_addr_width)
@@ -497,7 +527,7 @@ pluck_desc(struct dma_pl330_dmac *pdmac)
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
struct dma_pl330_dmac *pdmac = pch->dmac;
- struct dma_pl330_peri *peri = pch->chan.private;
+ u8 *peri_id = pch->chan.private;
struct dma_pl330_desc *desc;
/* Pluck one desc from the pool of DMAC */
@@ -522,13 +552,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
desc->txd.cookie = 0;
async_tx_ack(&desc->txd);
- if (peri) {
- desc->req.rqtype = peri->rqtype;
- desc->req.peri = pch->chan.chan_id;
- } else {
- desc->req.rqtype = MEMTOMEM;
- desc->req.peri = 0;
- }
+ desc->req.peri = peri_id ? pch->chan.chan_id : 0;
dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
@@ -597,7 +621,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct dma_pl330_desc *desc;
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -612,15 +636,17 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
}
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
+ desc->req.rqtype = MEMTODEV;
src = dma_addr;
dst = pch->fifo_addr;
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = DEVTOMEM;
src = pch->fifo_addr;
dst = dma_addr;
break;
@@ -646,16 +672,12 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
{
struct dma_pl330_desc *desc;
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_peri *peri = chan->private;
struct pl330_info *pi;
int burst;
if (unlikely(!pch || !len))
return NULL;
- if (peri && peri->rqtype != MEMTOMEM)
- return NULL;
-
pi = &pch->dmac->pif;
desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
@@ -664,6 +686,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = MEMTOMEM;
/* Select max possible burst size */
burst = pi->pcfg.data_bus_width / 8;
@@ -687,30 +710,19 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flg)
{
struct dma_pl330_desc *first, *desc = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_peri *peri = chan->private;
struct scatterlist *sg;
unsigned long flags;
int i;
dma_addr_t addr;
- if (unlikely(!pch || !sgl || !sg_len || !peri))
+ if (unlikely(!pch || !sgl || !sg_len))
return NULL;
- /* Make sure the direction is consistent */
- if ((direction == DMA_TO_DEVICE &&
- peri->rqtype != MEMTODEV) ||
- (direction == DMA_FROM_DEVICE &&
- peri->rqtype != DEVTOMEM)) {
- dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
- __func__, __LINE__);
- return NULL;
- }
-
addr = pch->fifo_addr;
first = NULL;
@@ -747,14 +759,16 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
else
list_add_tail(&desc->node, &first->node);
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
+ desc->req.rqtype = MEMTODEV;
fill_px(&desc->px,
addr, sg_dma_address(sg), sg_dma_len(sg));
} else {
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = DEVTOMEM;
fill_px(&desc->px,
sg_dma_address(sg), addr, sg_dma_len(sg));
}
@@ -820,17 +834,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pdmac);
-#ifdef CONFIG_PM_RUNTIME
- /* to use the runtime PM helper functions */
- pm_runtime_enable(&adev->dev);
-
- /* enable the power domain */
- if (pm_runtime_get_sync(&adev->dev)) {
- dev_err(&adev->dev, "failed to get runtime pm\n");
- ret = -ENODEV;
- goto probe_err1;
- }
-#else
+#ifndef CONFIG_PM_RUNTIME
/* enable dma clk */
clk_enable(pdmac->clk);
#endif
@@ -856,32 +860,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
- num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
+ num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
+ (u8)pi->pcfg.num_chan);
pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
for (i = 0; i < num_chan; i++) {
pch = &pdmac->peripherals[i];
- if (pdat) {
- struct dma_pl330_peri *peri = &pdat->peri[i];
-
- switch (peri->rqtype) {
- case MEMTOMEM:
- dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- break;
- case MEMTODEV:
- case DEVTOMEM:
- dma_cap_set(DMA_SLAVE, pd->cap_mask);
- dma_cap_set(DMA_CYCLIC, pd->cap_mask);
- break;
- default:
- dev_err(&adev->dev, "DEVTODEV Not Supported\n");
- continue;
- }
- pch->chan.private = peri;
- } else {
- dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- pch->chan.private = NULL;
- }
+ if (!adev->dev.of_node)
+ pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
+ else
+ pch->chan.private = adev->dev.of_node;
INIT_LIST_HEAD(&pch->work_list);
spin_lock_init(&pch->lock);
@@ -894,6 +882,15 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
}
pd->dev = &adev->dev;
+ if (pdat) {
+ pd->cap_mask = pdat->cap_mask;
+ } else {
+ dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+ if (pi->pcfg.num_peri) {
+ dma_cap_set(DMA_SLAVE, pd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, pd->cap_mask);
+ }
+ }
pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
pd->device_free_chan_resources = pl330_free_chan_resources;
@@ -970,10 +967,7 @@ static int __devexit pl330_remove(struct amba_device *adev)
res = &adev->res;
release_mem_region(res->start, resource_size(res));
-#ifdef CONFIG_PM_RUNTIME
- pm_runtime_put(&adev->dev);
- pm_runtime_disable(&adev->dev);
-#else
+#ifndef CONFIG_PM_RUNTIME
clk_disable(pdmac->clk);
#endif
@@ -990,6 +984,8 @@ static struct amba_id pl330_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl330_ids);
+
#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 81809c2b46a..54043cd831c 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
-#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
@@ -57,6 +56,15 @@ static LIST_HEAD(sh_dmae_devices);
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
+static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, shdev->chan_reg +
+ shdev->pdata->channel[sh_dc->id].chclr_offset);
+}
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
@@ -129,6 +137,15 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+ if (shdev->pdata->chclr_present) {
+ int i;
+ for (i = 0; i < shdev->pdata->channel_num; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+ if (sh_chan)
+ chclr_write(sh_chan, 0);
+ }
+ }
+
dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
dmaor = dmaor_read(shdev);
@@ -139,6 +156,10 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
return -EIO;
}
+ if (shdev->pdata->dmaor_init & ~dmaor)
+ dev_warn(shdev->common.dev,
+ "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
+ dmaor, shdev->pdata->dmaor_init);
return 0;
}
@@ -259,8 +280,6 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
return 0;
}
-static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
-
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
@@ -340,6 +359,8 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
sh_chan_xfer_ld_queue(sh_chan);
sh_chan->pm_state = DMAE_PM_ESTABLISHED;
}
+ } else {
+ sh_chan->pm_state = DMAE_PM_PENDING;
}
spin_unlock_irq(&sh_chan->desc_lock);
@@ -479,19 +500,19 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
* @sh_chan: DMA channel
* @flags: DMA transfer flags
* @dest: destination DMA address, incremented when direction equals
- * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
+ * DMA_DEV_TO_MEM
* @src: source DMA address, incremented when direction equals
- * DMA_TO_DEVICE or DMA_BIDIRECTIONAL
+ * DMA_MEM_TO_DEV
* @len: DMA transfer length
* @first: if NULL, set to the current descriptor and cookie set to -EBUSY
* @direction: needed for slave DMA to decide which address to keep constant,
- * equals DMA_BIDIRECTIONAL for MEMCPY
+ * equals DMA_MEM_TO_MEM for MEMCPY
* Returns 0 or an error
* Locks: called with desc_lock held
*/
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
- struct sh_desc **first, enum dma_data_direction direction)
+ struct sh_desc **first, enum dma_transfer_direction direction)
{
struct sh_desc *new;
size_t copy_size;
@@ -531,9 +552,9 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
new->direction = direction;
*len -= copy_size;
- if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
*src += copy_size;
- if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
*dest += copy_size;
return new;
@@ -546,12 +567,12 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
* converted to scatter-gather to guarantee consistent locking and a correct
* list manipulation. For slave DMA direction carries the usual meaning, and,
* logically, the SG list is RAM and the addr variable contains slave address,
- * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
* and the SG list contains only one element and points at the source buffer.
*/
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct scatterlist *sg;
struct sh_desc *first = NULL, *new = NULL /* compiler... */;
@@ -592,7 +613,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
i, sg, len, (unsigned long long)sg_addr);
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
new = sh_dmae_add_desc(sh_chan, flags,
&sg_addr, addr, &len, &first,
direction);
@@ -646,13 +667,13 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
sg_dma_address(&sg) = dma_src;
sg_dma_len(&sg) = len;
- return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+ return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
flags);
}
static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct sh_dmae_slave *param;
struct sh_dmae_chan *sh_chan;
@@ -996,7 +1017,7 @@ static void dmae_do_tasklet(unsigned long data)
spin_lock_irq(&sh_chan->desc_lock);
list_for_each_entry(desc, &sh_chan->ld_queue, node) {
if (desc->mark == DESC_SUBMITTED &&
- ((desc->direction == DMA_FROM_DEVICE &&
+ ((desc->direction == DMA_DEV_TO_MEM &&
(desc->hw.dar + desc->hw.tcr) == dar_buf) ||
(desc->hw.sar + desc->hw.tcr) == sar_buf)) {
dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
@@ -1225,6 +1246,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, shdev);
+ shdev->common.dev = &pdev->dev;
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -1254,7 +1277,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
shdev->common.device_control = sh_dmae_control;
- shdev->common.dev = &pdev->dev;
/* Default transfer size of 32 bytes requires 32-byte alignment */
shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
@@ -1435,22 +1457,17 @@ static int sh_dmae_runtime_resume(struct device *dev)
#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
- struct sh_dmae_device *shdev = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < shdev->pdata->channel_num; i++) {
- struct sh_dmae_chan *sh_chan = shdev->chan[i];
- if (sh_chan->descs_allocated)
- sh_chan->pm_error = pm_runtime_put_sync(dev);
- }
-
return 0;
}
static int sh_dmae_resume(struct device *dev)
{
struct sh_dmae_device *shdev = dev_get_drvdata(dev);
- int i;
+ int i, ret;
+
+ ret = sh_dmae_rst(shdev);
+ if (ret < 0)
+ dev_err(dev, "Failed to reset!\n");
for (i = 0; i < shdev->pdata->channel_num; i++) {
struct sh_dmae_chan *sh_chan = shdev->chan[i];
@@ -1459,9 +1476,6 @@ static int sh_dmae_resume(struct device *dev)
if (!sh_chan->descs_allocated)
continue;
- if (!sh_chan->pm_error)
- pm_runtime_get_sync(dev);
-
if (param) {
const struct sh_dmae_slave_config *cfg = param->config;
dmae_set_dmars(sh_chan, cfg->mid_rid);
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
new file mode 100644
index 00000000000..2333810d168
--- /dev/null
+++ b/drivers/dma/sirf-dma.c
@@ -0,0 +1,707 @@
+/*
+ * DMA controller driver for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/sirfsoc_dma.h>
+
+#define SIRFSOC_DMA_DESCRIPTORS 16
+#define SIRFSOC_DMA_CHANNELS 16
+
+#define SIRFSOC_DMA_CH_ADDR 0x00
+#define SIRFSOC_DMA_CH_XLEN 0x04
+#define SIRFSOC_DMA_CH_YLEN 0x08
+#define SIRFSOC_DMA_CH_CTRL 0x0C
+
+#define SIRFSOC_DMA_WIDTH_0 0x100
+#define SIRFSOC_DMA_CH_VALID 0x140
+#define SIRFSOC_DMA_CH_INT 0x144
+#define SIRFSOC_DMA_INT_EN 0x148
+#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
+
+#define SIRFSOC_DMA_MODE_CTRL_BIT 4
+#define SIRFSOC_DMA_DIR_CTRL_BIT 5
+
+/* xlen and dma_width registers are in units of 4-byte words */
+#define SIRFSOC_DMA_WORD_LEN 4
+
+struct sirfsoc_dma_desc {
+ struct dma_async_tx_descriptor desc;
+ struct list_head node;
+
+ /* SiRFprimaII 2D-DMA parameters */
+
+ int xlen; /* DMA xlen */
+ int ylen; /* DMA ylen */
+ int width; /* DMA width */
+ int dir;
+ bool cyclic; /* is loop DMA? */
+ u32 addr; /* DMA buffer address */
+};
+
+struct sirfsoc_dma_chan {
+ struct dma_chan chan;
+ struct list_head free;
+ struct list_head prepared;
+ struct list_head queued;
+ struct list_head active;
+ struct list_head completed;
+ dma_cookie_t completed_cookie;
+ unsigned long happened_cyclic;
+ unsigned long completed_cyclic;
+
+ /* Lock for this structure */
+ spinlock_t lock;
+
+ int mode;
+};
+
+struct sirfsoc_dma {
+ struct dma_device dma;
+ struct tasklet_struct tasklet;
+ struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
+ void __iomem *base;
+ int irq;
+};
+
+#define DRV_NAME "sirfsoc_dma"
+
+/* Convert struct dma_chan to struct sirfsoc_dma_chan */
+static inline
+struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct sirfsoc_dma_chan, chan);
+}
+
+/* Convert struct dma_chan to struct sirfsoc_dma */
+static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
+ return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
+}
+
+/* Execute all queued DMA descriptors */
+static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ struct sirfsoc_dma_desc *sdesc = NULL;
+
+ /*
+	 * The lock is already held by the callers of this function, so we
+	 * don't take it again here.
+ */
+
+ sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
+ node);
+ /* Move the first queued descriptor to active list */
+ list_move_tail(&schan->queued, &schan->active);
+
+ /* Start the DMA transfer */
+ writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
+ cid * 4);
+ writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
+ (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
+ sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
+ writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
+ SIRFSOC_DMA_CH_XLEN);
+ writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
+ SIRFSOC_DMA_CH_YLEN);
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
+ (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+
+ /*
+	 * writel has an implicit memory write barrier to make sure data is
+ * flushed into memory before starting DMA
+ */
+ writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
+
+ if (sdesc->cyclic) {
+ writel((1 << cid) | 1 << (cid + 16) |
+ readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ schan->happened_cyclic = schan->completed_cyclic = 0;
+ }
+}
+
+/* Interrupt handler */
+static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
+{
+ struct sirfsoc_dma *sdma = data;
+ struct sirfsoc_dma_chan *schan;
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ u32 is;
+ int ch;
+
+ is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
+ while ((ch = fls(is) - 1) >= 0) {
+ is &= ~(1 << ch);
+ writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
+ schan = &sdma->channels[ch];
+
+ spin_lock(&schan->lock);
+
+ sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+ node);
+ if (!sdesc->cyclic) {
+ /* Execute queued descriptors */
+ list_splice_tail_init(&schan->active, &schan->completed);
+ if (!list_empty(&schan->queued))
+ sirfsoc_dma_execute(schan);
+ } else
+ schan->happened_cyclic++;
+
+ spin_unlock(&schan->lock);
+ }
+
+ /* Schedule tasklet */
+ tasklet_schedule(&sdma->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/* process completed descriptors */
+static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
+{
+ dma_cookie_t last_cookie = 0;
+ struct sirfsoc_dma_chan *schan;
+ struct sirfsoc_dma_desc *sdesc;
+ struct dma_async_tx_descriptor *desc;
+ unsigned long flags;
+ unsigned long happened_cyclic;
+ LIST_HEAD(list);
+ int i;
+
+ for (i = 0; i < sdma->dma.chancnt; i++) {
+ schan = &sdma->channels[i];
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&schan->lock, flags);
+ if (!list_empty(&schan->completed)) {
+ list_splice_tail_init(&schan->completed, &list);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry(sdesc, &list, node) {
+ desc = &sdesc->desc;
+
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+
+ last_cookie = desc->cookie;
+ dma_run_dependencies(desc);
+ }
+
+ /* Free descriptors */
+ spin_lock_irqsave(&schan->lock, flags);
+ list_splice_tail_init(&list, &schan->free);
+ schan->completed_cookie = last_cookie;
+ spin_unlock_irqrestore(&schan->lock, flags);
+		} else {
+ /* for cyclic channel, desc is always in active list */
+ sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+ node);
+
+			if (!sdesc || !sdesc->cyclic) {
+ /* without active cyclic DMA */
+ spin_unlock_irqrestore(&schan->lock, flags);
+ continue;
+ }
+
+ /* cyclic DMA */
+ happened_cyclic = schan->happened_cyclic;
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ desc = &sdesc->desc;
+ while (happened_cyclic != schan->completed_cyclic) {
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+ schan->completed_cyclic++;
+ }
+ }
+ }
+}
+
+/* DMA Tasklet */
+static void sirfsoc_dma_tasklet(unsigned long data)
+{
+ struct sirfsoc_dma *sdma = (void *)data;
+
+ sirfsoc_dma_process_completed(sdma);
+}
+
+/* Submit descriptor to hardware */
+static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
+ struct sirfsoc_dma_desc *sdesc;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ /* Move descriptor to queue */
+ list_move_tail(&sdesc->node, &schan->queued);
+
+ /* Update cookie */
+ cookie = schan->chan.cookie + 1;
+ if (cookie <= 0)
+ cookie = 1;
+
+ schan->chan.cookie = cookie;
+ sdesc->desc.cookie = cookie;
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return cookie;
+}
+
+static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
+ struct dma_slave_config *config)
+{
+ unsigned long flags;
+
+ if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+ (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
+ return -EINVAL;
+
+ spin_lock_irqsave(&schan->lock, flags);
+ schan->mode = (config->src_maxburst == 4 ? 1 : 0);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
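+
+/*
+ * Illustrative client-side sketch (not part of the driver): the only
+ * supported bus width is 4 bytes and src_maxburst selects the mode bit,
+ * so a slave peripheral driver would configure the channel roughly as:
+ *
+ *	struct dma_slave_config cfg = {
+ *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ *		.src_maxburst	= 4,
+ *	};
+ *
+ *	dmaengine_slave_config(chan, &cfg);
+ */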
+
+static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ unsigned long flags;
+
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
+ ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+ writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
+
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ & ~((1 << cid) | 1 << (cid + 16)),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+
+ spin_lock_irqsave(&schan->lock, flags);
+ list_splice_tail_init(&schan->active, &schan->free);
+ list_splice_tail_init(&schan->queued, &schan->free);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct dma_slave_config *config;
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ return sirfsoc_dma_terminate_all(schan);
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ return sirfsoc_dma_slave_config(schan, config);
+
+ default:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+/* Alloc channel resources */
+static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc;
+ unsigned long flags;
+ LIST_HEAD(descs);
+ int i;
+
+ /* Alloc descriptors for this channel */
+ for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
+ sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
+ if (!sdesc) {
+ dev_notice(sdma->dma.dev, "Memory allocation error. "
+ "Allocated only %u descriptors\n", i);
+ break;
+ }
+
+ dma_async_tx_descriptor_init(&sdesc->desc, chan);
+ sdesc->desc.flags = DMA_CTRL_ACK;
+ sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
+
+ list_add_tail(&sdesc->node, &descs);
+ }
+
+ /* Return error only if no descriptors were allocated */
+ if (i == 0)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ list_splice_tail_init(&descs, &schan->free);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return i;
+}
+
+/* Free channel resources */
+static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc, *tmp;
+ unsigned long flags;
+ LIST_HEAD(descs);
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ /* Channel must be idle */
+ BUG_ON(!list_empty(&schan->prepared));
+ BUG_ON(!list_empty(&schan->queued));
+ BUG_ON(!list_empty(&schan->active));
+ BUG_ON(!list_empty(&schan->completed));
+
+ /* Move data */
+ list_splice_tail_init(&schan->free, &descs);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ /* Free descriptors */
+ list_for_each_entry_safe(sdesc, tmp, &descs, node)
+ kfree(sdesc);
+}
+
+/* Send pending descriptor to hardware */
+static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (list_empty(&schan->active) && !list_empty(&schan->queued))
+ sirfsoc_dma_execute(schan);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+}
+
+/* Check request completion status */
+static enum dma_status
+sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ unsigned long flags;
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+
+ spin_lock_irqsave(&schan->lock, flags);
+ last_used = schan->chan.cookie;
+ last_complete = schan->completed_cookie;
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ unsigned long iflags;
+ int ret;
+
+	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
+ ret = -EINVAL;
+ goto err_dir;
+ }
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&schan->lock, iflags);
+ if (!list_empty(&schan->free)) {
+ sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+ node);
+ list_del(&sdesc->node);
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ if (!sdesc) {
+ /* try to free completed descriptors */
+ sirfsoc_dma_process_completed(sdma);
+ ret = 0;
+ goto no_desc;
+ }
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&schan->lock, iflags);
+
+ /*
+ * Number of chunks in a frame can only be 1 for prima2
+	 * and ylen (number of frames - 1) must be at least 0
+ */
+ if ((xt->frame_size == 1) && (xt->numf > 0)) {
+ sdesc->cyclic = 0;
+ sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
+ sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
+ SIRFSOC_DMA_WORD_LEN;
+ sdesc->ylen = xt->numf - 1;
+ if (xt->dir == DMA_MEM_TO_DEV) {
+ sdesc->addr = xt->src_start;
+ sdesc->dir = 1;
+ } else {
+ sdesc->addr = xt->dst_start;
+ sdesc->dir = 0;
+ }
+
+ list_add_tail(&sdesc->node, &schan->prepared);
+ } else {
+ pr_err("sirfsoc DMA Invalid xfer\n");
+ ret = -EINVAL;
+ goto err_xfer;
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ return &sdesc->desc;
+err_xfer:
+ spin_unlock_irqrestore(&schan->lock, iflags);
+no_desc:
+err_dir:
+ return ERR_PTR(ret);
+}
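+
+/*
+ * Illustrative sketch (not part of the driver) of how a client could set
+ * up a 2D transfer for the prep routine above; the field values and the
+ * 'dma_buf' address are made up for the example:
+ *
+ *	struct dma_interleaved_template *xt;
+ *
+ *	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
+ *	xt->dir = DMA_MEM_TO_DEV;
+ *	xt->src_start = dma_buf;
+ *	xt->frame_size = 1;		(only one chunk per frame on prima2)
+ *	xt->numf = 16;			(16 frames, so ylen = 15)
+ *	xt->sgl[0].size = 64;		(64 bytes per line, xlen = 16 words)
+ *	xt->sgl[0].icg = 0;		(no inter-line gap, width == xlen)
+ *	desc = chan->device->device_prep_interleaved_dma(chan, xt, 0);
+ */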
+
+static struct dma_async_tx_descriptor *
+sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ unsigned long iflags;
+
+ /*
+	 * We only support cyclic transfers with two periods.
+	 * If the X-length is set to 0, the controller works in loop mode:
+	 * the DMA address keeps increasing until reaching the end of a loop
+	 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
+	 * the DMA address goes back to the beginning of this area.
+	 * In loop mode, the DMA data region is divided into two parts, BUFA
+	 * and BUFB. The DMA controller generates interrupts twice in each
+	 * loop: when the DMA address reaches the end of BUFA and when it
+	 * reaches the end of BUFB.
+ */
+ if (buf_len != 2 * period_len)
+ return ERR_PTR(-EINVAL);
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&schan->lock, iflags);
+ if (!list_empty(&schan->free)) {
+ sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+ node);
+ list_del(&sdesc->node);
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ if (!sdesc)
+ return 0;
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&schan->lock, iflags);
+ sdesc->addr = addr;
+ sdesc->cyclic = 1;
+ sdesc->xlen = 0;
+ sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
+ sdesc->width = 1;
+ list_add_tail(&sdesc->node, &schan->prepared);
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ return &sdesc->desc;
+}
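+
+/*
+ * Illustrative (not part of the driver): a cyclic user such as an audio
+ * driver hands over the whole buffer with exactly two periods, both
+ * multiples of 4 bytes, and gets a callback per half buffer, e.g.:
+ *
+ *	desc = chan->device->device_prep_dma_cyclic(chan, buf, 8192, 4096,
+ *						     DMA_MEM_TO_DEV);
+ */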
+
+/*
+ * The DMA controller consists of 16 independent DMA channels.
+ * Each channel is allocated to a different function
+ */
+bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
+{
+ unsigned int ch_nr = (unsigned int) chan_id;
+
+ if (ch_nr == chan->chan_id +
+ chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(sirfsoc_dma_filter_id);
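+
+/*
+ * Illustrative (not part of the driver): a client that needs, say,
+ * channel 4 of DMAC instance 1 encodes it as 1 * 16 + 4 = 20 and does:
+ *
+ *	dma_cap_mask_t mask;
+ *
+ *	dma_cap_zero(mask);
+ *	dma_cap_set(DMA_INTERLEAVE, mask);
+ *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)20);
+ */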
+
+static int __devinit sirfsoc_dma_probe(struct platform_device *op)
+{
+ struct device_node *dn = op->dev.of_node;
+ struct device *dev = &op->dev;
+ struct dma_device *dma;
+ struct sirfsoc_dma *sdma;
+ struct sirfsoc_dma_chan *schan;
+ struct resource res;
+ ulong regs_start, regs_size;
+ u32 id;
+ int ret, i;
+
+ sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
+ if (!sdma) {
+ dev_err(dev, "Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ if (of_property_read_u32(dn, "cell-index", &id)) {
+ dev_err(dev, "Fail to get DMAC index\n");
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ sdma->irq = irq_of_parse_and_map(dn, 0);
+ if (sdma->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ ret = -EINVAL;
+ goto free_mem;
+ }
+
+ ret = of_address_to_resource(dn, 0, &res);
+ if (ret) {
+ dev_err(dev, "Error parsing memory region!\n");
+ goto free_mem;
+ }
+
+ regs_start = res.start;
+ regs_size = resource_size(&res);
+
+ sdma->base = devm_ioremap(dev, regs_start, regs_size);
+ if (!sdma->base) {
+ dev_err(dev, "Error mapping memory region!\n");
+ ret = -ENOMEM;
+ goto irq_dispose;
+ }
+
+ ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
+ sdma);
+ if (ret) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ ret = -EINVAL;
+ goto unmap_mem;
+ }
+
+ dma = &sdma->dma;
+ dma->dev = dev;
+ dma->chancnt = SIRFSOC_DMA_CHANNELS;
+
+ dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
+ dma->device_issue_pending = sirfsoc_dma_issue_pending;
+ dma->device_control = sirfsoc_dma_control;
+ dma->device_tx_status = sirfsoc_dma_tx_status;
+ dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
+ dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma_cap_set(DMA_SLAVE, dma->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
+ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+
+ for (i = 0; i < dma->chancnt; i++) {
+ schan = &sdma->channels[i];
+
+ schan->chan.device = dma;
+ schan->chan.cookie = 1;
+ schan->completed_cookie = schan->chan.cookie;
+
+ INIT_LIST_HEAD(&schan->free);
+ INIT_LIST_HEAD(&schan->prepared);
+ INIT_LIST_HEAD(&schan->queued);
+ INIT_LIST_HEAD(&schan->active);
+ INIT_LIST_HEAD(&schan->completed);
+
+ spin_lock_init(&schan->lock);
+ list_add_tail(&schan->chan.device_node, &dma->channels);
+ }
+
+ tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
+
+ /* Register DMA engine */
+ dev_set_drvdata(dev, sdma);
+ ret = dma_async_device_register(dma);
+ if (ret)
+ goto free_irq;
+
+ dev_info(dev, "initialized SIRFSOC DMAC driver\n");
+
+ return 0;
+
+free_irq:
+ devm_free_irq(dev, sdma->irq, sdma);
+irq_dispose:
+ irq_dispose_mapping(sdma->irq);
+unmap_mem:
+ iounmap(sdma->base);
+free_mem:
+ devm_kfree(dev, sdma);
+ return ret;
+}
+
+static int __devexit sirfsoc_dma_remove(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+ dma_async_device_unregister(&sdma->dma);
+ devm_free_irq(dev, sdma->irq, sdma);
+ irq_dispose_mapping(sdma->irq);
+ iounmap(sdma->base);
+ devm_kfree(dev, sdma);
+ return 0;
+}
+
+static struct of_device_id sirfsoc_dma_match[] = {
+ { .compatible = "sirf,prima2-dmac", },
+ {},
+};
+
+static struct platform_driver sirfsoc_dma_driver = {
+ .probe = sirfsoc_dma_probe,
+ .remove = __devexit_p(sirfsoc_dma_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sirfsoc_dma_match,
+ },
+};
+
+module_platform_driver(sirfsoc_dma_driver);
+
+MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
+ "Barry Song <baohua.song@csr.com>");
+MODULE_DESCRIPTION("SIRFSOC DMA control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 13259cad0ce..cc5ecbc067a 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
@@ -32,6 +34,9 @@
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500
+/* Milliseconds */
+#define DMA40_AUTOSUSPEND_DELAY 100
+
/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
@@ -62,6 +67,55 @@ enum d40_command {
D40_DMA_SUSPENDED = 3
};
+/*
+ * These are the registers that have to be saved and later restored
+ * when the DMA hw is powered off.
+ * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
+ */
+static u32 d40_backup_regs[] = {
+ D40_DREG_LCPA,
+ D40_DREG_LCLA,
+ D40_DREG_PRMSE,
+ D40_DREG_PRMSO,
+ D40_DREG_PRMOE,
+ D40_DREG_PRMOO,
+};
+
+#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
+
+/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
+static u32 d40_backup_regs_v3[] = {
+ D40_DREG_PSEG1,
+ D40_DREG_PSEG2,
+ D40_DREG_PSEG3,
+ D40_DREG_PSEG4,
+ D40_DREG_PCEG1,
+ D40_DREG_PCEG2,
+ D40_DREG_PCEG3,
+ D40_DREG_PCEG4,
+ D40_DREG_RSEG1,
+ D40_DREG_RSEG2,
+ D40_DREG_RSEG3,
+ D40_DREG_RSEG4,
+ D40_DREG_RCEG1,
+ D40_DREG_RCEG2,
+ D40_DREG_RCEG3,
+ D40_DREG_RCEG4,
+};
+
+#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
+
+static u32 d40_backup_regs_chan[] = {
+ D40_CHAN_REG_SSCFG,
+ D40_CHAN_REG_SSELT,
+ D40_CHAN_REG_SSPTR,
+ D40_CHAN_REG_SSLNK,
+ D40_CHAN_REG_SDCFG,
+ D40_CHAN_REG_SDELT,
+ D40_CHAN_REG_SDPTR,
+ D40_CHAN_REG_SDLNK,
+};
+
/**
* struct d40_lli_pool - Structure for keeping LLIs in memory
*
@@ -96,7 +150,7 @@ struct d40_lli_pool {
* during a transfer.
* @node: List entry.
* @is_in_client_list: true if the client owns this descriptor.
- * the previous one.
+ * @cyclic: true if this is a cyclic job
*
* This descriptor is used for both logical and physical transfers.
*/
@@ -143,6 +197,7 @@ struct d40_lcla_pool {
* channels.
*
* @lock: A lock protection this entity.
+ * @reserved: True if used by the secure world or otherwise reserved.
* @num: The physical channel number of this entity.
* @allocated_src: Bit mapped to show which src event line's are mapped to
* this physical channel. Can also be free or physically allocated.
@@ -152,6 +207,7 @@ struct d40_lcla_pool {
*/
struct d40_phy_res {
spinlock_t lock;
+ bool reserved;
int num;
u32 allocated_src;
u32 allocated_dst;
@@ -185,7 +241,6 @@ struct d40_base;
* @src_def_cfg: Default cfg register setting for src.
* @dst_def_cfg: Default cfg register setting for dst.
* @log_def: Default logical channel settings.
- * @lcla: Space for one dst src pair for logical channel transfers.
* @lcpa: Pointer to dst and src lcpa settings.
* @runtime_addr: runtime configured address.
* @runtime_direction: runtime configured direction.
@@ -217,7 +272,7 @@ struct d40_chan {
struct d40_log_lli_full *lcpa;
/* Runtime reconfiguration */
dma_addr_t runtime_addr;
- enum dma_data_direction runtime_direction;
+ enum dma_transfer_direction runtime_direction;
};
/**
@@ -241,6 +296,7 @@ struct d40_chan {
* @dma_both: dma_device channels that can do both memcpy and slave transfers.
* @dma_slave: dma_device channels that can do only do slave transfers.
* @dma_memcpy: dma_device channels that can do only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
* @log_chans: Room for all possible logical channels in system.
* @lookup_log_chans: Used to map interrupt number to logical channel. Points
* to log_chans entries.
@@ -248,12 +304,20 @@ struct d40_chan {
* to phy_chans entries.
* @plat_data: Pointer to provided platform_data which is the driver
* configuration.
+ * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
* @phy_res: Vector containing all physical channels.
* @lcla_pool: lcla pool settings and data.
* @lcpa_base: The virtual mapped address of LCPA.
* @phy_lcpa: The physical address of the LCPA.
* @lcpa_size: The size of the LCPA area.
* @desc_slab: cache for descriptors.
+ * @reg_val_backup: Here the values of some hardware registers are stored
+ * before the DMA is powered off. They are restored when the power is back on.
+ * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
+ * later.
+ * @reg_val_backup_chan: Backup data for standard channel parameter registers.
+ * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
+ * @initialized: true if the dma has been initialized
*/
struct d40_base {
spinlock_t interrupt_lock;
@@ -275,6 +339,7 @@ struct d40_base {
struct d40_chan **lookup_log_chans;
struct d40_chan **lookup_phy_chans;
struct stedma40_platform_data *plat_data;
+ struct regulator *lcpa_regulator;
/* Physical half channels */
struct d40_phy_res *phy_res;
struct d40_lcla_pool lcla_pool;
@@ -282,6 +347,11 @@ struct d40_base {
dma_addr_t phy_lcpa;
resource_size_t lcpa_size;
struct kmem_cache *desc_slab;
+ u32 reg_val_backup[BACKUP_REGS_SZ];
+ u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+ u32 *reg_val_backup_chan;
+ u16 gcc_pwr_off_mask;
+ bool initialized;
};
/**
@@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
struct d40_desc *d;
struct d40_desc *_d;
- list_for_each_entry_safe(d, _d, &d40c->client, node)
+ list_for_each_entry_safe(d, _d, &d40c->client, node) {
if (async_tx_test_ack(&d->txd)) {
d40_desc_remove(d);
desc = d;
memset(desc, 0, sizeof(*desc));
break;
}
+ }
}
if (!desc)
@@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
bool cyclic = desc->cyclic;
int curr_lcla = -EINVAL;
int first_lcla = 0;
+ bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
bool linkback;
/*
@@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
&lli->src[lli_current],
next_lcla, flags);
- dma_sync_single_range_for_device(chan->base->dev,
- pool->dma_addr, lcla_offset,
- 2 * sizeof(struct d40_log_lli),
- DMA_TO_DEVICE);
-
+ /*
+ * Cache maintenance is not needed if lcla is
+ * mapped in esram
+ */
+ if (!use_esram_lcla) {
+ dma_sync_single_range_for_device(chan->base->dev,
+ pool->dma_addr, lcla_offset,
+ 2 * sizeof(struct d40_log_lli),
+ DMA_TO_DEVICE);
+ }
curr_lcla = next_lcla;
if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
@@ -740,7 +817,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
return len;
}
-/* Support functions for logical channels */
+
+#ifdef CONFIG_PM
+static void dma40_backup(void __iomem *baseaddr, u32 *backup,
+ u32 *regaddr, int num, bool save)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ void __iomem *addr = baseaddr + regaddr[i];
+
+ if (save)
+ backup[i] = readl_relaxed(addr);
+ else
+ writel_relaxed(backup[i], addr);
+ }
+}
+
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+ int i;
+
+ /* Save/Restore channel specific registers */
+ for (i = 0; i < base->num_phy_chans; i++) {
+ void __iomem *addr;
+ int idx;
+
+ if (base->phy_res[i].reserved)
+ continue;
+
+ addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
+ idx = i * ARRAY_SIZE(d40_backup_regs_chan);
+
+ dma40_backup(addr, &base->reg_val_backup_chan[idx],
+ d40_backup_regs_chan,
+ ARRAY_SIZE(d40_backup_regs_chan),
+ save);
+ }
+
+ /* Save/Restore global registers */
+ dma40_backup(base->virtbase, base->reg_val_backup,
+ d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
+ save);
+
+ /* Save/Restore registers only existing on dma40 v3 and later */
+ if (base->rev >= 3)
+ dma40_backup(base->virtbase, base->reg_val_backup_v3,
+ d40_backup_regs_v3,
+ ARRAY_SIZE(d40_backup_regs_v3),
+ save);
+}
+#else
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+}
+#endif
static int d40_channel_execute_command(struct d40_chan *d40c,
enum d40_command command)
@@ -973,6 +1104,10 @@ static void d40_config_write(struct d40_chan *d40c)
/* Set LIDX for lcla */
writel(lidx, chanbase + D40_CHAN_REG_SSELT);
writel(lidx, chanbase + D40_CHAN_REG_SDELT);
+
+ /* Clear LNK which will be used by d40_chan_has_events() */
+ writel(0, chanbase + D40_CHAN_REG_SSLNK);
+ writel(0, chanbase + D40_CHAN_REG_SDLNK);
}
}
@@ -1013,6 +1148,7 @@ static int d40_pause(struct d40_chan *d40c)
if (!d40c->busy)
return 0;
+ pm_runtime_get_sync(d40c->base->dev);
spin_lock_irqsave(&d40c->lock, flags);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -1025,7 +1161,8 @@ static int d40_pause(struct d40_chan *d40c)
D40_DMA_RUN);
}
}
-
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
@@ -1039,7 +1176,7 @@ static int d40_resume(struct d40_chan *d40c)
return 0;
spin_lock_irqsave(&d40c->lock, flags);
-
+ pm_runtime_get_sync(d40c->base->dev);
if (d40c->base->rev == 0)
if (chan_is_logical(d40c)) {
res = d40_channel_execute_command(d40c,
@@ -1057,6 +1194,8 @@ static int d40_resume(struct d40_chan *d40c)
}
no_suspend:
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
@@ -1129,7 +1268,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
d40d = d40_first_queued(d40c);
if (d40d != NULL) {
- d40c->busy = true;
+ if (!d40c->busy)
+ d40c->busy = true;
+
+ pm_runtime_get_sync(d40c->base->dev);
/* Remove from queue */
d40_desc_remove(d40d);
@@ -1190,6 +1332,8 @@ static void dma_tc_handle(struct d40_chan *d40c)
if (d40_queue_start(d40c) == NULL)
d40c->busy = false;
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
}
d40c->pending_tx++;
@@ -1405,11 +1549,16 @@ static int d40_validate_conf(struct d40_chan *d40c,
return res;
}
-static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
- int log_event_line, bool is_log)
+static bool d40_alloc_mask_set(struct d40_phy_res *phy,
+ bool is_src, int log_event_line, bool is_log,
+ bool *first_user)
{
unsigned long flags;
spin_lock_irqsave(&phy->lock, flags);
+
+ *first_user = ((phy->allocated_src | phy->allocated_dst)
+ == D40_ALLOC_FREE);
+
if (!is_log) {
/* Physical interrupts are masked per physical full channel */
if (phy->allocated_src == D40_ALLOC_FREE &&
@@ -1490,7 +1639,7 @@ out:
return is_free;
}
-static int d40_allocate_channel(struct d40_chan *d40c)
+static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
int dev_type;
int event_group;
@@ -1526,7 +1675,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
for (i = 0; i < d40c->base->num_phy_chans; i++) {
if (d40_alloc_mask_set(&phys[i], is_src,
- 0, is_log))
+ 0, is_log,
+ first_phy_user))
goto found_phy;
}
} else
@@ -1536,7 +1686,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
if (d40_alloc_mask_set(&phys[i],
is_src,
0,
- is_log))
+ is_log,
+ first_phy_user))
goto found_phy;
}
}
@@ -1552,6 +1703,25 @@ found_phy:
/* Find logical channel */
for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
int phy_num = j + event_group * 2;
+
+ if (d40c->dma_cfg.use_fixed_channel) {
+ i = d40c->dma_cfg.phy_channel;
+
+ if ((i != phy_num) && (i != phy_num + 1)) {
+ dev_err(chan2dev(d40c),
+ "invalid fixed phy channel %d\n", i);
+ return -EINVAL;
+ }
+
+ if (d40_alloc_mask_set(&phys[i], is_src, event_line,
+ is_log, first_phy_user))
+ goto found_log;
+
+ dev_err(chan2dev(d40c),
+ "could not allocate fixed phy channel %d\n", i);
+ return -EINVAL;
+ }
+
/*
* Spread logical channels across all available physical rather
* than pack every logical channel at the first available phy
@@ -1560,13 +1730,15 @@ found_phy:
if (is_src) {
for (i = phy_num; i < phy_num + 2; i++) {
if (d40_alloc_mask_set(&phys[i], is_src,
- event_line, is_log))
+ event_line, is_log,
+ first_phy_user))
goto found_log;
}
} else {
for (i = phy_num + 1; i >= phy_num; i--) {
if (d40_alloc_mask_set(&phys[i], is_src,
- event_line, is_log))
+ event_line, is_log,
+ first_phy_user))
goto found_log;
}
}
@@ -1643,10 +1815,11 @@ static int d40_free_dma(struct d40_chan *d40c)
return -EINVAL;
}
+ pm_runtime_get_sync(d40c->base->dev);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
if (res) {
chan_err(d40c, "suspend failed\n");
- return res;
+ goto out;
}
if (chan_is_logical(d40c)) {
@@ -1664,13 +1837,11 @@ static int d40_free_dma(struct d40_chan *d40c)
if (d40_chan_has_events(d40c)) {
res = d40_channel_execute_command(d40c,
D40_DMA_RUN);
- if (res) {
+ if (res)
chan_err(d40c,
"Executing RUN command\n");
- return res;
- }
}
- return 0;
+ goto out;
}
} else {
(void) d40_alloc_mask_free(phy, is_src, 0);
@@ -1680,13 +1851,23 @@ static int d40_free_dma(struct d40_chan *d40c)
res = d40_channel_execute_command(d40c, D40_DMA_STOP);
if (res) {
chan_err(d40c, "Failed to stop channel\n");
- return res;
+ goto out;
}
+
+ if (d40c->busy) {
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ }
+
+ d40c->busy = false;
d40c->phy_chan = NULL;
d40c->configured = false;
d40c->base->lookup_phy_chans[phy->num] = NULL;
+out:
- return 0;
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ return res;
}
static bool d40_is_paused(struct d40_chan *d40c)
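The early returns in d40_free_dma() are funnelled through a single out: label so that the runtime PM reference taken at the top of the function is released on the error paths as well as on success. Reduced to a hypothetical skeleton (suspend_channel()/stop_channel() are placeholders, not driver functions), the shape is:

static int release_channel(struct device *dev)
{
	int res;

	pm_runtime_get_sync(dev);

	res = suspend_channel();
	if (res)
		goto out;			/* the PM reference is still dropped below */

	res = stop_channel();
out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return res;
}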
@@ -1855,7 +2036,7 @@ err:
}
static dma_addr_t
-d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
+d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
{
struct stedma40_platform_data *plat = chan->base->plat_data;
struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
@@ -1864,9 +2045,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
if (chan->runtime_addr)
return chan->runtime_addr;
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
addr = plat->dev_rx[cfg->src_dev_type];
- else if (direction == DMA_TO_DEVICE)
+ else if (direction == DMA_MEM_TO_DEV)
addr = plat->dev_tx[cfg->dst_dev_type];
return addr;
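The slave-transfer direction is now expressed with the dedicated enum dma_transfer_direction from linux/dmaengine.h instead of the DMA-mapping enum dma_data_direction: DMA_FROM_DEVICE becomes DMA_DEV_TO_MEM and DMA_TO_DEVICE becomes DMA_MEM_TO_DEV. A small hypothetical helper makes the mapping explicit for callers still holding the old value:

#include <linux/dmaengine.h>
#include <linux/dma-direction.h>

static enum dma_transfer_direction to_xfer_direction(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		return DMA_DEV_TO_MEM;	/* peripheral -> memory (RX) */
	case DMA_TO_DEVICE:
		return DMA_MEM_TO_DEV;	/* memory -> peripheral (TX) */
	default:
		return DMA_TRANS_NONE;	/* no slave transfer */
	}
}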
@@ -1875,7 +2056,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
struct scatterlist *sg_dst, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long dma_flags)
+ enum dma_transfer_direction direction, unsigned long dma_flags)
{
struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
dma_addr_t src_dev_addr = 0;
@@ -1902,9 +2083,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
if (direction != DMA_NONE) {
dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
src_dev_addr = dev_addr;
- else if (direction == DMA_TO_DEVICE)
+ else if (direction == DMA_MEM_TO_DEV)
dst_dev_addr = dev_addr;
}
@@ -2011,14 +2192,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
goto fail;
}
}
- is_free_phy = (d40c->phy_chan == NULL);
- err = d40_allocate_channel(d40c);
+ err = d40_allocate_channel(d40c, &is_free_phy);
if (err) {
chan_err(d40c, "Failed to allocate channel\n");
+ d40c->configured = false;
goto fail;
}
+ pm_runtime_get_sync(d40c->base->dev);
/* Fill in basic CFG register values */
d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
&d40c->dst_def_cfg, chan_is_logical(d40c));
@@ -2038,6 +2220,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
}
+ dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
+ chan_is_logical(d40c) ? "logical" : "physical",
+ d40c->phy_chan->num,
+ d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
+
+
/*
* Only write channel configuration to the DMA if the physical
* resource is free. In case of multiple logical channels
@@ -2046,6 +2234,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
if (is_free_phy)
d40_config_write(d40c);
fail:
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return err;
}
@@ -2108,10 +2298,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl,
unsigned int sg_len,
- enum dma_data_direction direction,
+ enum dma_transfer_direction direction,
unsigned long dma_flags)
{
- if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
+ if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
return NULL;
return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2120,7 +2310,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_data_direction direction)
+ enum dma_transfer_direction direction)
{
unsigned int periods = buf_len / period_len;
struct dma_async_tx_descriptor *txd;
@@ -2269,7 +2459,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
dst_addr_width = config->dst_addr_width;
dst_maxburst = config->dst_maxburst;
- if (config->direction == DMA_FROM_DEVICE) {
+ if (config->direction == DMA_DEV_TO_MEM) {
dma_addr_t dev_addr_rx =
d40c->base->plat_data->dev_rx[cfg->src_dev_type];
@@ -2292,7 +2482,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
if (dst_maxburst == 0)
dst_maxburst = src_maxburst;
- } else if (config->direction == DMA_TO_DEVICE) {
+ } else if (config->direction == DMA_MEM_TO_DEV) {
dma_addr_t dev_addr_tx =
d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
@@ -2357,7 +2547,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
"configured channel %s for %s, data width %d/%d, "
"maxburst %d/%d elements, LE, no flow control\n",
dma_chan_name(chan),
- (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+ (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
src_addr_width, dst_addr_width,
src_maxburst, dst_maxburst);
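The direction reaches d40_set_runtime_config() through struct dma_slave_config, filled in by the client driver; the RX/TX branches above then pick the device address and the burst/width defaults. A minimal, hypothetical client-side configuration for a device-to-memory (RX) channel might look like this (the FIFO address and widths are made up for illustration):

#include <linux/dmaengine.h>

static int configure_rx_channel(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
	};

	return dmaengine_slave_config(chan, &cfg);
}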
@@ -2519,6 +2709,72 @@ failure1:
return err;
}
+/* Suspend resume functionality */
+#ifdef CONFIG_PM
+static int dma40_pm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+ int ret = 0;
+ if (!pm_runtime_suspended(dev))
+ return -EBUSY;
+
+ if (base->lcpa_regulator)
+ ret = regulator_disable(base->lcpa_regulator);
+ return ret;
+}
+
+static int dma40_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+
+ d40_save_restore_registers(base, true);
+
+ /* Don't disable/enable clocks for v1 due to HW bugs */
+ if (base->rev != 1)
+ writel_relaxed(base->gcc_pwr_off_mask,
+ base->virtbase + D40_DREG_GCC);
+
+ return 0;
+}
+
+static int dma40_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+
+ if (base->initialized)
+ d40_save_restore_registers(base, false);
+
+ writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
+ base->virtbase + D40_DREG_GCC);
+ return 0;
+}
+
+static int dma40_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (base->lcpa_regulator)
+ ret = regulator_enable(base->lcpa_regulator);
+
+ return ret;
+}
+
+static const struct dev_pm_ops dma40_pm_ops = {
+ .suspend = dma40_pm_suspend,
+ .runtime_suspend = dma40_runtime_suspend,
+ .runtime_resume = dma40_runtime_resume,
+ .resume = dma40_resume,
+};
+#define DMA40_PM_OPS (&dma40_pm_ops)
+#else
+#define DMA40_PM_OPS NULL
+#endif
+
/* Initialization functions. */
static int __init d40_phy_res_init(struct d40_base *base)
@@ -2527,6 +2783,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
int num_phy_chans_avail = 0;
u32 val[2];
int odd_even_bit = -2;
+ int gcc = D40_DREG_GCC_ENA;
val[0] = readl(base->virtbase + D40_DREG_PRSME);
val[1] = readl(base->virtbase + D40_DREG_PRSMO);
@@ -2538,9 +2795,17 @@ static int __init d40_phy_res_init(struct d40_base *base)
/* Mark security only channels as occupied */
base->phy_res[i].allocated_src = D40_ALLOC_PHY;
base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+ base->phy_res[i].reserved = true;
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+ D40_DREG_GCC_SRC);
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+ D40_DREG_GCC_DST);
+
+
} else {
base->phy_res[i].allocated_src = D40_ALLOC_FREE;
base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+ base->phy_res[i].reserved = false;
num_phy_chans_avail++;
}
spin_lock_init(&base->phy_res[i].lock);
@@ -2552,6 +2817,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
+ base->phy_res[chan].reserved = true;
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+ D40_DREG_GCC_SRC);
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+ D40_DREG_GCC_DST);
num_phy_chans_avail--;
}
@@ -2572,6 +2842,15 @@ static int __init d40_phy_res_init(struct d40_base *base)
val[0] = val[0] >> 2;
}
+ /*
+ * To keep things simple, enable all clocks initially.
+ * The clocks are managed later, after channel allocation.
+ * The clocks for the event lines on which reserved channels exist
+ * are not managed here.
+ */
+ writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
+ base->gcc_pwr_off_mask = gcc;
+
return num_phy_chans_avail;
}
@@ -2699,10 +2978,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
goto failure;
}
- base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
- sizeof(struct d40_desc *) *
- D40_LCLA_LINK_PER_EVENT_GRP,
+ base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
+ sizeof(d40_backup_regs_chan),
GFP_KERNEL);
+ if (!base->reg_val_backup_chan)
+ goto failure;
+
+ base->lcla_pool.alloc_map =
+ kzalloc(num_phy_chans * sizeof(struct d40_desc *)
+ * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
if (!base->lcla_pool.alloc_map)
goto failure;
@@ -2741,9 +3025,9 @@ failure:
static void __init d40_hw_init(struct d40_base *base)
{
- static const struct d40_reg_val dma_init_reg[] = {
+ static struct d40_reg_val dma_init_reg[] = {
/* Clock every part of the DMA block from start */
- { .reg = D40_DREG_GCC, .val = 0x0000ff01},
+ { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
/* Interrupts on all logical channels */
{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
@@ -2943,11 +3227,31 @@ static int __init d40_probe(struct platform_device *pdev)
d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
goto failure;
}
+ /* If lcla has to be located in ESRAM, we don't need to allocate */
+ if (base->plat_data->use_esram_lcla) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "lcla_esram");
+ if (!res) {
+ ret = -ENOENT;
+ d40_err(&pdev->dev,
+ "No \"lcla_esram\" memory resource\n");
+ goto failure;
+ }
+ base->lcla_pool.base = ioremap(res->start,
+ resource_size(res));
+ if (!base->lcla_pool.base) {
+ ret = -ENOMEM;
+ d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
+ goto failure;
+ }
+ writel(res->start, base->virtbase + D40_DREG_LCLA);
- ret = d40_lcla_allocate(base);
- if (ret) {
- d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
- goto failure;
+ } else {
+ ret = d40_lcla_allocate(base);
+ if (ret) {
+ d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
+ goto failure;
+ }
}
spin_lock_init(&base->lcla_pool.lock);
@@ -2960,6 +3264,32 @@ static int __init d40_probe(struct platform_device *pdev)
goto failure;
}
+ pm_runtime_irq_safe(base->dev);
+ pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(base->dev);
+ pm_runtime_enable(base->dev);
+ pm_runtime_resume(base->dev);
+
+ if (base->plat_data->use_esram_lcla) {
+
+ base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
+ if (IS_ERR(base->lcpa_regulator)) {
+ d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+ base->lcpa_regulator = NULL;
+ goto failure;
+ }
+
+ ret = regulator_enable(base->lcpa_regulator);
+ if (ret) {
+ d40_err(&pdev->dev,
+ "Failed to enable lcpa_regulator\n");
+ regulator_put(base->lcpa_regulator);
+ base->lcpa_regulator = NULL;
+ goto failure;
+ }
+ }
+
+ base->initialized = true;
err = d40_dmaengine_init(base, num_reserved_chans);
if (err)
goto failure;
@@ -2976,6 +3306,11 @@ failure:
if (base->virtbase)
iounmap(base->virtbase);
+ if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
+ iounmap(base->lcla_pool.base);
+ base->lcla_pool.base = NULL;
+ }
+
if (base->lcla_pool.dma_addr)
dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
SZ_1K * base->num_phy_chans,
@@ -2998,6 +3333,11 @@ failure:
clk_put(base->clk);
}
+ if (base->lcpa_regulator) {
+ regulator_disable(base->lcpa_regulator);
+ regulator_put(base->lcpa_regulator);
+ }
+
kfree(base->lcla_pool.alloc_map);
kfree(base->lookup_log_chans);
kfree(base->lookup_phy_chans);
@@ -3013,6 +3353,7 @@ static struct platform_driver d40_driver = {
.driver = {
.owner = THIS_MODULE,
.name = D40_NAME,
+ .pm = DMA40_PM_OPS,
},
};
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index b44c455158d..8d3d490968a 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -16,6 +16,8 @@
#define D40_TYPE_TO_GROUP(type) (type / 16)
#define D40_TYPE_TO_EVENT(type) (type % 16)
+#define D40_GROUP_SIZE 8
+#define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2)
/* Most bits of the CFG register are the same in log as in phy mode */
#define D40_SREG_CFG_MST_POS 15
@@ -123,6 +125,15 @@
/* DMA Register Offsets */
#define D40_DREG_GCC 0x000
+#define D40_DREG_GCC_ENA 0x1
+/* This assumes that there are only 4 event groups */
+#define D40_DREG_GCC_ENABLE_ALL 0xff01
+#define D40_DREG_GCC_EVTGRP_POS 8
+#define D40_DREG_GCC_SRC 0
+#define D40_DREG_GCC_DST 1
+#define D40_DREG_GCC_EVTGRP_ENA(x, y) \
+ (1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y))
+
#define D40_DREG_PRTYP 0x004
#define D40_DREG_PRSME 0x008
#define D40_DREG_PRSMO 0x00C
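D40_DREG_GCC_EVTGRP_ENA() builds the per-event-group clock enable bits that d40_phy_res_init() collects into gcc_pwr_off_mask for reserved channels: bit 0 is the global enable and bits 8 and up hold one src/dst pair per group. Worked through for an assumed event group 2:

u32 gcc = D40_DREG_GCC_ENA;				/* 0x0001, global enable */

gcc |= D40_DREG_GCC_EVTGRP_ENA(2, D40_DREG_GCC_SRC);	/* 1 << (8 + 2*2 + 0) = 0x1000 */
gcc |= D40_DREG_GCC_EVTGRP_ENA(2, D40_DREG_GCC_DST);	/* 1 << (8 + 2*2 + 1) = 0x2000 */

/* gcc == 0x3001; written to D40_DREG_GCC at runtime suspend, it keeps only
 * the reserved group's clocks (plus the global enable) running. */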
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index a4a398f2ef6..a6f9c1684a0 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -90,7 +90,7 @@ struct timb_dma_chan {
struct list_head queue;
struct list_head free_list;
unsigned int bytes_per_line;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
unsigned int descs; /* Descriptors to allocate */
unsigned int desc_elems; /* number of elems per descriptor */
};
@@ -166,10 +166,10 @@ static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
if (single)
dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
- td_chan->direction);
+ DMA_TO_DEVICE);
else
dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
- td_chan->direction);
+ DMA_TO_DEVICE);
}
static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
@@ -235,7 +235,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
"td_chan: %p, chan: %d, membase: %p\n",
td_chan, td_chan->chan.chan_id, td_chan->membase);
- if (td_chan->direction == DMA_FROM_DEVICE) {
+ if (td_chan->direction == DMA_DEV_TO_MEM) {
/* descriptor address */
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
@@ -278,7 +278,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
txd->cookie);
/* make sure to stop the transfer */
- if (td_chan->direction == DMA_FROM_DEVICE)
+ if (td_chan->direction == DMA_DEV_TO_MEM)
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
else
@@ -558,7 +558,7 @@ static void td_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct timb_dma_chan *td_chan =
container_of(chan, struct timb_dma_chan, chan);
@@ -606,7 +606,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
}
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
- td_desc->desc_list_len, DMA_TO_DEVICE);
+ td_desc->desc_list_len, DMA_MEM_TO_DEV);
return &td_desc->txd;
}
@@ -775,8 +775,8 @@ static int __devinit td_probe(struct platform_device *pdev)
td_chan->descs = pchan->descriptors;
td_chan->desc_elems = pchan->descriptor_elements;
td_chan->bytes_per_line = pchan->bytes_per_line;
- td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
- DMA_TO_DEVICE;
+ td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
+ DMA_MEM_TO_DEV;
td_chan->membase = td->membase +
(i / 2) * TIMBDMA_INSTANCE_OFFSET +
@@ -841,17 +841,7 @@ static struct platform_driver td_driver = {
.remove = __exit_p(td_remove),
};
-static int __init td_init(void)
-{
- return platform_driver_register(&td_driver);
-}
-module_init(td_init);
-
-static void __exit td_exit(void)
-{
- platform_driver_unregister(&td_driver);
-}
-module_exit(td_exit);
+module_platform_driver(td_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
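module_platform_driver() (from linux/platform_device.h) generates the registration boilerplate that the removed td_init()/td_exit() pair spelled out by hand; its expansion is roughly:

static int __init td_driver_init(void)
{
	return platform_driver_register(&td_driver);
}
module_init(td_driver_init);

static void __exit td_driver_exit(void)
{
	platform_driver_unregister(&td_driver);
}
module_exit(td_driver_exit);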
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index cbd83e362b5..6122c364cf1 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -845,7 +845,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
@@ -860,9 +860,9 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
BUG_ON(!ds || !ds->reg_width);
if (ds->tx_reg)
- BUG_ON(direction != DMA_TO_DEVICE);
+ BUG_ON(direction != DMA_MEM_TO_DEV);
else
- BUG_ON(direction != DMA_FROM_DEVICE);
+ BUG_ON(direction != DMA_DEV_TO_MEM);
if (unlikely(!sg_len))
return NULL;
@@ -882,7 +882,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
mem = sg_dma_address(sg);
if (__is_dmac64(ddev)) {
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
desc->hwdesc.SAR = mem;
desc->hwdesc.DAR = ds->tx_reg;
} else {
@@ -891,7 +891,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
desc->hwdesc.CNTR = sg_dma_len(sg);
} else {
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
desc->hwdesc32.SAR = mem;
desc->hwdesc32.DAR = ds->tx_reg;
} else {
@@ -900,7 +900,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
desc->hwdesc32.CNTR = sg_dma_len(sg);
}
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
sai = ds->reg_width;
dai = 0;
} else {