author     Russell King <rmk+kernel@arm.linux.org.uk>    2010-03-08 20:21:04 +0000
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2010-03-08 20:21:04 +0000
commit     988addf82e4c03739375279de73929580a2d4a6a (patch)
tree       989ae1cd4e264bbad80c65f04480486246e7b9f3 /drivers/dma
parent     004c1c7096659d352b83047a7593e91d8a30e3c5 (diff)
parent     25cf84cf377c0aae5dbcf937ea89bc7893db5176 (diff)
Merge branch 'origin' into devel-stable
Conflicts:
	arch/arm/mach-mx2/devices.c
	arch/arm/mach-mx2/devices.h
	sound/soc/pxa/pxa-ssp.c
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig           |   23
-rw-r--r--  drivers/dma/Makefile          |    8
-rw-r--r--  drivers/dma/coh901318.c       |  182
-rw-r--r--  drivers/dma/coh901318_lli.c   |   23
-rw-r--r--  drivers/dma/dmaengine.c       |    2
-rw-r--r--  drivers/dma/dmatest.c         |    8
-rw-r--r--  drivers/dma/fsldma.c          | 1177
-rw-r--r--  drivers/dma/fsldma.h          |   35
-rw-r--r--  drivers/dma/ioat/dma.c        |   48
-rw-r--r--  drivers/dma/ioat/dma.h        |   11
-rw-r--r--  drivers/dma/ioat/dma_v2.c     |   70
-rw-r--r--  drivers/dma/ioat/dma_v2.h     |    6
-rw-r--r--  drivers/dma/ioat/dma_v3.c     |   64
-rw-r--r--  drivers/dma/ioat/registers.h  |    2
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c   |   15
-rw-r--r--  drivers/dma/mpc512x_dma.c     |  800
-rw-r--r--  drivers/dma/ppc4xx/adma.c     |    2
-rw-r--r--  drivers/dma/shdma.c           |  841
-rw-r--r--  drivers/dma/shdma.h           |   31
19 files changed, 2291 insertions(+), 1057 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e02d74b1e89..c27f80e5d53 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -13,6 +13,22 @@ menuconfig DMADEVICES
DMA Device drivers supported by the configured arch, it may
be empty in some cases.
+config DMADEVICES_DEBUG
+ bool "DMA Engine debugging"
+ depends on DMADEVICES != n
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables DMA engine core and driver debugging.
+
+config DMADEVICES_VDEBUG
+ bool "DMA Engine verbose debugging"
+ depends on DMADEVICES_DEBUG != n
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables deeper (more verbose) debugging of
+ the DMA engine core and drivers.
+
+
if DMADEVICES
comment "DMA Devices"
@@ -69,6 +85,13 @@ config FSL_DMA
The Elo is the DMA controller on some 82xx and 83xx parts, and the
Elo Plus is the DMA controller on 85xx and 86xx parts.
+config MPC512X_DMA
+ tristate "Freescale MPC512x built-in DMA engine support"
+ depends on PPC_MPC512x
+ select DMA_ENGINE
+ ---help---
+ Enable support for the Freescale MPC512x built-in DMA engine.
+
config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 807053d4823..22bba3d5e2b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,9 +1,17 @@
+ifeq ($(CONFIG_DMADEVICES_DEBUG),y)
+ EXTRA_CFLAGS += -DDEBUG
+endif
+ifeq ($(CONFIG_DMADEVICES_VDEBUG),y)
+ EXTRA_CFLAGS += -DVERBOSE_DEBUG
+endif
+
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
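The two Makefile conditionals above are all the new Kconfig options do: DMADEVICES_DEBUG adds -DDEBUG and DMADEVICES_VDEBUG adds -DVERBOSE_DEBUG for every object built under drivers/dma, which turns the dev_dbg()/dev_vdbg() calls in the drivers into real prints instead of compiled-away stubs. A minimal sketch of the effect in a driver (hypothetical function, not part of this patch):

#include <linux/device.h>

/* With -DDEBUG, dev_dbg() expands to a printk at KERN_DEBUG level;
 * without it the call only type-checks its arguments.  dev_vdbg()
 * additionally requires -DVERBOSE_DEBUG to produce any output. */
static void example_debug_prints(struct device *dev, int chan_id)
{
	dev_dbg(dev, "channel %d: starting transfer\n", chan_id);
	dev_vdbg(dev, "channel %d: dumping full descriptor chain\n", chan_id);
}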
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 64a937262a4..1656fdcdb6c 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -39,7 +39,6 @@ struct coh901318_desc {
unsigned int sg_len;
struct coh901318_lli *data;
enum dma_data_direction dir;
- int pending_irqs;
unsigned long flags;
};
@@ -72,7 +71,6 @@ struct coh901318_chan {
unsigned long nbr_active_done;
unsigned long busy;
- int pending_irqs;
struct coh901318_base *base;
};
@@ -80,18 +78,16 @@ struct coh901318_chan {
static void coh901318_list_print(struct coh901318_chan *cohc,
struct coh901318_lli *lli)
{
- struct coh901318_lli *l;
- dma_addr_t addr = virt_to_phys(lli);
+ struct coh901318_lli *l = lli;
int i = 0;
- while (addr) {
- l = phys_to_virt(addr);
+ while (l) {
dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
- ", dst 0x%x, link 0x%x link_virt 0x%p\n",
+ ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n",
i, l, l->control, l->src_addr, l->dst_addr,
- l->link_addr, phys_to_virt(l->link_addr));
+ l->link_addr, l->virt_link_addr);
i++;
- addr = l->link_addr;
+ l = l->virt_link_addr;
}
}
@@ -125,7 +121,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
goto err_kmalloc;
tmp = dev_buf;
- tmp += sprintf(tmp, "DMA -- enable dma channels\n");
+ tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
if (started_channels & (1 << i))
@@ -337,16 +333,22 @@ coh901318_desc_get(struct coh901318_chan *cohc)
* TODO: alloc a pile of descs instead of just one,
* avoid many small allocations.
*/
- desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
+ desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
if (desc == NULL)
goto out;
INIT_LIST_HEAD(&desc->node);
+ dma_async_tx_descriptor_init(&desc->desc, &cohc->chan);
} else {
/* Reuse an old desc. */
desc = list_first_entry(&cohc->free,
struct coh901318_desc,
node);
list_del(&desc->node);
+ /* Initialize it a bit so it's not insane */
+ desc->sg = NULL;
+ desc->sg_len = 0;
+ desc->desc.callback = NULL;
+ desc->desc.callback_param = NULL;
}
out:
@@ -364,10 +366,6 @@ static void
coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
{
list_add_tail(&desc->node, &cohc->active);
-
- BUG_ON(cohc->pending_irqs != 0);
-
- cohc->pending_irqs = desc->pending_irqs;
}
static struct coh901318_desc *
@@ -592,6 +590,10 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
return cohd_que;
}
+/*
+ * This tasklet is called from the interrupt handler to
+ * handle each descriptor (DMA job) that is sent to a channel.
+ */
static void dma_tasklet(unsigned long data)
{
struct coh901318_chan *cohc = (struct coh901318_chan *) data;
@@ -600,55 +602,58 @@ static void dma_tasklet(unsigned long data)
dma_async_tx_callback callback;
void *callback_param;
+ dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
+ " nbr_active_done %ld\n", __func__,
+ cohc->id, cohc->nbr_active_done);
+
spin_lock_irqsave(&cohc->lock, flags);
- /* get first active entry from list */
+ /* get first active descriptor entry from list */
cohd_fin = coh901318_first_active_get(cohc);
- BUG_ON(cohd_fin->pending_irqs == 0);
-
if (cohd_fin == NULL)
goto err;
- cohd_fin->pending_irqs--;
- cohc->completed = cohd_fin->desc.cookie;
+ /* locate callback to client */
+ callback = cohd_fin->desc.callback;
+ callback_param = cohd_fin->desc.callback_param;
- if (cohc->nbr_active_done == 0)
- return;
+ /* sign this job as completed on the channel */
+ cohc->completed = cohd_fin->desc.cookie;
- if (!cohd_fin->pending_irqs) {
- /* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
- }
+ /* release the lli allocation and remove the descriptor */
+ coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
- dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d"
- " nbr_active_done %ld\n", __func__,
- cohc->id, cohc->pending_irqs, cohc->nbr_active_done);
+ /* return desc to free-list */
+ coh901318_desc_remove(cohd_fin);
+ coh901318_desc_free(cohc, cohd_fin);
- /* callback to client */
- callback = cohd_fin->desc.callback;
- callback_param = cohd_fin->desc.callback_param;
-
- if (!cohd_fin->pending_irqs) {
- coh901318_desc_remove(cohd_fin);
+ spin_unlock_irqrestore(&cohc->lock, flags);
- /* return desc to free-list */
- coh901318_desc_free(cohc, cohd_fin);
- }
+ /* Call the callback when we're done */
+ if (callback)
+ callback(callback_param);
- if (cohc->nbr_active_done)
- cohc->nbr_active_done--;
+ spin_lock_irqsave(&cohc->lock, flags);
+ /*
+ * If another interrupt fired while this tasklet was being scheduled,
+ * we do not get called twice; instead the nbr_active_done counter
+ * tracks how many IRQs still need to be handled for this channel.
+ * If more than one IRQ remains to be acked, we simply schedule this
+ * tasklet again.
+ */
+ cohc->nbr_active_done--;
if (cohc->nbr_active_done) {
+ dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs "
+ "came in while we were scheduling this tasklet\n");
if (cohc_chan_conf(cohc)->priority_high)
tasklet_hi_schedule(&cohc->tasklet);
else
tasklet_schedule(&cohc->tasklet);
}
- spin_unlock_irqrestore(&cohc->lock, flags);
- if (callback)
- callback(callback_param);
+ spin_unlock_irqrestore(&cohc->lock, flags);
return;
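With pending_irqs gone, the reworked tasklet above relies on a single per-channel counter: dma_tc_handle() increments nbr_active_done under the channel lock for every terminal-count interrupt, and dma_tasklet() completes one descriptor per run, decrements the counter and reschedules itself while it is still non-zero. A condensed sketch of that pattern with hypothetical names, assuming one completion per IRQ:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_chan {
	spinlock_t lock;
	unsigned long nbr_active_done;
	struct tasklet_struct tasklet;
};

/* interrupt side: record one more finished job and kick the tasklet */
static void example_tc_handle(struct example_chan *c)
{
	spin_lock(&c->lock);
	c->nbr_active_done++;
	spin_unlock(&c->lock);

	tasklet_schedule(&c->tasklet);
}

/* tasklet side: retire one job per invocation, re-arm if more came in */
static void example_tasklet(unsigned long data)
{
	struct example_chan *c = (struct example_chan *)data;
	unsigned long flags;

	/* ... pop one finished descriptor and run its callback here ... */

	spin_lock_irqsave(&c->lock, flags);
	c->nbr_active_done--;
	if (c->nbr_active_done)
		tasklet_schedule(&c->tasklet);
	spin_unlock_irqrestore(&c->lock, flags);
}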
@@ -667,16 +672,17 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
if (!cohc->allocated)
return;
- BUG_ON(cohc->pending_irqs == 0);
+ spin_lock(&cohc->lock);
- cohc->pending_irqs--;
cohc->nbr_active_done++;
- if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL)
+ if (coh901318_queue_start(cohc) == NULL)
cohc->busy = 0;
BUG_ON(list_empty(&cohc->active));
+ spin_unlock(&cohc->lock);
+
if (cohc_chan_conf(cohc)->priority_high)
tasklet_hi_schedule(&cohc->tasklet);
else
@@ -870,6 +876,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
struct coh901318_chan *cohc = to_coh901318_chan(chan);
int lli_len;
u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+ int ret;
spin_lock_irqsave(&cohc->lock, flg);
@@ -890,22 +897,19 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (data == NULL)
goto err;
- cohd = coh901318_desc_get(cohc);
- cohd->sg = NULL;
- cohd->sg_len = 0;
- cohd->data = data;
-
- cohd->pending_irqs =
- coh901318_lli_fill_memcpy(
- &cohc->base->pool, data, src, size, dest,
- cohc_chan_param(cohc)->ctrl_lli_chained,
- ctrl_last);
- cohd->flags = flags;
+ ret = coh901318_lli_fill_memcpy(
+ &cohc->base->pool, data, src, size, dest,
+ cohc_chan_param(cohc)->ctrl_lli_chained,
+ ctrl_last);
+ if (ret)
+ goto err;
COH_DBG(coh901318_list_print(cohc, data));
- dma_async_tx_descriptor_init(&cohd->desc, chan);
-
+ /* Pick a descriptor to handle this transfer */
+ cohd = coh901318_desc_get(cohc);
+ cohd->data = data;
+ cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
spin_unlock_irqrestore(&cohc->lock, flg);
@@ -924,6 +928,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct coh901318_chan *cohc = to_coh901318_chan(chan);
struct coh901318_lli *data;
struct coh901318_desc *cohd;
+ const struct coh901318_params *params;
struct scatterlist *sg;
int len = 0;
int size;
@@ -931,7 +936,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+ u32 config;
unsigned long flg;
+ int ret;
if (!sgl)
goto out;
@@ -947,15 +954,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* Trigger interrupt after last lli */
ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
- cohd = coh901318_desc_get(cohc);
- cohd->sg = NULL;
- cohd->sg_len = 0;
- cohd->dir = direction;
+ params = cohc_chan_param(cohc);
+ config = params->config;
if (direction == DMA_TO_DEVICE) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
+ config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY;
ctrl_chained |= tx_flags;
ctrl_last |= tx_flags;
ctrl |= tx_flags;
@@ -963,16 +969,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
+ config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY;
ctrl_chained |= rx_flags;
ctrl_last |= rx_flags;
ctrl |= rx_flags;
} else
goto err_direction;
- dma_async_tx_descriptor_init(&cohd->desc, chan);
-
- cohd->desc.tx_submit = coh901318_tx_submit;
-
+ coh901318_set_conf(cohc, config);
/* The dma only supports transmitting packages up to
* MAX_DMA_PACKET_SIZE. Calculate to total number of
@@ -994,32 +998,37 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
len += factor;
}
+ pr_debug("Allocate %d lli:s for this transfer\n", len);
data = coh901318_lli_alloc(&cohc->base->pool, len);
if (data == NULL)
goto err_dma_alloc;
/* initiate allocated data list */
- cohd->pending_irqs =
- coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
- cohc_dev_addr(cohc),
- ctrl_chained,
- ctrl,
- ctrl_last,
- direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
- cohd->data = data;
-
- cohd->flags = flags;
+ ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
+ cohc_dev_addr(cohc),
+ ctrl_chained,
+ ctrl,
+ ctrl_last,
+ direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
+ if (ret)
+ goto err_lli_fill;
COH_DBG(coh901318_list_print(cohc, data));
+ /* Pick a descriptor to handle this transfer */
+ cohd = coh901318_desc_get(cohc);
+ cohd->dir = direction;
+ cohd->flags = flags;
+ cohd->desc.tx_submit = coh901318_tx_submit;
+ cohd->data = data;
+
spin_unlock_irqrestore(&cohc->lock, flg);
return &cohd->desc;
+ err_lli_fill:
err_dma_alloc:
err_direction:
- coh901318_desc_remove(cohd);
- coh901318_desc_free(cohc, cohd);
spin_unlock_irqrestore(&cohc->lock, flg);
out:
return NULL;
@@ -1092,9 +1101,8 @@ coh901318_terminate_all(struct dma_chan *chan)
/* release the lli allocation*/
coh901318_lli_free(&cohc->base->pool, &cohd->data);
- coh901318_desc_remove(cohd);
-
/* return desc to free-list */
+ coh901318_desc_remove(cohd);
coh901318_desc_free(cohc, cohd);
}
@@ -1102,16 +1110,14 @@ coh901318_terminate_all(struct dma_chan *chan)
/* release the lli allocation*/
coh901318_lli_free(&cohc->base->pool, &cohd->data);
- coh901318_desc_remove(cohd);
-
/* return desc to free-list */
+ coh901318_desc_remove(cohd);
coh901318_desc_free(cohc, cohd);
}
cohc->nbr_active_done = 0;
cohc->busy = 0;
- cohc->pending_irqs = 0;
spin_unlock_irqrestore(&cohc->lock, flags);
}
@@ -1138,7 +1144,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
spin_lock_init(&cohc->lock);
- cohc->pending_irqs = 0;
cohc->nbr_active_done = 0;
cohc->busy = 0;
INIT_LIST_HEAD(&cohc->free);
@@ -1254,12 +1259,17 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
base->dma_memcpy.dev = &pdev->dev;
+ /*
+ * This controller can only access addresses at even 32-bit boundaries,
+ * i.e. aligned to 2^2 bytes.
+ */
+ base->dma_memcpy.copy_align = 2;
err = dma_async_device_register(&base->dma_memcpy);
if (err)
goto err_register_memcpy;
- dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
+ dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
(u32) base->virtbase);
return err;
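The copy_align value set above is the log2 of the required alignment, so 2 means memcpy buffers and lengths must be multiples of 1 << 2 = 4 bytes. A client could check a request against it roughly like this (hypothetical helper, assuming the dma_device pointer is at hand):

#include <linux/dmaengine.h>

static bool example_memcpy_ok(struct dma_device *dma,
			      dma_addr_t src, dma_addr_t dst, size_t len)
{
	size_t align = 1UL << dma->copy_align;	/* copy_align == 2 -> 4 bytes */

	return !(src & (align - 1)) && !(dst & (align - 1)) &&
	       !(len & (align - 1));
}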
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index f5120f238a4..71d58c1a1e8 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -74,6 +74,8 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
lli = head;
lli->phy_this = phy;
+ lli->link_addr = 0x00000000;
+ lli->virt_link_addr = 0x00000000U;
for (i = 1; i < len; i++) {
lli_prev = lli;
@@ -85,13 +87,13 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
DEBUGFS_POOL_COUNTER_ADD(pool, 1);
lli->phy_this = phy;
+ lli->link_addr = 0x00000000;
+ lli->virt_link_addr = 0x00000000U;
lli_prev->link_addr = phy;
lli_prev->virt_link_addr = lli;
}
- lli->link_addr = 0x00000000U;
-
spin_unlock(&pool->lock);
return head;
@@ -166,8 +168,7 @@ coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
lli->src_addr = src;
lli->dst_addr = dst;
- /* One irq per single transfer */
- return 1;
+ return 0;
}
int
@@ -223,8 +224,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
lli->src_addr = src;
lli->dst_addr = dst;
- /* One irq per single transfer */
- return 1;
+ return 0;
}
int
@@ -240,7 +240,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
u32 ctrl_sg;
dma_addr_t src = 0;
dma_addr_t dst = 0;
- int nbr_of_irq = 0;
u32 bytes_to_transfer;
u32 elem_size;
@@ -269,15 +268,12 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
ctrl_sg = ctrl ? ctrl : ctrl_last;
- if ((ctrl_sg & ctrl_irq_mask))
- nbr_of_irq++;
-
if (dir == DMA_TO_DEVICE)
/* increment source address */
- src = sg_dma_address(sg);
+ src = sg_phys(sg);
else
/* increment destination address */
- dst = sg_dma_address(sg);
+ dst = sg_phys(sg);
bytes_to_transfer = sg_dma_len(sg);
@@ -310,8 +306,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
}
spin_unlock(&pool->lock);
- /* There can be many IRQs per sg transfer */
- return nbr_of_irq;
+ return 0;
err:
spin_unlock(&pool->lock);
return -EINVAL;
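The coh901318 changes above rely on each LLI carrying two links: link_addr, a physical address the DMA hardware follows, and virt_link_addr, the corresponding kernel pointer so code such as coh901318_list_print() can walk the chain without phys_to_virt(). A minimal sketch of that dual-link layout (hypothetical struct, modelled on the same idea):

#include <linux/types.h>

/* one physical link for the hardware, one virtual link so the CPU
 * can walk the same chain cheaply */
struct example_lli {
	u32 control;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	dma_addr_t link_addr;			/* 0 terminates the chain for the HW */
	struct example_lli *virt_link_addr;	/* NULL terminates it for the CPU */
};

static unsigned int example_count_llis(struct example_lli *lli)
{
	unsigned int n = 0;

	for (; lli; lli = lli->virt_link_addr)
		n++;

	return n;
}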
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e7a3230fb7d..87399cafce3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -284,7 +284,7 @@ struct dma_chan_tbl_ent {
/**
* channel_table - percpu lookup table for memory-to-memory offload providers
*/
-static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
static int __init dma_channel_table_init(void)
{
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 948d563941c..6fa55fe3dd2 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -237,7 +237,7 @@ static int dmatest_func(void *data)
dma_cookie_t cookie;
enum dma_status status;
enum dma_ctrl_flags flags;
- u8 pq_coefs[pq_sources];
+ u8 pq_coefs[pq_sources + 1];
int ret;
int src_cnt;
int dst_cnt;
@@ -257,7 +257,7 @@ static int dmatest_func(void *data)
} else if (thread->type == DMA_PQ) {
src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
dst_cnt = 2;
- for (i = 0; i < pq_sources; i++)
+ for (i = 0; i < src_cnt; i++)
pq_coefs[i] = 1;
} else
goto err_srcs;
@@ -347,7 +347,7 @@ static int dmatest_func(void *data)
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
dma_dsts[0] + dst_off,
- dma_srcs, xor_sources,
+ dma_srcs, src_cnt,
len, flags);
else if (thread->type == DMA_PQ) {
dma_addr_t dma_pq[dst_cnt];
@@ -355,7 +355,7 @@ static int dmatest_func(void *data)
for (i = 0; i < dst_cnt; i++)
dma_pq[i] = dma_dsts[i] + dst_off;
tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
- pq_sources, pq_coefs,
+ src_cnt, pq_coefs,
len, flags);
}
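The dmatest fix above matters because src_cnt = pq_sources | 1 rounds an even pq_sources up by one to keep the source count odd, so the coefficient array and every loop must be sized and bounded by src_cnt; declaring pq_coefs[pq_sources + 1] covers that worst case. The arithmetic in isolation (standalone illustration):

#include <stdio.h>

int main(void)
{
	unsigned int pq_sources;

	for (pq_sources = 1; pq_sources <= 6; pq_sources++) {
		unsigned int src_cnt = pq_sources | 1;	/* force an odd count */

		/* src_cnt never exceeds pq_sources + 1 */
		printf("pq_sources=%u -> src_cnt=%u\n", pq_sources, src_cnt);
	}

	return 0;
}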
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 296f9e747fa..bbb4be5a3ff 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,19 +37,19 @@
#include <asm/fsldma.h>
#include "fsldma.h"
-static void dma_init(struct fsl_dma_chan *fsl_chan)
+static void dma_init(struct fsldma_chan *chan)
{
/* Reset the channel */
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
+ DMA_OUT(chan, &chan->regs->mr, 0, 32);
- switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
+ switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
/* Set the channel to below modes:
* EIE - Error interrupt enable
* EOSIE - End of segments interrupt enable (basic mode)
* EOLNIE - End of links interrupt enable
*/
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
+ DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE
| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
break;
case FSL_DMA_IP_83XX:
@@ -57,170 +57,146 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
* EOTIE - End-of-transfer interrupt enable
* PRC_RM - PCI read multiple
*/
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
+ DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
| FSL_DMA_MR_PRC_RM, 32);
break;
}
-
}
-static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
+static void set_sr(struct fsldma_chan *chan, u32 val)
{
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
+ DMA_OUT(chan, &chan->regs->sr, val, 32);
}
-static u32 get_sr(struct fsl_dma_chan *fsl_chan)
+static u32 get_sr(struct fsldma_chan *chan)
{
- return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
+ return DMA_IN(chan, &chan->regs->sr, 32);
}
-static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
+static void set_desc_cnt(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, u32 count)
{
- hw->count = CPU_TO_DMA(fsl_chan, count, 32);
+ hw->count = CPU_TO_DMA(chan, count, 32);
}
-static void set_desc_src(struct fsl_dma_chan *fsl_chan,
+static void set_desc_src(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
u64 snoop_bits;
- snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
- hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
+ hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}
-static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
- struct fsl_dma_ld_hw *hw, dma_addr_t dest)
+static void set_desc_dst(struct fsldma_chan *chan,
+ struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
u64 snoop_bits;
- snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
- hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
+ hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}
-static void set_desc_next(struct fsl_dma_chan *fsl_chan,
+static void set_desc_next(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
u64 snoop_bits;
- snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
? FSL_DMA_SNEN : 0;
- hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
-}
-
-static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
-{
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
+ hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}
-static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
+static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
- return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
+ DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}
-static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
+static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
+ return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}
-static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
+static dma_addr_t get_ndar(struct fsldma_chan *chan)
{
- return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
+ return DMA_IN(chan, &chan->regs->ndar, 64);
}
-static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
+static u32 get_bcr(struct fsldma_chan *chan)
{
- return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
+ return DMA_IN(chan, &chan->regs->bcr, 32);
}
-static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
+static int dma_is_idle(struct fsldma_chan *chan)
{
- u32 sr = get_sr(fsl_chan);
+ u32 sr = get_sr(chan);
return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}
-static void dma_start(struct fsl_dma_chan *fsl_chan)
+static void dma_start(struct fsldma_chan *chan)
{
- u32 mr_set = 0;
-
- if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
- mr_set |= FSL_DMA_MR_EMP_EN;
- } else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
- & ~FSL_DMA_MR_EMP_EN, 32);
+ u32 mode;
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+
+ if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+ if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+ DMA_OUT(chan, &chan->regs->bcr, 0, 32);
+ mode |= FSL_DMA_MR_EMP_EN;
+ } else {
+ mode &= ~FSL_DMA_MR_EMP_EN;
+ }
}
- if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
- mr_set |= FSL_DMA_MR_EMS_EN;
+ if (chan->feature & FSL_DMA_CHAN_START_EXT)
+ mode |= FSL_DMA_MR_EMS_EN;
else
- mr_set |= FSL_DMA_MR_CS;
+ mode |= FSL_DMA_MR_CS;
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
- | mr_set, 32);
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
-static void dma_halt(struct fsl_dma_chan *fsl_chan)
+static void dma_halt(struct fsldma_chan *chan)
{
+ u32 mode;
int i;
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
- 32);
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
- | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode |= FSL_DMA_MR_CA;
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
+
+ mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
for (i = 0; i < 100; i++) {
- if (dma_is_idle(fsl_chan))
- break;
+ if (dma_is_idle(chan))
+ return;
+
udelay(10);
}
- if (i >= 100 && !dma_is_idle(fsl_chan))
- dev_err(fsl_chan->dev, "DMA halt timeout!\n");
+
+ if (!dma_is_idle(chan))
+ dev_err(chan->dev, "DMA halt timeout!\n");
}
-static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
+static void set_ld_eol(struct fsldma_chan *chan,
struct fsl_desc_sw *desc)
{
u64 snoop_bits;
- snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
? FSL_DMA_SNEN : 0;
- desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
- DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+ desc->hw.next_ln_addr = CPU_TO_DMA(chan,
+ DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
| snoop_bits, 64);
}
-static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
- struct fsl_desc_sw *new_desc)
-{
- struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
-
- if (list_empty(&fsl_chan->ld_queue))
- return;
-
- /* Link to the new descriptor physical address and
- * Enable End-of-segment interrupt for
- * the last link descriptor.
- * (the previous node's next link descriptor)
- *
- * For FSL_DMA_IP_83xx, the snoop enable bit need be set.
- */
- queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
- new_desc->async_tx.phys | FSL_DMA_EOSIE |
- (((fsl_chan->feature & FSL_DMA_IP_MASK)
- == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
-}
-
/**
* fsl_chan_set_src_loop_size - Set source address hold transfer size
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
* @size : Address loop size, 0 for disable loop
*
* The set source address hold transfer size. The source
@@ -229,29 +205,30 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
* read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
* SA + 1 ... and so on.
*/
-static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
+ u32 mode;
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+
switch (size) {
case 0:
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
- (~FSL_DMA_MR_SAHE), 32);
+ mode &= ~FSL_DMA_MR_SAHE;
break;
case 1:
case 2:
case 4:
case 8:
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
- FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
- 32);
+ mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
break;
}
+
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
/**
- * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
- * @fsl_chan : Freescale DMA channel
+ * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
+ * @chan : Freescale DMA channel
* @size : Address loop size, 0 for disable loop
*
* The set destination address hold transfer size. The destination
@@ -260,29 +237,30 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
* write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
* TA + 1 ... and so on.
*/
-static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
+ u32 mode;
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+
switch (size) {
case 0:
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
- (~FSL_DMA_MR_DAHE), 32);
+ mode &= ~FSL_DMA_MR_DAHE;
break;
case 1:
case 2:
case 4:
case 8:
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
- FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
- 32);
+ mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
break;
}
+
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
/**
* fsl_chan_set_request_count - Set DMA Request Count for external control
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
* @size : Number of bytes to transfer in a single request
*
* The Freescale DMA channel can be controlled by the external signal DREQ#.
@@ -292,35 +270,38 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
*
* A size of 0 disables external pause control. The maximum size is 1024.
*/
-static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
+ u32 mode;
+
BUG_ON(size > 1024);
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
- | ((__ilog2(size) << 24) & 0x0f000000),
- 32);
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode |= (__ilog2(size) << 24) & 0x0f000000;
+
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
/**
* fsl_chan_toggle_ext_pause - Toggle channel external pause status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
* @enable : 0 is disabled, 1 is enabled.
*
* The Freescale DMA channel can be controlled by the external signal DREQ#.
* The DMA Request Count feature should be used in addition to this feature
* to set the number of bytes to transfer before pausing the channel.
*/
-static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
if (enable)
- fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
+ chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
else
- fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
+ chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}
/**
* fsl_chan_toggle_ext_start - Toggle channel external start status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
* @enable : 0 is disabled, 1 is enabled.
*
* If enable the external start, the channel can be started by an
@@ -328,141 +309,196 @@ static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
* transfer immediately. The DMA channel will wait for the
* control pin asserted.
*/
-static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
if (enable)
- fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
+ chan->feature |= FSL_DMA_CHAN_START_EXT;
else
- fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
+ chan->feature &= ~FSL_DMA_CHAN_START_EXT;
+}
+
+static void append_ld_queue(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
+
+ if (list_empty(&chan->ld_pending))
+ goto out_splice;
+
+ /*
+ * Add the hardware descriptor to the chain of hardware descriptors
+ * that already exists in memory.
+ *
+ * This will un-set the EOL bit of the existing transaction, and the
+ * last link in this transaction will become the EOL descriptor.
+ */
+ set_desc_next(chan, &tail->hw, desc->async_tx.phys);
+
+ /*
+ * Add the software descriptor and all children to the list
+ * of pending transactions
+ */
+out_splice:
+ list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+ struct fsldma_chan *chan = to_fsl_chan(tx->chan);
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child;
unsigned long flags;
dma_cookie_t cookie;
- /* cookie increment and adding to ld_queue must be atomic */
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
- cookie = fsl_chan->common.cookie;
+ /*
+ * assign cookies to all of the software descriptors
+ * that make up this transaction
+ */
+ cookie = chan->common.cookie;
list_for_each_entry(child, &desc->tx_list, node) {
cookie++;
if (cookie < 0)
cookie = 1;
- desc->async_tx.cookie = cookie;
+ child->async_tx.cookie = cookie;
}
- fsl_chan->common.cookie = cookie;
- append_ld_queue(fsl_chan, desc);
- list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);
+ chan->common.cookie = cookie;
+
+ /* put this transaction onto the tail of the pending queue */
+ append_ld_queue(chan, desc);
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
return cookie;
}
/**
* fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*
* Return - The descriptor allocated. NULL for failed.
*/
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
- struct fsl_dma_chan *fsl_chan)
+ struct fsldma_chan *chan)
{
+ struct fsl_desc_sw *desc;
dma_addr_t pdesc;
- struct fsl_desc_sw *desc_sw;
-
- desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
- if (desc_sw) {
- memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
- INIT_LIST_HEAD(&desc_sw->tx_list);
- dma_async_tx_descriptor_init(&desc_sw->async_tx,
- &fsl_chan->common);
- desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
- desc_sw->async_tx.phys = pdesc;
+
+ desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+ if (!desc) {
+ dev_dbg(chan->dev, "out of memory for link desc\n");
+ return NULL;
}
- return desc_sw;
+ memset(desc, 0, sizeof(*desc));
+ INIT_LIST_HEAD(&desc->tx_list);
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = fsl_dma_tx_submit;
+ desc->async_tx.phys = pdesc;
+
+ return desc;
}
/**
* fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*
* This function will create a dma pool for descriptor allocation.
*
* Return - The number of descriptors allocated.
*/
-static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
+static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
/* Has this channel already been allocated? */
- if (fsl_chan->desc_pool)
+ if (chan->desc_pool)
return 1;
- /* We need the descriptor to be aligned to 32bytes
+ /*
+ * We need the descriptor to be aligned to 32bytes
* for meeting FSL DMA specification requirement.
*/
- fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
- fsl_chan->dev, sizeof(struct fsl_desc_sw),
- 32, 0);
- if (!fsl_chan->desc_pool) {
- dev_err(fsl_chan->dev, "No memory for channel %d "
- "descriptor dma pool.\n", fsl_chan->id);
- return 0;
+ chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
+ chan->dev,
+ sizeof(struct fsl_desc_sw),
+ __alignof__(struct fsl_desc_sw), 0);
+ if (!chan->desc_pool) {
+ dev_err(chan->dev, "unable to allocate channel %d "
+ "descriptor pool\n", chan->id);
+ return -ENOMEM;
}
+ /* there is at least one descriptor free to be allocated */
return 1;
}
/**
- * fsl_dma_free_chan_resources - Free all resources of the channel.
- * @fsl_chan : Freescale DMA channel
+ * fsldma_free_desc_list - Free all descriptors in a queue
+ * @chan: Freescale DMA channel
+ * @list: the list to free
+ *
+ * LOCKING: must hold chan->desc_lock
*/
-static void fsl_dma_free_chan_resources(struct dma_chan *chan)
+static void fsldma_free_desc_list(struct fsldma_chan *chan,
+ struct list_head *list)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
struct fsl_desc_sw *desc, *_desc;
- unsigned long flags;
- dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
- list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
-#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(fsl_chan->dev,
- "LD %p will be released.\n", desc);
-#endif
+ list_for_each_entry_safe(desc, _desc, list, node) {
+ list_del(&desc->node);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+ }
+}
+
+static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
+ struct list_head *list)
+{
+ struct fsl_desc_sw *desc, *_desc;
+
+ list_for_each_entry_safe_reverse(desc, _desc, list, node) {
list_del(&desc->node);
- /* free link descriptor */
- dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
- dma_pool_destroy(fsl_chan->desc_pool);
+}
+
+/**
+ * fsl_dma_free_chan_resources - Free all resources of the channel.
+ * @chan : Freescale DMA channel
+ */
+static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
+ unsigned long flags;
+
+ dev_dbg(chan->dev, "Free all channel resources.\n");
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ fsldma_free_desc_list(chan, &chan->ld_pending);
+ fsldma_free_desc_list(chan, &chan->ld_running);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
- fsl_chan->desc_pool = NULL;
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
}
static struct dma_async_tx_descriptor *
-fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
+fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
- struct fsl_dma_chan *fsl_chan;
+ struct fsldma_chan *chan;
struct fsl_desc_sw *new;
- if (!chan)
+ if (!dchan)
return NULL;
- fsl_chan = to_fsl_chan(chan);
+ chan = to_fsl_chan(dchan);
- new = fsl_dma_alloc_descriptor(fsl_chan);
+ new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
+ dev_err(chan->dev, "No free memory for link descriptor\n");
return NULL;
}
@@ -473,51 +509,50 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
list_add_tail(&new->node, &new->tx_list);
/* Set End-of-link to the last link descriptor of new list*/
- set_ld_eol(fsl_chan, new);
+ set_ld_eol(chan, new);
return &new->async_tx;
}
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
- struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+ struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
size_t len, unsigned long flags)
{
- struct fsl_dma_chan *fsl_chan;
+ struct fsldma_chan *chan;
struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
- struct list_head *list;
size_t copy;
- if (!chan)
+ if (!dchan)
return NULL;
if (!len)
return NULL;
- fsl_chan = to_fsl_chan(chan);
+ chan = to_fsl_chan(dchan);
do {
/* Allocate the link descriptor from DMA pool */
- new = fsl_dma_alloc_descriptor(fsl_chan);
+ new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(fsl_chan->dev,
+ dev_err(chan->dev,
"No free memory for link descriptor\n");
goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+ dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif
copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
- set_desc_cnt(fsl_chan, &new->hw, copy);
- set_desc_src(fsl_chan, &new->hw, dma_src);
- set_desc_dest(fsl_chan, &new->hw, dma_dest);
+ set_desc_cnt(chan, &new->hw, copy);
+ set_desc_src(chan, &new->hw, dma_src);
+ set_desc_dst(chan, &new->hw, dma_dst);
if (!first)
first = new;
else
- set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);
+ set_desc_next(chan, &prev->hw, new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -525,7 +560,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
prev = new;
len -= copy;
dma_src += copy;
- dma_dest += copy;
+ dma_dst += copy;
/* Insert the link descriptor to the LD ring */
list_add_tail(&new->node, &first->tx_list);
@@ -535,7 +570,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
new->async_tx.cookie = -EBUSY;
/* Set End-of-link to the last link descriptor of new list*/
- set_ld_eol(fsl_chan, new);
+ set_ld_eol(chan, new);
return &first->async_tx;
@@ -543,12 +578,7 @@ fail:
if (!first)
return NULL;
- list = &first->tx_list;
- list_for_each_entry_safe_reverse(new, prev, list, node) {
- list_del(&new->node);
- dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
- }
-
+ fsldma_free_desc_list_reverse(chan, &first->tx_list);
return NULL;
}
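fsl_dma_prep_memcpy() above turns one logical copy into a chain of link descriptors, clamping each one to FSL_DMA_BCR_MAX_CNT bytes and advancing the source and destination addresses by the amount actually programmed. The chunking loop in isolation looks roughly like this (standalone sketch; the byte limit shown is an assumption, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_BCR_MAX_CNT ((1u << 26) - 1)	/* assumed per-descriptor byte limit */

int main(void)
{
	uint64_t src = 0x10000000, dst = 0x80000000;
	uint64_t len = 200ull * 1024 * 1024;	/* 200 MiB copy */
	unsigned int descs = 0;

	while (len) {
		uint64_t copy = len < EXAMPLE_BCR_MAX_CNT ? len : EXAMPLE_BCR_MAX_CNT;

		/* one hardware descriptor covers [src, src+copy) -> [dst, dst+copy) */
		src += copy;
		dst += copy;
		len -= copy;
		descs++;
	}

	printf("copy split into %u link descriptors\n", descs);
	return 0;
}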
@@ -565,13 +595,12 @@ fail:
* chan->private variable.
*/
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_data_direction direction, unsigned long flags)
{
- struct fsl_dma_chan *fsl_chan;
+ struct fsldma_chan *chan;
struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
struct fsl_dma_slave *slave;
- struct list_head *tx_list;
size_t copy;
int i;
@@ -581,14 +610,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
struct fsl_dma_hw_addr *hw;
dma_addr_t dma_dst, dma_src;
- if (!chan)
+ if (!dchan)
return NULL;
- if (!chan->private)
+ if (!dchan->private)
return NULL;
- fsl_chan = to_fsl_chan(chan);
- slave = chan->private;
+ chan = to_fsl_chan(dchan);
+ slave = dchan->private;
if (list_empty(&slave->addresses))
return NULL;
@@ -637,14 +666,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
}
/* Allocate the link descriptor from DMA pool */
- new = fsl_dma_alloc_descriptor(fsl_chan);
+ new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(fsl_chan->dev, "No free memory for "
+ dev_err(chan->dev, "No free memory for "
"link descriptor\n");
goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+ dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif
/*
@@ -671,9 +700,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
}
/* Fill in the descriptor */
- set_desc_cnt(fsl_chan, &new->hw, copy);
- set_desc_src(fsl_chan, &new->hw, dma_src);
- set_desc_dest(fsl_chan, &new->hw, dma_dst);
+ set_desc_cnt(chan, &new->hw, copy);
+ set_desc_src(chan, &new->hw, dma_src);
+ set_desc_dst(chan, &new->hw, dma_dst);
/*
* If this is not the first descriptor, chain the
@@ -682,7 +711,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
if (!first) {
first = new;
} else {
- set_desc_next(fsl_chan, &prev->hw,
+ set_desc_next(chan, &prev->hw,
new->async_tx.phys);
}
@@ -708,23 +737,23 @@ finished:
new->async_tx.cookie = -EBUSY;
/* Set End-of-link to the last link descriptor of new list */
- set_ld_eol(fsl_chan, new);
+ set_ld_eol(chan, new);
/* Enable extra controller features */
- if (fsl_chan->set_src_loop_size)
- fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);
+ if (chan->set_src_loop_size)
+ chan->set_src_loop_size(chan, slave->src_loop_size);
- if (fsl_chan->set_dest_loop_size)
- fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size);
+ if (chan->set_dst_loop_size)
+ chan->set_dst_loop_size(chan, slave->dst_loop_size);
- if (fsl_chan->toggle_ext_start)
- fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);
+ if (chan->toggle_ext_start)
+ chan->toggle_ext_start(chan, slave->external_start);
- if (fsl_chan->toggle_ext_pause)
- fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);
+ if (chan->toggle_ext_pause)
+ chan->toggle_ext_pause(chan, slave->external_pause);
- if (fsl_chan->set_request_count)
- fsl_chan->set_request_count(fsl_chan, slave->request_count);
+ if (chan->set_request_count)
+ chan->set_request_count(chan, slave->request_count);
return &first->async_tx;
@@ -741,215 +770,216 @@ fail:
*
* We're re-using variables for the loop, oh well
*/
- tx_list = &first->tx_list;
- list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
- list_del_init(&new->node);
- dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
- }
-
+ fsldma_free_desc_list_reverse(chan, &first->tx_list);
return NULL;
}
-static void fsl_dma_device_terminate_all(struct dma_chan *chan)
+static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
- struct fsl_dma_chan *fsl_chan;
- struct fsl_desc_sw *desc, *tmp;
+ struct fsldma_chan *chan;
unsigned long flags;
- if (!chan)
+ if (!dchan)
return;
- fsl_chan = to_fsl_chan(chan);
+ chan = to_fsl_chan(dchan);
/* Halt the DMA engine */
- dma_halt(fsl_chan);
+ dma_halt(chan);
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
/* Remove and free all of the descriptors in the LD queue */
- list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
- list_del(&desc->node);
- dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
- }
+ fsldma_free_desc_list(chan, &chan->ld_pending);
+ fsldma_free_desc_list(chan, &chan->ld_running);
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/**
* fsl_dma_update_completed_cookie - Update the completed cookie.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
+ *
+ * CONTEXT: hardirq
*/
-static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
+static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
{
- struct fsl_desc_sw *cur_desc, *desc;
- dma_addr_t ld_phy;
+ struct fsl_desc_sw *desc;
+ unsigned long flags;
+ dma_cookie_t cookie;
- ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;
+ spin_lock_irqsave(&chan->desc_lock, flags);
- if (ld_phy) {
- cur_desc = NULL;
- list_for_each_entry(desc, &fsl_chan->ld_queue, node)
- if (desc->async_tx.phys == ld_phy) {
- cur_desc = desc;
- break;
- }
+ if (list_empty(&chan->ld_running)) {
+ dev_dbg(chan->dev, "no running descriptors\n");
+ goto out_unlock;
+ }
- if (cur_desc && cur_desc->async_tx.cookie) {
- if (dma_is_idle(fsl_chan))
- fsl_chan->completed_cookie =
- cur_desc->async_tx.cookie;
- else
- fsl_chan->completed_cookie =
- cur_desc->async_tx.cookie - 1;
- }
+ /* Get the last descriptor, update the cookie to that */
+ desc = to_fsl_desc(chan->ld_running.prev);
+ if (dma_is_idle(chan))
+ cookie = desc->async_tx.cookie;
+ else {
+ cookie = desc->async_tx.cookie - 1;
+ if (unlikely(cookie < DMA_MIN_COOKIE))
+ cookie = DMA_MAX_COOKIE;
}
+
+ chan->completed_cookie = cookie;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+}
+
+/**
+ * fsldma_desc_status - Check the status of a descriptor
+ * @chan: Freescale DMA channel
+ * @desc: DMA SW descriptor
+ *
+ * This function will return the status of the given descriptor
+ */
+static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ return dma_async_is_complete(desc->async_tx.cookie,
+ chan->completed_cookie,
+ chan->common.cookie);
}
/**
* fsl_chan_ld_cleanup - Clean up link descriptors
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*
* This function clean up the ld_queue of DMA channel.
- * If 'in_intr' is set, the function will move the link descriptor to
- * the recycle list. Otherwise, free it directly.
*/
-static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
+static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
struct fsl_desc_sw *desc, *_desc;
unsigned long flags;
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
- dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
- fsl_chan->completed_cookie);
- list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
+ dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
+ list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
dma_async_tx_callback callback;
void *callback_param;
- if (dma_async_is_complete(desc->async_tx.cookie,
- fsl_chan->completed_cookie, fsl_chan->common.cookie)
- == DMA_IN_PROGRESS)
+ if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
break;
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
-
- /* Remove from ld_queue list */
+ /* Remove from the list of running transactions */
list_del(&desc->node);
- dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n",
- desc);
- dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
-
/* Run the link descriptor callback function */
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
if (callback) {
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
- dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
- desc);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ dev_dbg(chan->dev, "LD %p callback\n", desc);
callback(callback_param);
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
}
+
+ /* Run any dependencies, then free the descriptor */
+ dma_run_dependencies(&desc->async_tx);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/**
- * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
- * @fsl_chan : Freescale DMA channel
+ * fsl_chan_xfer_ld_queue - transfer any pending transactions
+ * @chan : Freescale DMA channel
+ *
+ * This will make sure that any pending transactions will be run.
+ * If the DMA controller is idle, it will be started. Otherwise,
+ * the DMA controller's interrupt handler will start any pending
+ * transactions when it becomes idle.
*/
-static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
+static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
- struct list_head *ld_node;
- dma_addr_t next_dest_addr;
+ struct fsl_desc_sw *desc;
unsigned long flags;
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
- if (!dma_is_idle(fsl_chan))
+ /*
+ * If the list of pending descriptors is empty, then we
+ * don't need to do any work at all
+ */
+ if (list_empty(&chan->ld_pending)) {
+ dev_dbg(chan->dev, "no pending LDs\n");
goto out_unlock;
+ }
- dma_halt(fsl_chan);
+ /*
+ * The DMA controller is not idle, which means the interrupt
+ * handler will start any queued transactions when it runs
+ * at the end of the current transaction
+ */
+ if (!dma_is_idle(chan)) {
+ dev_dbg(chan->dev, "DMA controller still busy\n");
+ goto out_unlock;
+ }
- /* If there are some link descriptors
- * not transfered in queue. We need to start it.
+ /*
+ * TODO:
+ * make sure the dma_halt() function really un-wedges the
+ * controller as much as possible
*/
+ dma_halt(chan);
- /* Find the first un-transfer desciptor */
- for (ld_node = fsl_chan->ld_queue.next;
- (ld_node != &fsl_chan->ld_queue)
- && (dma_async_is_complete(
- to_fsl_desc(ld_node)->async_tx.cookie,
- fsl_chan->completed_cookie,
- fsl_chan->common.cookie) == DMA_SUCCESS);
- ld_node = ld_node->next);
-
- if (ld_node != &fsl_chan->ld_queue) {
- /* Get the ld start address from ld_queue */
- next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
- dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n",
- (unsigned long long)next_dest_addr);
- set_cdar(fsl_chan, next_dest_addr);
- dma_start(fsl_chan);
- } else {
- set_cdar(fsl_chan, 0);
- set_ndar(fsl_chan, 0);
- }
+ /*
+ * If there are some link descriptors which have not been
+ * transferred, we need to start the controller
+ */
+
+ /*
+ * Move all elements from the queue of pending transactions
+ * onto the list of running transactions
+ */
+ desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
+ list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
+
+ /*
+ * Program the descriptor's address into the DMA controller,
+ * then start the DMA transaction
+ */
+ set_cdar(chan, desc->async_tx.phys);
+ dma_start(chan);
out_unlock:
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/**
* fsl_dma_memcpy_issue_pending - Issue the DMA start command
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*/
-static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
+static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
-
-#ifdef FSL_DMA_LD_DEBUG
- struct fsl_desc_sw *ld;
- unsigned long flags;
-
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
- if (list_empty(&fsl_chan->ld_queue)) {
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
- return;
- }
-
- dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
- list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
- int i;
- dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
- fsl_chan->id, ld->async_tx.phys);
- for (i = 0; i < 8; i++)
- dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
- i, *(((u32 *)&ld->hw) + i));
- }
- dev_dbg(fsl_chan->dev, "----------------\n");
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-#endif
-
- fsl_chan_xfer_ld_queue(fsl_chan);
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
+ fsl_chan_xfer_ld_queue(chan);
}
/**
* fsl_dma_is_complete - Determine the DMA status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*/
-static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
+static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
dma_cookie_t cookie,
dma_cookie_t *done,
dma_cookie_t *used)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
- fsl_chan_ld_cleanup(fsl_chan);
+ fsl_chan_ld_cleanup(chan);
- last_used = chan->cookie;
- last_complete = fsl_chan->completed_cookie;
+ last_used = dchan->cookie;
+ last_complete = chan->completed_cookie;
if (done)
*done = last_complete;
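The rewritten fsl_chan_xfer_ld_queue() above is the heart of the new two-list scheme: descriptors submitted by fsl_dma_tx_submit() accumulate on ld_pending, and when the hardware is idle the whole batch is spliced onto ld_running and the first descriptor's physical address is programmed before the channel is started. A condensed sketch of that idea, with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_desc {
	struct list_head node;
	dma_addr_t phys;		/* hardware address of the descriptor */
};

struct example_chan {
	spinlock_t lock;
	struct list_head ld_pending;	/* submitted, not yet given to the HW */
	struct list_head ld_running;	/* currently owned by the HW */
};

/* kick the hardware when it is idle and work is queued; the callback
 * stands in for set_cdar() + dma_start() */
static void example_xfer_pending(struct example_chan *c,
				 void (*program_and_start)(dma_addr_t))
{
	struct example_desc *first;
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);

	if (list_empty(&c->ld_pending))
		goto out;

	/* hand the whole pending batch to the hardware in one go */
	first = list_first_entry(&c->ld_pending, struct example_desc, node);
	list_splice_tail_init(&c->ld_pending, &c->ld_running);

	program_and_start(first->phys);
out:
	spin_unlock_irqrestore(&c->lock, flags);
}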
@@ -960,32 +990,37 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
return dma_async_is_complete(cookie, last_complete, last_used);
}
-static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
+/*----------------------------------------------------------------------------*/
+/* Interrupt Handling */
+/*----------------------------------------------------------------------------*/
+
+static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
- struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
- u32 stat;
+ struct fsldma_chan *chan = data;
int update_cookie = 0;
int xfer_ld_q = 0;
+ u32 stat;
- stat = get_sr(fsl_chan);
- dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
- fsl_chan->id, stat);
- set_sr(fsl_chan, stat); /* Clear the event register */
+ /* save and clear the status register */
+ stat = get_sr(chan);
+ set_sr(chan, stat);
+ dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);
stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
if (!stat)
return IRQ_NONE;
if (stat & FSL_DMA_SR_TE)
- dev_err(fsl_chan->dev, "Transfer Error!\n");
+ dev_err(chan->dev, "Transfer Error!\n");
- /* Programming Error
+ /*
+ * Programming Error
* The DMA_INTERRUPT async_tx is a NULL transfer, which will
* triger a PE interrupt.
*/
if (stat & FSL_DMA_SR_PE) {
- dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
- if (get_bcr(fsl_chan) == 0) {
+ dev_dbg(chan->dev, "irq: Programming Error INT\n");
+ if (get_bcr(chan) == 0) {
/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
* Now, update the completed cookie, and continue the
* next uncompleted transfer.
@@ -996,208 +1031,296 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
stat &= ~FSL_DMA_SR_PE;
}
- /* If the link descriptor segment transfer finishes,
+ /*
+ * If the link descriptor segment transfer finishes,
* we will recycle the used descriptor.
*/
if (stat & FSL_DMA_SR_EOSI) {
- dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
- dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
- (unsigned long long)get_cdar(fsl_chan),
- (unsigned long long)get_ndar(fsl_chan));
+ dev_dbg(chan->dev, "irq: End-of-segments INT\n");
+ dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
+ (unsigned long long)get_cdar(chan),
+ (unsigned long long)get_ndar(chan));
stat &= ~FSL_DMA_SR_EOSI;
update_cookie = 1;
}
- /* For MPC8349, EOCDI event need to update cookie
+ /*
+ * For MPC8349, EOCDI event need to update cookie
* and start the next transfer if it exist.
*/
if (stat & FSL_DMA_SR_EOCDI) {
- dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
+ dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
stat &= ~FSL_DMA_SR_EOCDI;
update_cookie = 1;
xfer_ld_q = 1;
}
- /* If it current transfer is the end-of-transfer,
+ /*
+ * If it current transfer is the end-of-transfer,
* we should clear the Channel Start bit for
* prepare next transfer.
*/
if (stat & FSL_DMA_SR_EOLNI) {
- dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
+ dev_dbg(chan->dev, "irq: End-of-link INT\n");
stat &= ~FSL_DMA_SR_EOLNI;
xfer_ld_q = 1;
}
if (update_cookie)
- fsl_dma_update_completed_cookie(fsl_chan);
+ fsl_dma_update_completed_cookie(chan);
if (xfer_ld_q)
- fsl_chan_xfer_ld_queue(fsl_chan);
+ fsl_chan_xfer_ld_queue(chan);
if (stat)
- dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
- stat);
+ dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);
- dev_dbg(fsl_chan->dev, "event: Exit\n");
- tasklet_schedule(&fsl_chan->tasklet);
+ dev_dbg(chan->dev, "irq: Exit\n");
+ tasklet_schedule(&chan->tasklet);
return IRQ_HANDLED;
}
-static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
+static void dma_do_tasklet(unsigned long data)
{
- struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
- u32 gsr;
- int ch_nr;
+ struct fsldma_chan *chan = (struct fsldma_chan *)data;
+ fsl_chan_ld_cleanup(chan);
+}
+
+static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
+{
+ struct fsldma_device *fdev = data;
+ struct fsldma_chan *chan;
+ unsigned int handled = 0;
+ u32 gsr, mask;
+ int i;
- gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
- : in_le32(fdev->reg_base);
- ch_nr = (32 - ffs(gsr)) / 8;
+ gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
+ : in_le32(fdev->regs);
+ mask = 0xff000000;
+ dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ if (gsr & mask) {
+ dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
+ fsldma_chan_irq(irq, chan);
+ handled++;
+ }
- return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
- fdev->chan[ch_nr]) : IRQ_NONE;
+ gsr &= ~mask;
+ mask >>= 8;
+ }
+
+ return IRQ_RETVAL(handled);
}
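[Editor's note: the controller status register read above packs one status byte per channel, most-significant byte first, which is why the loop starts with a 0xff000000 mask and shifts it right by 8 for each channel. A minimal equivalent for isolating one channel's bits (hypothetical helper, not part of the patch):]

static inline u32 fsldma_gsr_chan_bits(u32 gsr, int id)
{
	/* channel 0 occupies bits 31..24, channel 1 bits 23..16, ... */
	return (gsr >> (24 - 8 * id)) & 0xff;
}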
-static void dma_do_tasklet(unsigned long data)
+static void fsldma_free_irqs(struct fsldma_device *fdev)
{
- struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
- fsl_chan_ld_cleanup(fsl_chan);
+ struct fsldma_chan *chan;
+ int i;
+
+ if (fdev->irq != NO_IRQ) {
+ dev_dbg(fdev->dev, "free per-controller IRQ\n");
+ free_irq(fdev->irq, fdev);
+ return;
+ }
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (chan && chan->irq != NO_IRQ) {
+ dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
+ free_irq(chan->irq, chan);
+ }
+ }
}
-static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
+static int fsldma_request_irqs(struct fsldma_device *fdev)
+{
+ struct fsldma_chan *chan;
+ int ret;
+ int i;
+
+ /* if we have a per-controller IRQ, use that */
+ if (fdev->irq != NO_IRQ) {
+ dev_dbg(fdev->dev, "request per-controller IRQ\n");
+ ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
+ "fsldma-controller", fdev);
+ return ret;
+ }
+
+ /* no per-controller IRQ, use the per-channel IRQs */
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ if (chan->irq == NO_IRQ) {
+ dev_err(fdev->dev, "no interrupts property defined for "
+ "DMA channel %d. Please fix your "
+ "device tree\n", chan->id);
+ ret = -ENODEV;
+ goto out_unwind;
+ }
+
+ dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
+ ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
+ "fsldma-chan", chan);
+ if (ret) {
+ dev_err(fdev->dev, "unable to request IRQ for DMA "
+ "channel %d\n", chan->id);
+ goto out_unwind;
+ }
+ }
+
+ return 0;
+
+out_unwind:
+ for (/* none */; i >= 0; i--) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ if (chan->irq == NO_IRQ)
+ continue;
+
+ free_irq(chan->irq, chan);
+ }
+
+ return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+/* OpenFirmware Subsystem */
+/*----------------------------------------------------------------------------*/
+
+static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
struct device_node *node, u32 feature, const char *compatible)
{
- struct fsl_dma_chan *new_fsl_chan;
+ struct fsldma_chan *chan;
+ struct resource res;
int err;
/* alloc channel */
- new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
- if (!new_fsl_chan) {
- dev_err(fdev->dev, "No free memory for allocating "
- "dma channels!\n");
- return -ENOMEM;
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan) {
+ dev_err(fdev->dev, "no free memory for DMA channels!\n");
+ err = -ENOMEM;
+ goto out_return;
}
- /* get dma channel register base */
- err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
- if (err) {
- dev_err(fdev->dev, "Can't get %s property 'reg'\n",
- node->full_name);
- goto err_no_reg;
+ /* ioremap registers for use */
+ chan->regs = of_iomap(node, 0);
+ if (!chan->regs) {
+ dev_err(fdev->dev, "unable to ioremap registers\n");
+ err = -ENOMEM;
+ goto out_free_chan;
}
- new_fsl_chan->feature = feature;
+ err = of_address_to_resource(node, 0, &res);
+ if (err) {
+ dev_err(fdev->dev, "unable to find 'reg' property\n");
+ goto out_iounmap_regs;
+ }
+ chan->feature = feature;
if (!fdev->feature)
- fdev->feature = new_fsl_chan->feature;
+ fdev->feature = chan->feature;
- /* If the DMA device's feature is different than its channels',
- * report the bug.
+ /*
+ * If the DMA device's feature is different than the feature
+ * of its channels, report the bug
*/
- WARN_ON(fdev->feature != new_fsl_chan->feature);
+ WARN_ON(fdev->feature != chan->feature);
- new_fsl_chan->dev = fdev->dev;
- new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
- new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
-
- new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
- if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
- dev_err(fdev->dev, "There is no %d channel!\n",
- new_fsl_chan->id);
+ chan->dev = fdev->dev;
+ chan->id = ((res.start - 0x100) & 0xfff) >> 7;
+ if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
+ dev_err(fdev->dev, "too many channels for device\n");
err = -EINVAL;
- goto err_no_chan;
+ goto out_iounmap_regs;
}
- fdev->chan[new_fsl_chan->id] = new_fsl_chan;
- tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
- (unsigned long)new_fsl_chan);
- /* Init the channel */
- dma_init(new_fsl_chan);
+ fdev->chan[chan->id] = chan;
+ tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+
+ /* Initialize the channel */
+ dma_init(chan);
/* Clear cdar registers */
- set_cdar(new_fsl_chan, 0);
+ set_cdar(chan, 0);
- switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
+ switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
- new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
+ chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
case FSL_DMA_IP_83XX:
- new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
- new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
- new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
- new_fsl_chan->set_request_count = fsl_chan_set_request_count;
+ chan->toggle_ext_start = fsl_chan_toggle_ext_start;
+ chan->set_src_loop_size = fsl_chan_set_src_loop_size;
+ chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
+ chan->set_request_count = fsl_chan_set_request_count;
}
- spin_lock_init(&new_fsl_chan->desc_lock);
- INIT_LIST_HEAD(&new_fsl_chan->ld_queue);
+ spin_lock_init(&chan->desc_lock);
+ INIT_LIST_HEAD(&chan->ld_pending);
+ INIT_LIST_HEAD(&chan->ld_running);
+
+ chan->common.device = &fdev->common;
- new_fsl_chan->common.device = &fdev->common;
+ /* find the IRQ line, if it exists in the device tree */
+ chan->irq = irq_of_parse_and_map(node, 0);
/* Add the channel to DMA device channel list */
- list_add_tail(&new_fsl_chan->common.device_node,
- &fdev->common.channels);
+ list_add_tail(&chan->common.device_node, &fdev->common.channels);
fdev->common.chancnt++;
- new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
- if (new_fsl_chan->irq != NO_IRQ) {
- err = request_irq(new_fsl_chan->irq,
- &fsl_dma_chan_do_interrupt, IRQF_SHARED,
- "fsldma-channel", new_fsl_chan);
- if (err) {
- dev_err(fdev->dev, "DMA channel %s request_irq error "
- "with return %d\n", node->full_name, err);
- goto err_no_irq;
- }
- }
-
- dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
- compatible,
- new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);
+ dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
+ chan->irq != NO_IRQ ? chan->irq : fdev->irq);
return 0;
-err_no_irq:
- list_del(&new_fsl_chan->common.device_node);
-err_no_chan:
- iounmap(new_fsl_chan->reg_base);
-err_no_reg:
- kfree(new_fsl_chan);
+out_iounmap_regs:
+ iounmap(chan->regs);
+out_free_chan:
+ kfree(chan);
+out_return:
return err;
}
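[Editor's note: the chan->id calculation above assumes the per-channel register blocks sit at controller offset 0x100 + 0x80 * id. A quick check of the arithmetic under that assumption:]

/* offset 0x100: ((0x100 - 0x100) & 0xfff) >> 7 = 0  -> channel 0 */
/* offset 0x180: ((0x180 - 0x100) & 0xfff) >> 7 = 1  -> channel 1 */
/* offset 0x280: ((0x280 - 0x100) & 0xfff) >> 7 = 3  -> channel 3 */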
-static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
+static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
- if (fchan->irq != NO_IRQ)
- free_irq(fchan->irq, fchan);
- list_del(&fchan->common.device_node);
- iounmap(fchan->reg_base);
- kfree(fchan);
+ irq_dispose_mapping(chan->irq);
+ list_del(&chan->common.device_node);
+ iounmap(chan->regs);
+ kfree(chan);
}
-static int __devinit of_fsl_dma_probe(struct of_device *dev,
+static int __devinit fsldma_of_probe(struct of_device *op,
const struct of_device_id *match)
{
- int err;
- struct fsl_dma_device *fdev;
+ struct fsldma_device *fdev;
struct device_node *child;
+ int err;
- fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
+ fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
if (!fdev) {
- dev_err(&dev->dev, "No enough memory for 'priv'\n");
- return -ENOMEM;
+ dev_err(&op->dev, "No enough memory for 'priv'\n");
+ err = -ENOMEM;
+ goto out_return;
}
- fdev->dev = &dev->dev;
+
+ fdev->dev = &op->dev;
INIT_LIST_HEAD(&fdev->common.channels);
- /* get DMA controller register base */
- err = of_address_to_resource(dev->node, 0, &fdev->reg);
- if (err) {
- dev_err(&dev->dev, "Can't get %s property 'reg'\n",
- dev->node->full_name);
- goto err_no_reg;
+ /* ioremap the registers for use */
+ fdev->regs = of_iomap(op->node, 0);
+ if (!fdev->regs) {
+ dev_err(&op->dev, "unable to ioremap registers\n");
+ err = -ENOMEM;
+ goto out_free_fdev;
}
- dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
- "controller at 0x%llx...\n",
- match->compatible, (unsigned long long)fdev->reg.start);
- fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
- - fdev->reg.start + 1);
+ /* map the channel IRQ if it exists, but don't hookup the handler yet */
+ fdev->irq = irq_of_parse_and_map(op->node, 0);
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
@@ -1210,103 +1333,111 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
- fdev->common.dev = &dev->dev;
+ fdev->common.dev = &op->dev;
- fdev->irq = irq_of_parse_and_map(dev->node, 0);
- if (fdev->irq != NO_IRQ) {
- err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
- "fsldma-device", fdev);
- if (err) {
- dev_err(&dev->dev, "DMA device request_irq error "
- "with return %d\n", err);
- goto err;
- }
- }
-
- dev_set_drvdata(&(dev->dev), fdev);
+ dev_set_drvdata(&op->dev, fdev);
- /* We cannot use of_platform_bus_probe() because there is no
- * of_platform_bus_remove. Instead, we manually instantiate every DMA
+ /*
+ * We cannot use of_platform_bus_probe() because there is no
+ * of_platform_bus_remove(). Instead, we manually instantiate every DMA
* channel object.
*/
- for_each_child_of_node(dev->node, child) {
- if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
+ for_each_child_of_node(op->node, child) {
+ if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
fsl_dma_chan_probe(fdev, child,
FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
"fsl,eloplus-dma-channel");
- if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
+ }
+
+ if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
fsl_dma_chan_probe(fdev, child,
FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
"fsl,elo-dma-channel");
+ }
+ }
+
+ /*
+ * Hookup the IRQ handler(s)
+ *
+ * If we have a per-controller interrupt, we prefer that to the
+ * per-channel interrupts to reduce the number of shared interrupt
+ * handlers on the same IRQ line
+ */
+ err = fsldma_request_irqs(fdev);
+ if (err) {
+ dev_err(fdev->dev, "unable to request IRQs\n");
+ goto out_free_fdev;
}
dma_async_device_register(&fdev->common);
return 0;
-err:
- iounmap(fdev->reg_base);
-err_no_reg:
+out_free_fdev:
+ irq_dispose_mapping(fdev->irq);
kfree(fdev);
+out_return:
return err;
}
-static int of_fsl_dma_remove(struct of_device *of_dev)
+static int fsldma_of_remove(struct of_device *op)
{
- struct fsl_dma_device *fdev;
+ struct fsldma_device *fdev;
unsigned int i;
- fdev = dev_get_drvdata(&of_dev->dev);
-
+ fdev = dev_get_drvdata(&op->dev);
dma_async_device_unregister(&fdev->common);
- for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
+ fsldma_free_irqs(fdev);
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
if (fdev->chan[i])
fsl_dma_chan_remove(fdev->chan[i]);
+ }
- if (fdev->irq != NO_IRQ)
- free_irq(fdev->irq, fdev);
-
- iounmap(fdev->reg_base);
-
+ iounmap(fdev->regs);
+ dev_set_drvdata(&op->dev, NULL);
kfree(fdev);
- dev_set_drvdata(&of_dev->dev, NULL);
return 0;
}
-static struct of_device_id of_fsl_dma_ids[] = {
+static const struct of_device_id fsldma_of_ids[] = {
{ .compatible = "fsl,eloplus-dma", },
{ .compatible = "fsl,elo-dma", },
{}
};
-static struct of_platform_driver of_fsl_dma_driver = {
- .name = "fsl-elo-dma",
- .match_table = of_fsl_dma_ids,
- .probe = of_fsl_dma_probe,
- .remove = of_fsl_dma_remove,
+static struct of_platform_driver fsldma_of_driver = {
+ .name = "fsl-elo-dma",
+ .match_table = fsldma_of_ids,
+ .probe = fsldma_of_probe,
+ .remove = fsldma_of_remove,
};
-static __init int of_fsl_dma_init(void)
+/*----------------------------------------------------------------------------*/
+/* Module Init / Exit */
+/*----------------------------------------------------------------------------*/
+
+static __init int fsldma_init(void)
{
int ret;
pr_info("Freescale Elo / Elo Plus DMA driver\n");
- ret = of_register_platform_driver(&of_fsl_dma_driver);
+ ret = of_register_platform_driver(&fsldma_of_driver);
if (ret)
pr_err("fsldma: failed to register platform driver\n");
return ret;
}
-static void __exit of_fsl_dma_exit(void)
+static void __exit fsldma_exit(void)
{
- of_unregister_platform_driver(&of_fsl_dma_driver);
+ of_unregister_platform_driver(&fsldma_of_driver);
}
-subsys_initcall(of_fsl_dma_init);
-module_exit(of_fsl_dma_exit);
+subsys_initcall(fsldma_init);
+module_exit(fsldma_exit);
MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 0df14cbb8ca..cb4d6ff5159 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -92,11 +92,9 @@ struct fsl_desc_sw {
struct list_head node;
struct list_head tx_list;
struct dma_async_tx_descriptor async_tx;
- struct list_head *ld;
- void *priv;
} __attribute__((aligned(32)));
-struct fsl_dma_chan_regs {
+struct fsldma_chan_regs {
u32 mr; /* 0x00 - Mode Register */
u32 sr; /* 0x04 - Status Register */
u64 cdar; /* 0x08 - Current descriptor address register */
@@ -106,20 +104,19 @@ struct fsl_dma_chan_regs {
u64 ndar; /* 0x24 - Next Descriptor Address Register */
};
-struct fsl_dma_chan;
+struct fsldma_chan;
#define FSL_DMA_MAX_CHANS_PER_DEVICE 4
-struct fsl_dma_device {
- void __iomem *reg_base; /* DGSR register base */
- struct resource reg; /* Resource for register */
+struct fsldma_device {
+ void __iomem *regs; /* DGSR register base */
struct device *dev;
struct dma_device common;
- struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
+ struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
u32 feature; /* The same as DMA channels */
int irq; /* Channel IRQ */
};
-/* Define macros for fsl_dma_chan->feature property */
+/* Define macros for fsldma_chan->feature property */
#define FSL_DMA_LITTLE_ENDIAN 0x00000000
#define FSL_DMA_BIG_ENDIAN 0x00000001
@@ -130,28 +127,28 @@ struct fsl_dma_device {
#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
#define FSL_DMA_CHAN_START_EXT 0x00002000
-struct fsl_dma_chan {
- struct fsl_dma_chan_regs __iomem *reg_base;
+struct fsldma_chan {
+ struct fsldma_chan_regs __iomem *regs;
dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */
- struct list_head ld_queue; /* Link descriptors queue */
+ struct list_head ld_pending; /* Link descriptors queue */
+ struct list_head ld_running; /* Link descriptors queue */
struct dma_chan common; /* DMA common channel */
struct dma_pool *desc_pool; /* Descriptors pool */
struct device *dev; /* Channel device */
- struct resource reg; /* Resource for register */
int irq; /* Channel IRQ */
int id; /* Raw id of this channel */
struct tasklet_struct tasklet;
u32 feature;
- void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable);
- void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
- void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
- void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
- void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size);
+ void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
+ void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
+ void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
+ void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size);
+ void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
};
-#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
+#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index dcc4ab78b32..af14c9a5b8d 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -71,7 +71,7 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
}
attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
- for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
+ for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
chan = ioat_chan_by_index(instance, bit);
tasklet_schedule(&chan->cleanup_task);
}
@@ -94,16 +94,12 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
return IRQ_HANDLED;
}
-static void ioat1_cleanup_tasklet(unsigned long data);
-
/* common channel initialization */
-void ioat_init_channel(struct ioatdma_device *device,
- struct ioat_chan_common *chan, int idx,
- void (*timer_fn)(unsigned long),
- void (*tasklet)(unsigned long),
- unsigned long ioat)
+void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
struct dma_device *dma = &device->common;
+ struct dma_chan *c = &chan->common;
+ unsigned long data = (unsigned long) c;
chan->device = device;
chan->reg_base = device->reg_base + (0x80 * (idx + 1));
@@ -112,14 +108,12 @@ void ioat_init_channel(struct ioatdma_device *device,
list_add_tail(&chan->common.device_node, &dma->channels);
device->idx[idx] = chan;
init_timer(&chan->timer);
- chan->timer.function = timer_fn;
- chan->timer.data = ioat;
- tasklet_init(&chan->cleanup_task, tasklet, ioat);
+ chan->timer.function = device->timer_fn;
+ chan->timer.data = data;
+ tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
tasklet_disable(&chan->cleanup_task);
}
-static void ioat1_timer_event(unsigned long data);
-
/**
* ioat1_dma_enumerate_channels - find and initialize the device's channels
* @device: the device to be enumerated
@@ -155,10 +149,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device)
if (!ioat)
break;
- ioat_init_channel(device, &ioat->base, i,
- ioat1_timer_event,
- ioat1_cleanup_tasklet,
- (unsigned long) ioat);
+ ioat_init_channel(device, &ioat->base, i);
ioat->xfercap = xfercap;
spin_lock_init(&ioat->desc_lock);
INIT_LIST_HEAD(&ioat->free_desc);
@@ -532,12 +523,12 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
return &desc->txd;
}
-static void ioat1_cleanup_tasklet(unsigned long data)
+static void ioat1_cleanup_event(unsigned long data)
{
- struct ioat_dma_chan *chan = (void *)data;
+ struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
- ioat1_cleanup(chan);
- writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
+ ioat1_cleanup(ioat);
+ writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
@@ -687,7 +678,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
static void ioat1_timer_event(unsigned long data)
{
- struct ioat_dma_chan *ioat = (void *) data;
+ struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
@@ -734,16 +725,17 @@ static void ioat1_timer_event(unsigned long data)
spin_unlock_bh(&chan->cleanup_lock);
}
-static enum dma_status
-ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+enum dma_status
+ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
dma_cookie_t *done, dma_cookie_t *used)
{
- struct ioat_dma_chan *ioat = to_ioat_chan(c);
+ struct ioat_chan_common *chan = to_chan_common(c);
+ struct ioatdma_device *device = chan->device;
if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
return DMA_SUCCESS;
- ioat1_cleanup(ioat);
+ device->cleanup_fn((unsigned long) c);
return ioat_is_complete(c, cookie, done, used);
}
@@ -1199,12 +1191,14 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
device->intr_quirk = ioat1_intr_quirk;
device->enumerate_channels = ioat1_enumerate_channels;
device->self_test = ioat_dma_self_test;
+ device->timer_fn = ioat1_timer_event;
+ device->cleanup_fn = ioat1_cleanup_event;
dma = &device->common;
dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
- dma->device_is_tx_complete = ioat1_dma_is_complete;
+ dma->device_is_tx_complete = ioat_is_dma_complete;
err = ioat_probe(device);
if (err)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index bbc3e78ef33..4f747a25407 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -61,7 +61,7 @@
* @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
* @enumerate_channels: hw version specific channel enumeration
* @reset_hw: hw version specific channel (re)initialization
- * @cleanup_tasklet: select between the v2 and v3 cleanup routines
+ * @cleanup_fn: select between the v2 and v3 cleanup routines
* @timer_fn: select between the v2 and v3 timer watchdog routines
* @self_test: hardware version specific self test for each supported op type
*
@@ -80,7 +80,7 @@ struct ioatdma_device {
void (*intr_quirk)(struct ioatdma_device *device);
int (*enumerate_channels)(struct ioatdma_device *device);
int (*reset_hw)(struct ioat_chan_common *chan);
- void (*cleanup_tasklet)(unsigned long data);
+ void (*cleanup_fn)(unsigned long data);
void (*timer_fn)(unsigned long data);
int (*self_test)(struct ioatdma_device *device);
};
@@ -337,10 +337,9 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
void __iomem *iobase);
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
- struct ioat_chan_common *chan, int idx,
- void (*timer_fn)(unsigned long),
- void (*tasklet)(unsigned long),
- unsigned long ioat);
+ struct ioat_chan_common *chan, int idx);
+enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
+ dma_cookie_t *done, dma_cookie_t *used);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5cc37afe2bc..1ed5d66d7dc 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -51,48 +51,40 @@ MODULE_PARM_DESC(ioat_ring_max_alloc_order,
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
- void * __iomem reg_base = ioat->base.reg_base;
+ struct ioat_chan_common *chan = &ioat->base;
- ioat->pending = 0;
ioat->dmacount += ioat2_ring_pending(ioat);
ioat->issued = ioat->head;
/* make descriptor updates globally visible before notifying channel */
wmb();
- writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
- dev_dbg(to_dev(&ioat->base),
+ writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+ dev_dbg(to_dev(chan),
"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}
-void ioat2_issue_pending(struct dma_chan *chan)
+void ioat2_issue_pending(struct dma_chan *c)
{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- spin_lock_bh(&ioat->ring_lock);
- if (ioat->pending == 1)
+ if (ioat2_ring_pending(ioat)) {
+ spin_lock_bh(&ioat->ring_lock);
__ioat2_issue_pending(ioat);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->ring_lock);
+ }
}
/**
* ioat2_update_pending - log pending descriptors
* @ioat: ioat2+ channel
*
- * set pending to '1' unless pending is already set to '2', pending == 2
- * indicates that submission is temporarily blocked due to an in-flight
- * reset. If we are already above the ioat_pending_level threshold then
- * just issue pending.
- *
- * called with ring_lock held
+ * Check if the number of unsubmitted descriptors has exceeded the
+ * watermark. Called with ring_lock held
*/
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
- if (unlikely(ioat->pending == 2))
- return;
- else if (ioat2_ring_pending(ioat) > ioat_pending_level)
+ if (ioat2_ring_pending(ioat) > ioat_pending_level)
__ioat2_issue_pending(ioat);
- else
- ioat->pending = 1;
}
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
@@ -166,7 +158,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
seen_current = true;
}
ioat->tail += i;
- BUG_ON(!seen_current); /* no active descs have written a completion? */
+ BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
if (ioat->head == ioat->tail) {
@@ -207,9 +199,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
spin_unlock_bh(&chan->cleanup_lock);
}
-void ioat2_cleanup_tasklet(unsigned long data)
+void ioat2_cleanup_event(unsigned long data)
{
- struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
ioat2_cleanup(ioat);
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
@@ -291,7 +283,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
void ioat2_timer_event(unsigned long data)
{
- struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
spin_lock_bh(&chan->cleanup_lock);
@@ -397,10 +389,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
if (!ioat)
break;
- ioat_init_channel(device, &ioat->base, i,
- device->timer_fn,
- device->cleanup_tasklet,
- (unsigned long) ioat);
+ ioat_init_channel(device, &ioat->base, i);
ioat->xfercap_log = xfercap_log;
spin_lock_init(&ioat->ring_lock);
if (device->reset_hw(&ioat->base)) {
@@ -546,7 +535,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat->head = 0;
ioat->issued = 0;
ioat->tail = 0;
- ioat->pending = 0;
ioat->alloc_order = order;
spin_unlock_bh(&ioat->ring_lock);
@@ -701,7 +689,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
spin_unlock_bh(&chan->cleanup_lock);
- device->timer_fn((unsigned long) ioat);
+ device->timer_fn((unsigned long) &chan->common);
} else
spin_unlock_bh(&chan->cleanup_lock);
return -ENOMEM;
@@ -785,7 +773,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
tasklet_disable(&chan->cleanup_task);
del_timer_sync(&chan->timer);
- device->cleanup_tasklet((unsigned long) ioat);
+ device->cleanup_fn((unsigned long) c);
device->reset_hw(chan);
spin_lock_bh(&ioat->ring_lock);
@@ -815,25 +803,9 @@ void ioat2_free_chan_resources(struct dma_chan *c)
chan->last_completion = 0;
chan->completion_dma = 0;
- ioat->pending = 0;
ioat->dmacount = 0;
}
-enum dma_status
-ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- struct ioatdma_device *device = ioat->base.device;
-
- if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
- return DMA_SUCCESS;
-
- device->cleanup_tasklet((unsigned long) ioat);
-
- return ioat_is_complete(c, cookie, done, used);
-}
-
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
@@ -874,7 +846,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
device->enumerate_channels = ioat2_enumerate_channels;
device->reset_hw = ioat2_reset_hw;
- device->cleanup_tasklet = ioat2_cleanup_tasklet;
+ device->cleanup_fn = ioat2_cleanup_event;
device->timer_fn = ioat2_timer_event;
device->self_test = ioat_dma_self_test;
dma = &device->common;
@@ -882,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- dma->device_is_tx_complete = ioat2_is_complete;
+ dma->device_is_tx_complete = ioat_is_dma_complete;
err = ioat_probe(device);
if (err)
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 3afad8da43c..ef2871fd786 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -47,7 +47,6 @@ extern int ioat_ring_alloc_order;
* @head: allocated index
* @issued: hardware notification point
* @tail: cleanup index
- * @pending: lock free indicator for issued != head
* @dmacount: identical to 'head' except for occasionally resetting to zero
* @alloc_order: log2 of the number of allocated descriptors
* @ring: software ring buffer implementation of hardware ring
@@ -61,7 +60,6 @@ struct ioat2_dma_chan {
u16 tail;
u16 dmacount;
u16 alloc_order;
- int pending;
struct ioat_ring_ent **ring;
spinlock_t ring_lock;
};
@@ -178,12 +176,10 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
void ioat2_issue_pending(struct dma_chan *chan);
int ioat2_alloc_chan_resources(struct dma_chan *c);
void ioat2_free_chan_resources(struct dma_chan *c);
-enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used);
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
-void ioat2_cleanup_tasklet(unsigned long data);
+void ioat2_cleanup_event(unsigned long data);
void ioat2_timer_event(unsigned long data);
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 9908c9e94b2..26febc56dab 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -293,17 +293,25 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
}
}
ioat->tail += i;
- BUG_ON(!seen_current); /* no active descs have written a completion? */
+ BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
- if (ioat->head == ioat->tail) {
+
+ active = ioat2_ring_active(ioat);
+ if (active == 0) {
dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
__func__);
clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
+ /* 5 microsecond delay per pending descriptor */
+ writew(min((5 * active), IOAT_INTRDELAY_MASK),
+ chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
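[Editor's note: a quick check of the new interrupt-delay write, using the IOAT_INTRDELAY_MASK value of 0x3FFF introduced in the registers.h hunk later in this patch:]

/*   16 descriptors active: min(5 * 16,   0x3FFF) =    80 microseconds */
/* 8192 descriptors active: min(5 * 8192, 0x3FFF) = 16383 (clamped)    */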
-static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
+/* try to cleanup, but yield (via spin_trylock) to incoming submissions
+ * with the expectation that we will immediately poll again shortly
+ */
+static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete;
@@ -329,29 +337,41 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
spin_unlock_bh(&chan->cleanup_lock);
}
-static void ioat3_cleanup_tasklet(unsigned long data)
+/* run cleanup now because we already delayed the interrupt via INTRDELAY */
+static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ unsigned long phys_complete;
+
+ prefetch(chan->completion);
+
+ spin_lock_bh(&chan->cleanup_lock);
+ if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ }
+ spin_lock_bh(&ioat->ring_lock);
+
+ __cleanup(ioat, phys_complete);
+
+ spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static void ioat3_cleanup_event(unsigned long data)
{
- struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
- ioat3_cleanup(ioat);
- writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN,
- ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+ ioat3_cleanup_sync(ioat);
+ writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete;
- u32 status;
-
- status = ioat_chansts(chan);
- if (is_ioat_active(status) || is_ioat_idle(status))
- ioat_suspend(chan);
- while (is_ioat_active(status) || is_ioat_idle(status)) {
- status = ioat_chansts(chan);
- cpu_relax();
- }
+ ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
@@ -360,7 +380,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
static void ioat3_timer_event(unsigned long data)
{
- struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
spin_lock_bh(&chan->cleanup_lock);
@@ -426,7 +446,7 @@ ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
return DMA_SUCCESS;
- ioat3_cleanup(ioat);
+ ioat3_cleanup_poll(ioat);
return ioat_is_complete(c, cookie, done, used);
}
@@ -1239,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (is_raid_device) {
dma->device_is_tx_complete = ioat3_is_complete;
- device->cleanup_tasklet = ioat3_cleanup_tasklet;
+ device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
} else {
- dma->device_is_tx_complete = ioat2_is_complete;
- device->cleanup_tasklet = ioat2_cleanup_tasklet;
+ dma->device_is_tx_complete = ioat_is_dma_complete;
+ device->cleanup_fn = ioat2_cleanup_event;
device->timer_fn = ioat2_timer_event;
}
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index e8ae63baf58..1391798542b 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -60,7 +60,7 @@
#define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */
#define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */
-#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */
+#define IOAT_INTRDELAY_MASK 0x3FFF /* Interrupt Delay Time */
#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */
#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index e80bae1673f..2a446397c88 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -348,6 +348,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
break;
case IPU_PIX_FMT_BGRA32:
case IPU_PIX_FMT_BGR32:
+ case IPU_PIX_FMT_ABGR32:
params->ip.bpp = 0;
params->ip.pfs = 4;
params->ip.npb = 7;
@@ -376,20 +377,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
params->ip.wid2 = 7; /* Blue bit width - 1 */
params->ip.wid3 = 7; /* Alpha bit width - 1 */
break;
- case IPU_PIX_FMT_ABGR32:
- params->ip.bpp = 0;
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 8; /* Red bit offset */
- params->ip.ofs1 = 16; /* Green bit offset */
- params->ip.ofs2 = 24; /* Blue bit offset */
- params->ip.ofs3 = 0; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- params->ip.wid3 = 7; /* Alpha bit width - 1 */
- break;
case IPU_PIX_FMT_UYVY:
params->ip.bpp = 2;
params->ip.pfs = 6;
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
new file mode 100644
index 00000000000..3fdf1f46bd6
--- /dev/null
+++ b/drivers/dma/mpc512x_dma.c
@@ -0,0 +1,800 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
+ * Copyright (C) Semihalf 2009
+ *
+ * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
+ * (defines, structures and comments) was taken from MPC5121 DMA driver
+ * written by Hongjun Chen <hong-jun.chen@freescale.com>.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/*
+ * This is the initial version of the MPC5121 DMA driver. Only memory-to-memory
+ * transfers are supported (tested using the dmatest module).
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include <linux/random.h>
+
+/* Number of DMA Transfer descriptors allocated per channel */
+#define MPC_DMA_DESCRIPTORS 64
+
+/* Macro definitions */
+#define MPC_DMA_CHANNELS 64
+#define MPC_DMA_TCD_OFFSET 0x1000
+
+/* Arbitration mode of group and channel */
+#define MPC_DMA_DMACR_EDCG (1 << 31)
+#define MPC_DMA_DMACR_ERGA (1 << 3)
+#define MPC_DMA_DMACR_ERCA (1 << 2)
+
+/* Error codes */
+#define MPC_DMA_DMAES_VLD (1 << 31)
+#define MPC_DMA_DMAES_GPE (1 << 15)
+#define MPC_DMA_DMAES_CPE (1 << 14)
+#define MPC_DMA_DMAES_ERRCHN(err) \
+ (((err) >> 8) & 0x3f)
+#define MPC_DMA_DMAES_SAE (1 << 7)
+#define MPC_DMA_DMAES_SOE (1 << 6)
+#define MPC_DMA_DMAES_DAE (1 << 5)
+#define MPC_DMA_DMAES_DOE (1 << 4)
+#define MPC_DMA_DMAES_NCE (1 << 3)
+#define MPC_DMA_DMAES_SGE (1 << 2)
+#define MPC_DMA_DMAES_SBE (1 << 1)
+#define MPC_DMA_DMAES_DBE (1 << 0)
+
+#define MPC_DMA_TSIZE_1 0x00
+#define MPC_DMA_TSIZE_2 0x01
+#define MPC_DMA_TSIZE_4 0x02
+#define MPC_DMA_TSIZE_16 0x04
+#define MPC_DMA_TSIZE_32 0x05
+
+/* MPC5121 DMA engine registers */
+struct __attribute__ ((__packed__)) mpc_dma_regs {
+ /* 0x00 */
+ u32 dmacr; /* DMA control register */
+ u32 dmaes; /* DMA error status */
+ /* 0x08 */
+ u32 dmaerqh; /* DMA enable request high(channels 63~32) */
+ u32 dmaerql; /* DMA enable request low(channels 31~0) */
+ u32 dmaeeih; /* DMA enable error interrupt high(ch63~32) */
+ u32 dmaeeil; /* DMA enable error interrupt low(ch31~0) */
+ /* 0x18 */
+ u8 dmaserq; /* DMA set enable request */
+ u8 dmacerq; /* DMA clear enable request */
+ u8 dmaseei; /* DMA set enable error interrupt */
+ u8 dmaceei; /* DMA clear enable error interrupt */
+ /* 0x1c */
+ u8 dmacint; /* DMA clear interrupt request */
+ u8 dmacerr; /* DMA clear error */
+ u8 dmassrt; /* DMA set start bit */
+ u8 dmacdne; /* DMA clear DONE status bit */
+ /* 0x20 */
+ u32 dmainth; /* DMA interrupt request high(ch63~32) */
+ u32 dmaintl; /* DMA interrupt request low(ch31~0) */
+ u32 dmaerrh; /* DMA error high(ch63~32) */
+ u32 dmaerrl; /* DMA error low(ch31~0) */
+ /* 0x30 */
+ u32 dmahrsh; /* DMA hw request status high(ch63~32) */
+ u32 dmahrsl; /* DMA hardware request status low(ch31~0) */
+ u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */
+ u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */
+ /* 0x40 ~ 0xff */
+ u32 reserve0[48]; /* Reserved */
+ /* 0x100 */
+ u8 dchpri[MPC_DMA_CHANNELS];
+ /* DMA channels(0~63) priority */
+};
+
+struct __attribute__ ((__packed__)) mpc_dma_tcd {
+ /* 0x00 */
+ u32 saddr; /* Source address */
+
+ u32 smod:5; /* Source address modulo */
+ u32 ssize:3; /* Source data transfer size */
+ u32 dmod:5; /* Destination address modulo */
+ u32 dsize:3; /* Destination data transfer size */
+ u32 soff:16; /* Signed source address offset */
+
+ /* 0x08 */
+ u32 nbytes; /* Inner "minor" byte count */
+ u32 slast; /* Last source address adjustment */
+ u32 daddr; /* Destination address */
+
+ /* 0x14 */
+ u32 citer_elink:1; /* Enable channel-to-channel linking on
+ * minor loop complete
+ */
+ u32 citer_linkch:6; /* Link channel for minor loop complete */
+ u32 citer:9; /* Current "major" iteration count */
+ u32 doff:16; /* Signed destination address offset */
+
+ /* 0x18 */
+ u32 dlast_sga; /* Last Destination address adjustment/scatter
+ * gather address
+ */
+
+ /* 0x1c */
+ u32 biter_elink:1; /* Enable channel-to-channel linking on major
+ * loop complete
+ */
+ u32 biter_linkch:6;
+ u32 biter:9; /* Beginning "major" iteration count */
+ u32 bwc:2; /* Bandwidth control */
+ u32 major_linkch:6; /* Link channel number */
+ u32 done:1; /* Channel done */
+ u32 active:1; /* Channel active */
+ u32 major_elink:1; /* Enable channel-to-channel linking on major
+ * loop complete
+ */
+ u32 e_sg:1; /* Enable scatter/gather processing */
+ u32 d_req:1; /* Disable request */
+ u32 int_half:1; /* Enable an interrupt when major counter is
+ * half complete
+ */
+ u32 int_maj:1; /* Enable an interrupt when major iteration
+ * count completes
+ */
+ u32 start:1; /* Channel start */
+};
+
+struct mpc_dma_desc {
+ struct dma_async_tx_descriptor desc;
+ struct mpc_dma_tcd *tcd;
+ dma_addr_t tcd_paddr;
+ int error;
+ struct list_head node;
+};
+
+struct mpc_dma_chan {
+ struct dma_chan chan;
+ struct list_head free;
+ struct list_head prepared;
+ struct list_head queued;
+ struct list_head active;
+ struct list_head completed;
+ struct mpc_dma_tcd *tcd;
+ dma_addr_t tcd_paddr;
+ dma_cookie_t completed_cookie;
+
+ /* Lock for this structure */
+ spinlock_t lock;
+};
+
+struct mpc_dma {
+ struct dma_device dma;
+ struct tasklet_struct tasklet;
+ struct mpc_dma_chan channels[MPC_DMA_CHANNELS];
+ struct mpc_dma_regs __iomem *regs;
+ struct mpc_dma_tcd __iomem *tcd;
+ int irq;
+ uint error_status;
+
+ /* Lock for error_status field in this structure */
+ spinlock_t error_status_lock;
+};
+
+#define DRV_NAME "mpc512x_dma"
+
+/* Convert struct dma_chan to struct mpc_dma_chan */
+static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct mpc_dma_chan, chan);
+}
+
+/* Convert struct dma_chan to struct mpc_dma */
+static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
+{
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
+ return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
+}
+
+/*
+ * Execute all queued DMA descriptors.
+ *
+ * The following requirements must be met when calling mpc_dma_execute():
+ * a) mchan->lock is acquired,
+ * b) mchan->active list is empty,
+ * c) mchan->queued list contains at least one entry.
+ */
+static void mpc_dma_execute(struct mpc_dma_chan *mchan)
+{
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
+ struct mpc_dma_desc *first = NULL;
+ struct mpc_dma_desc *prev = NULL;
+ struct mpc_dma_desc *mdesc;
+ int cid = mchan->chan.chan_id;
+
+ /* Move all queued descriptors to active list */
+ list_splice_tail_init(&mchan->queued, &mchan->active);
+
+ /* Chain descriptors into one transaction */
+ list_for_each_entry(mdesc, &mchan->active, node) {
+ if (!first)
+ first = mdesc;
+
+ if (!prev) {
+ prev = mdesc;
+ continue;
+ }
+
+ prev->tcd->dlast_sga = mdesc->tcd_paddr;
+ prev->tcd->e_sg = 1;
+ mdesc->tcd->start = 1;
+
+ prev = mdesc;
+ }
+
+ prev->tcd->start = 0;
+ prev->tcd->int_maj = 1;
+
+ /* Send first descriptor in chain into hardware */
+ memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
+ out_8(&mdma->regs->dmassrt, cid);
+}
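[Editor's note: a minimal sketch of a caller honouring requirements (a)-(c) stated above, mirroring what mpc_dma_tx_submit() and the interrupt path do later in this file; it assumes a struct mpc_dma_chan *mchan and unsigned long flags in scope, and is not part of the patch.]

	spin_lock_irqsave(&mchan->lock, flags);
	if (list_empty(&mchan->active) && !list_empty(&mchan->queued))
		mpc_dma_execute(mchan);	/* lock held, active empty, queued non-empty */
	spin_unlock_irqrestore(&mchan->lock, flags);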
+
+/* Handle interrupt on one half of DMA controller (32 channels) */
+static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
+{
+ struct mpc_dma_chan *mchan;
+ struct mpc_dma_desc *mdesc;
+ u32 status = is | es;
+ int ch;
+
+ while ((ch = fls(status) - 1) >= 0) {
+ status &= ~(1 << ch);
+ mchan = &mdma->channels[ch + off];
+
+ spin_lock(&mchan->lock);
+
+ /* Check error status */
+ if (es & (1 << ch))
+ list_for_each_entry(mdesc, &mchan->active, node)
+ mdesc->error = -EIO;
+
+ /* Execute queued descriptors */
+ list_splice_tail_init(&mchan->active, &mchan->completed);
+ if (!list_empty(&mchan->queued))
+ mpc_dma_execute(mchan);
+
+ spin_unlock(&mchan->lock);
+ }
+}
+
+/* Interrupt handler */
+static irqreturn_t mpc_dma_irq(int irq, void *data)
+{
+ struct mpc_dma *mdma = data;
+ uint es;
+
+ /* Save error status register */
+ es = in_be32(&mdma->regs->dmaes);
+ spin_lock(&mdma->error_status_lock);
+ if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
+ mdma->error_status = es;
+ spin_unlock(&mdma->error_status_lock);
+
+ /* Handle interrupt on each channel */
+ mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
+ in_be32(&mdma->regs->dmaerrh), 32);
+ mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
+ in_be32(&mdma->regs->dmaerrl), 0);
+
+ /* Ack interrupt on all channels */
+ out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
+
+ /* Schedule tasklet */
+ tasklet_schedule(&mdma->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/* DMA Tasklet */
+static void mpc_dma_tasklet(unsigned long data)
+{
+ struct mpc_dma *mdma = (void *)data;
+ dma_cookie_t last_cookie = 0;
+ struct mpc_dma_chan *mchan;
+ struct mpc_dma_desc *mdesc;
+ struct dma_async_tx_descriptor *desc;
+ unsigned long flags;
+ LIST_HEAD(list);
+ uint es;
+ int i;
+
+ spin_lock_irqsave(&mdma->error_status_lock, flags);
+ es = mdma->error_status;
+ mdma->error_status = 0;
+ spin_unlock_irqrestore(&mdma->error_status_lock, flags);
+
+ /* Print nice error report */
+ if (es) {
+ dev_err(mdma->dma.dev,
+ "Hardware reported following error(s) on channel %u:\n",
+ MPC_DMA_DMAES_ERRCHN(es));
+
+ if (es & MPC_DMA_DMAES_GPE)
+ dev_err(mdma->dma.dev, "- Group Priority Error\n");
+ if (es & MPC_DMA_DMAES_CPE)
+ dev_err(mdma->dma.dev, "- Channel Priority Error\n");
+ if (es & MPC_DMA_DMAES_SAE)
+ dev_err(mdma->dma.dev, "- Source Address Error\n");
+ if (es & MPC_DMA_DMAES_SOE)
+ dev_err(mdma->dma.dev, "- Source Offset"
+ " Configuration Error\n");
+ if (es & MPC_DMA_DMAES_DAE)
+ dev_err(mdma->dma.dev, "- Destination Address"
+ " Error\n");
+ if (es & MPC_DMA_DMAES_DOE)
+ dev_err(mdma->dma.dev, "- Destination Offset"
+ " Configuration Error\n");
+ if (es & MPC_DMA_DMAES_NCE)
+ dev_err(mdma->dma.dev, "- NBytes/Citter"
+ " Configuration Error\n");
+ if (es & MPC_DMA_DMAES_SGE)
+ dev_err(mdma->dma.dev, "- Scatter/Gather"
+ " Configuration Error\n");
+ if (es & MPC_DMA_DMAES_SBE)
+ dev_err(mdma->dma.dev, "- Source Bus Error\n");
+ if (es & MPC_DMA_DMAES_DBE)
+ dev_err(mdma->dma.dev, "- Destination Bus Error\n");
+ }
+
+ for (i = 0; i < mdma->dma.chancnt; i++) {
+ mchan = &mdma->channels[i];
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&mchan->lock, flags);
+ if (!list_empty(&mchan->completed))
+ list_splice_tail_init(&mchan->completed, &list);
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ if (list_empty(&list))
+ continue;
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry(mdesc, &list, node) {
+ desc = &mdesc->desc;
+
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+
+ last_cookie = desc->cookie;
+ dma_run_dependencies(desc);
+ }
+
+ /* Free descriptors */
+ spin_lock_irqsave(&mchan->lock, flags);
+ list_splice_tail_init(&list, &mchan->free);
+ mchan->completed_cookie = last_cookie;
+ spin_unlock_irqrestore(&mchan->lock, flags);
+ }
+}
+
+/* Submit descriptor to hardware */
+static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
+ struct mpc_dma_desc *mdesc;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ mdesc = container_of(txd, struct mpc_dma_desc, desc);
+
+ spin_lock_irqsave(&mchan->lock, flags);
+
+ /* Move descriptor to queue */
+ list_move_tail(&mdesc->node, &mchan->queued);
+
+ /* If channel is idle, execute all queued descriptors */
+ if (list_empty(&mchan->active))
+ mpc_dma_execute(mchan);
+
+ /* Update cookie */
+ cookie = mchan->chan.cookie + 1;
+ if (cookie <= 0)
+ cookie = 1;
+
+ mchan->chan.cookie = cookie;
+ mdesc->desc.cookie = cookie;
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ return cookie;
+}
+
+/* Alloc channel resources */
+static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ struct mpc_dma_desc *mdesc;
+ struct mpc_dma_tcd *tcd;
+ dma_addr_t tcd_paddr;
+ unsigned long flags;
+ LIST_HEAD(descs);
+ int i;
+
+ /* Alloc DMA memory for Transfer Control Descriptors */
+ tcd = dma_alloc_coherent(mdma->dma.dev,
+ MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
+ &tcd_paddr, GFP_KERNEL);
+ if (!tcd)
+ return -ENOMEM;
+
+ /* Alloc descriptors for this channel */
+ for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
+ mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
+ if (!mdesc) {
+ dev_notice(mdma->dma.dev, "Memory allocation error. "
+ "Allocated only %u descriptors\n", i);
+ break;
+ }
+
+ dma_async_tx_descriptor_init(&mdesc->desc, chan);
+ mdesc->desc.flags = DMA_CTRL_ACK;
+ mdesc->desc.tx_submit = mpc_dma_tx_submit;
+
+ mdesc->tcd = &tcd[i];
+ mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));
+
+ list_add_tail(&mdesc->node, &descs);
+ }
+
+ /* Return error only if no descriptors were allocated */
+ if (i == 0) {
+ dma_free_coherent(mdma->dma.dev,
+ MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
+ tcd, tcd_paddr);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ mchan->tcd = tcd;
+ mchan->tcd_paddr = tcd_paddr;
+ list_splice_tail_init(&descs, &mchan->free);
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ /* Enable Error Interrupt */
+ out_8(&mdma->regs->dmaseei, chan->chan_id);
+
+ return 0;
+}
+
+/* Free channel resources */
+static void mpc_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ struct mpc_dma_desc *mdesc, *tmp;
+ struct mpc_dma_tcd *tcd;
+ dma_addr_t tcd_paddr;
+ unsigned long flags;
+ LIST_HEAD(descs);
+
+ spin_lock_irqsave(&mchan->lock, flags);
+
+ /* Channel must be idle */
+ BUG_ON(!list_empty(&mchan->prepared));
+ BUG_ON(!list_empty(&mchan->queued));
+ BUG_ON(!list_empty(&mchan->active));
+ BUG_ON(!list_empty(&mchan->completed));
+
+ /* Move data */
+ list_splice_tail_init(&mchan->free, &descs);
+ tcd = mchan->tcd;
+ tcd_paddr = mchan->tcd_paddr;
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ /* Free DMA memory used by descriptors */
+ dma_free_coherent(mdma->dma.dev,
+ MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
+ tcd, tcd_paddr);
+
+ /* Free descriptors */
+ list_for_each_entry_safe(mdesc, tmp, &descs, node)
+ kfree(mdesc);
+
+ /* Disable Error Interrupt */
+ out_8(&mdma->regs->dmaceei, chan->chan_id);
+}
+
+/* Send all pending descriptors to hardware */
+static void mpc_dma_issue_pending(struct dma_chan *chan)
+{
+ /*
+ * We are posting descriptors to the hardware as soon as
+ * they are ready, so this function does nothing.
+ */
+}
+
+/* Check request completion status */
+static enum dma_status
+mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
+ dma_cookie_t *done, dma_cookie_t *used)
+{
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ unsigned long flags;
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ last_used = mchan->chan.cookie;
+ last_complete = mchan->completed_cookie;
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ if (done)
+ *done = last_complete;
+
+ if (used)
+ *used = last_used;
+
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+/* Prepare descriptor for memory to memory copy */
+static struct dma_async_tx_descriptor *
+mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ struct mpc_dma_desc *mdesc = NULL;
+ struct mpc_dma_tcd *tcd;
+ unsigned long iflags;
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&mchan->lock, iflags);
+ if (!list_empty(&mchan->free)) {
+ mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
+ node);
+ list_del(&mdesc->node);
+ }
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+
+ if (!mdesc)
+ return NULL;
+
+ mdesc->error = 0;
+ tcd = mdesc->tcd;
+
+ /* Prepare Transfer Control Descriptor for this transaction */
+ memset(tcd, 0, sizeof(struct mpc_dma_tcd));
+
+ if (IS_ALIGNED(src | dst | len, 32)) {
+ tcd->ssize = MPC_DMA_TSIZE_32;
+ tcd->dsize = MPC_DMA_TSIZE_32;
+ tcd->soff = 32;
+ tcd->doff = 32;
+ } else if (IS_ALIGNED(src | dst | len, 16)) {
+ tcd->ssize = MPC_DMA_TSIZE_16;
+ tcd->dsize = MPC_DMA_TSIZE_16;
+ tcd->soff = 16;
+ tcd->doff = 16;
+ } else if (IS_ALIGNED(src | dst | len, 4)) {
+ tcd->ssize = MPC_DMA_TSIZE_4;
+ tcd->dsize = MPC_DMA_TSIZE_4;
+ tcd->soff = 4;
+ tcd->doff = 4;
+ } else if (IS_ALIGNED(src | dst | len, 2)) {
+ tcd->ssize = MPC_DMA_TSIZE_2;
+ tcd->dsize = MPC_DMA_TSIZE_2;
+ tcd->soff = 2;
+ tcd->doff = 2;
+ } else {
+ tcd->ssize = MPC_DMA_TSIZE_1;
+ tcd->dsize = MPC_DMA_TSIZE_1;
+ tcd->soff = 1;
+ tcd->doff = 1;
+ }
+
+ tcd->saddr = src;
+ tcd->daddr = dst;
+ tcd->nbytes = len;
+ tcd->biter = 1;
+ tcd->citer = 1;
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&mchan->lock, iflags);
+ list_add_tail(&mdesc->node, &mchan->prepared);
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+
+ return &mdesc->desc;
+}
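[Editor's note: the OR-then-IS_ALIGNED cascade above selects the widest access size that the source address, destination address and length all share. A worked example:]

/* src = 0x1000, dst = 0x2010, len = 64: 0x1000 | 0x2010 | 0x40 = 0x3050 */
/* 0x3050 is 16-byte aligned but not 32-byte aligned, so the transfer    */
/* uses ssize = dsize = MPC_DMA_TSIZE_16 with soff = doff = 16.          */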
+
+static int __devinit mpc_dma_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct device_node *dn = op->node;
+ struct device *dev = &op->dev;
+ struct dma_device *dma;
+ struct mpc_dma *mdma;
+ struct mpc_dma_chan *mchan;
+ struct resource res;
+ ulong regs_start, regs_size;
+ int retval, i;
+
+ mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
+ if (!mdma) {
+ dev_err(dev, "Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ mdma->irq = irq_of_parse_and_map(dn, 0);
+ if (mdma->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+
+ retval = of_address_to_resource(dn, 0, &res);
+ if (retval) {
+ dev_err(dev, "Error parsing memory region!\n");
+ return retval;
+ }
+
+ regs_start = res.start;
+ regs_size = res.end - res.start + 1;
+
+ if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
+ dev_err(dev, "Error requesting memory region!\n");
+ return -EBUSY;
+ }
+
+ mdma->regs = devm_ioremap(dev, regs_start, regs_size);
+ if (!mdma->regs) {
+ dev_err(dev, "Error mapping memory region!\n");
+ return -ENOMEM;
+ }
+
+ mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
+ + MPC_DMA_TCD_OFFSET);
+
+ retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
+ mdma);
+ if (retval) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ return -EINVAL;
+ }
+
+ spin_lock_init(&mdma->error_status_lock);
+
+ dma = &mdma->dma;
+ dma->dev = dev;
+ dma->chancnt = MPC_DMA_CHANNELS;
+ dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = mpc_dma_free_chan_resources;
+ dma->device_issue_pending = mpc_dma_issue_pending;
+ dma->device_is_tx_complete = mpc_dma_is_tx_complete;
+ dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+
+ for (i = 0; i < dma->chancnt; i++) {
+ mchan = &mdma->channels[i];
+
+ mchan->chan.device = dma;
+ mchan->chan.chan_id = i;
+ mchan->chan.cookie = 1;
+ mchan->completed_cookie = mchan->chan.cookie;
+
+ INIT_LIST_HEAD(&mchan->free);
+ INIT_LIST_HEAD(&mchan->prepared);
+ INIT_LIST_HEAD(&mchan->queued);
+ INIT_LIST_HEAD(&mchan->active);
+ INIT_LIST_HEAD(&mchan->completed);
+
+ spin_lock_init(&mchan->lock);
+ list_add_tail(&mchan->chan.device_node, &dma->channels);
+ }
+
+ tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
+
+ /*
+ * Configure DMA Engine:
+ * - Dynamic clock,
+ * - Round-robin group arbitration,
+ * - Round-robin channel arbitration.
+ */
+ out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
+ MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
+
+ /* Disable hardware DMA requests */
+ out_be32(&mdma->regs->dmaerqh, 0);
+ out_be32(&mdma->regs->dmaerql, 0);
+
+ /* Disable error interrupts */
+ out_be32(&mdma->regs->dmaeeih, 0);
+ out_be32(&mdma->regs->dmaeeil, 0);
+
+ /* Clear interrupt status */
+ out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
+
+ /* Route interrupts to IPIC */
+ out_be32(&mdma->regs->dmaihsa, 0);
+ out_be32(&mdma->regs->dmailsa, 0);
+
+ /* Register DMA engine */
+ dev_set_drvdata(dev, mdma);
+ retval = dma_async_device_register(dma);
+ if (retval) {
+ devm_free_irq(dev, mdma->irq, mdma);
+ irq_dispose_mapping(mdma->irq);
+ }
+
+ return retval;
+}
+
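Once dma_async_device_register() succeeds, the controller becomes visible to generic dmaengine clients. The following is a hedged, client-side sketch (not part of this patch) of how a consumer of this kernel generation might exercise the new DMA_MEMCPY capability; error handling and buffer setup are trimmed:

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Illustrative only: grab any MEMCPY-capable channel (such as the engine
 * registered above), submit one copy and synchronously wait for it. */
static int example_memcpy(void *dst, void *src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	if (cookie < 0)
		ret = cookie;
	/* dma_sync_wait() issues pending descriptors and polls the cookie */
	else if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
		ret = -EIO;

	dma_release_channel(chan);
	return ret;
}

Polling via dma_sync_wait() keeps the sketch short; a real client would normally attach a completion callback to the descriptor instead.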
+static int __devexit mpc_dma_remove(struct of_device *op)
+{
+ struct device *dev = &op->dev;
+ struct mpc_dma *mdma = dev_get_drvdata(dev);
+
+ dma_async_device_unregister(&mdma->dma);
+ devm_free_irq(dev, mdma->irq, mdma);
+ irq_dispose_mapping(mdma->irq);
+
+ return 0;
+}
+
+static struct of_device_id mpc_dma_match[] = {
+ { .compatible = "fsl,mpc5121-dma", },
+ {},
+};
+
+static struct of_platform_driver mpc_dma_driver = {
+ .match_table = mpc_dma_match,
+ .probe = mpc_dma_probe,
+ .remove = __devexit_p(mpc_dma_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mpc_dma_init(void)
+{
+ return of_register_platform_driver(&mpc_dma_driver);
+}
+module_init(mpc_dma_init);
+
+static void __exit mpc_dma_exit(void)
+{
+ of_unregister_platform_driver(&mpc_dma_driver);
+}
+module_exit(mpc_dma_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 0a3478e910f..e69d87f24a2 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4940,7 +4940,7 @@ out_free:
return ret;
}
-static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = {
+static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
{ .compatible = "ibm,dma-440spe", },
{ .compatible = "amcc,xor-accelerator", },
{},
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index d10cc899c46..5d17e09cb62 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -24,8 +24,10 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
-#include <cpu/dma.h>
-#include <asm/dma-sh.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/dmaengine.h>
+
#include "shdma.h"
/* DMA descriptor control */
@@ -38,33 +40,32 @@ enum sh_dmae_desc_status {
};
#define NR_DESCS_PER_CHANNEL 32
-/*
- * Define the default configuration for dual address memory-memory transfer.
- * The 0x400 value represents auto-request, external->external.
- *
- * And this driver set 4byte burst mode.
- * If you want to change mode, you need to change RS_DEFAULT of value.
- * (ex 1byte burst mode -> (RS_DUAL & ~TS_32)
- */
-#define RS_DEFAULT (RS_DUAL)
+/* Default MEMCPY transfer size = 2^2 = 4 bytes */
+#define LOG2_DEFAULT_XFER_SIZE 2
+
+/* A bitmask with enough bits for enum sh_dmae_slave_chan_id */
+static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
-#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
- ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
+ __raw_writel(data, sh_dc->base + reg / sizeof(u32));
}
static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
- return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
+ return __raw_readl(sh_dc->base + reg / sizeof(u32));
}
-static void dmae_init(struct sh_dmae_chan *sh_chan)
+static u16 dmaor_read(struct sh_dmae_device *shdev)
{
- u32 chcr = RS_DEFAULT; /* default is DUAL mode */
- sh_dmae_writel(sh_chan, chcr, CHCR);
+ return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
+}
+
+static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
+{
+ __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}
/*
@@ -72,50 +73,75 @@ static void dmae_init(struct sh_dmae_chan *sh_chan)
*
* SH7780 has two DMAOR register
*/
-static void sh_dmae_ctl_stop(int id)
+static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
- unsigned short dmaor = dmaor_read_reg(id);
+ unsigned short dmaor = dmaor_read(shdev);
- dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
- dmaor_write_reg(id, dmaor);
+ dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
}
-static int sh_dmae_rst(int id)
+static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
unsigned short dmaor;
- sh_dmae_ctl_stop(id);
- dmaor = dmaor_read_reg(id) | DMAOR_INIT;
+ sh_dmae_ctl_stop(shdev);
+ dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;
- dmaor_write_reg(id, dmaor);
- if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
- pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
+ dmaor_write(shdev, dmaor);
+ if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
+ pr_warning("dma-sh: Can't initialize DMAOR.\n");
return -EINVAL;
}
return 0;
}
-static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
+static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
u32 chcr = sh_dmae_readl(sh_chan, CHCR);
- if (chcr & CHCR_DE) {
- if (!(chcr & CHCR_TE))
- return -EBUSY; /* working */
- }
- return 0; /* waiting */
+
+ if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
+ return true; /* working */
+
+ return false; /* waiting */
}
-static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
+static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
- u32 chcr = sh_dmae_readl(sh_chan, CHCR);
- return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
+ struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
+ struct sh_dmae_device, common);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
+ ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
+
+ if (cnt >= pdata->ts_shift_num)
+ cnt = 0;
+
+ return pdata->ts_shift[cnt];
+}
+
+static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
+{
+ struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
+ struct sh_dmae_device, common);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ int i;
+
+ for (i = 0; i < pdata->ts_shift_num; i++)
+ if (pdata->ts_shift[i] == l2size)
+ break;
+
+ if (i == pdata->ts_shift_num)
+ i = 0;
+
+ return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
+ ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
sh_dmae_writel(sh_chan, hw->sar, SAR);
sh_dmae_writel(sh_chan, hw->dar, DAR);
- sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
+ sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}
static void dmae_start(struct sh_dmae_chan *sh_chan)
@@ -123,7 +149,7 @@ static void dmae_start(struct sh_dmae_chan *sh_chan)
u32 chcr = sh_dmae_readl(sh_chan, CHCR);
chcr |= CHCR_DE | CHCR_IE;
- sh_dmae_writel(sh_chan, chcr, CHCR);
+ sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}
static void dmae_halt(struct sh_dmae_chan *sh_chan)
@@ -134,55 +160,44 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
sh_dmae_writel(sh_chan, chcr, CHCR);
}
+static void dmae_init(struct sh_dmae_chan *sh_chan)
+{
+ /*
+ * Default configuration for dual address memory-memory transfer.
+ * 0x400 represents auto-request.
+ */
+ u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
+ LOG2_DEFAULT_XFER_SIZE);
+ sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
+ sh_dmae_writel(sh_chan, chcr, CHCR);
+}
+
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
- int ret = dmae_is_busy(sh_chan);
/* When DMA was working, can not set data to CHCR */
- if (ret)
- return ret;
+ if (dmae_is_busy(sh_chan))
+ return -EBUSY;
+ sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
sh_dmae_writel(sh_chan, val, CHCR);
+
return 0;
}
-#define DMARS1_ADDR 0x04
-#define DMARS2_ADDR 0x08
-#define DMARS_SHIFT 8
-#define DMARS_CHAN_MSK 0x01
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
- u32 addr;
- int shift = 0;
- int ret = dmae_is_busy(sh_chan);
- if (ret)
- return ret;
-
- if (sh_chan->id & DMARS_CHAN_MSK)
- shift = DMARS_SHIFT;
-
- switch (sh_chan->id) {
- /* DMARS0 */
- case 0:
- case 1:
- addr = SH_DMARS_BASE;
- break;
- /* DMARS1 */
- case 2:
- case 3:
- addr = (SH_DMARS_BASE + DMARS1_ADDR);
- break;
- /* DMARS2 */
- case 4:
- case 5:
- addr = (SH_DMARS_BASE + DMARS2_ADDR);
- break;
- default:
- return -EINVAL;
- }
+ struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
+ struct sh_dmae_device, common);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
+ u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
+ int shift = chan_pdata->dmars_bit;
+
+ if (dmae_is_busy(sh_chan))
+ return -EBUSY;
- ctrl_outw((val << shift) |
- (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)),
- addr);
+ __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
+ addr);
return 0;
}
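As the removed DMARS_SHIFT/DMARS_CHAN_MSK code shows, each 16-bit DMARS register carries the MID/RID values of two channels, one per byte, so the write above must preserve the neighbouring channel's byte. A small sketch of that read-modify-write, with the register modelled as a plain variable:

#include <stdio.h>
#include <stdint.h>

/* Update one channel's 8-bit MID/RID inside a shared 16-bit DMARS word.
 * shift is 0 for the even channel (low byte) and 8 for the odd channel
 * (high byte); the other byte is left untouched. */
static uint16_t dmars_update(uint16_t dmars, uint8_t mid_rid, int shift)
{
	return (dmars & (0xff00 >> shift)) | ((uint16_t)mid_rid << shift);
}

int main(void)
{
	uint16_t reg = 0xabcd;

	printf("0x%04x\n", dmars_update(reg, 0x31, 0));	/* 0xab31 */
	printf("0x%04x\n", dmars_update(reg, 0x31, 8));	/* 0x31cd */
	return 0;
}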
@@ -250,10 +265,54 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
return NULL;
}
+static struct sh_dmae_slave_config *sh_dmae_find_slave(
+ struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
+{
+ struct dma_device *dma_dev = sh_chan->common.device;
+ struct sh_dmae_device *shdev = container_of(dma_dev,
+ struct sh_dmae_device, common);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ int i;
+
+ if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
+ return NULL;
+
+ for (i = 0; i < pdata->slave_num; i++)
+ if (pdata->slave[i].slave_id == slave_id)
+ return pdata->slave + i;
+
+ return NULL;
+}
+
static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
struct sh_desc *desc;
+ struct sh_dmae_slave *param = chan->private;
+
+ pm_runtime_get_sync(sh_chan->dev);
+
+ /*
+ * This relies on the guarantee from dmaengine that alloc_chan_resources
+ * never runs concurrently with itself or free_chan_resources.
+ */
+ if (param) {
+ struct sh_dmae_slave_config *cfg;
+
+ cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
+ if (!cfg)
+ return -EINVAL;
+
+ if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
+ return -EBUSY;
+
+ param->config = cfg;
+
+ dmae_set_dmars(sh_chan, cfg->mid_rid);
+ dmae_set_chcr(sh_chan, cfg->chcr);
+ } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
+ dmae_init(sh_chan);
+ }
spin_lock_bh(&sh_chan->desc_lock);
while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
@@ -274,6 +333,9 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
}
spin_unlock_bh(&sh_chan->desc_lock);
+ if (!sh_chan->descs_allocated)
+ pm_runtime_put(sh_chan->dev);
+
return sh_chan->descs_allocated;
}
@@ -285,11 +347,20 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
struct sh_desc *desc, *_desc;
LIST_HEAD(list);
+ int descs = sh_chan->descs_allocated;
+
+ dmae_halt(sh_chan);
/* Prepared and not submitted descriptors can still be on the queue */
if (!list_empty(&sh_chan->ld_queue))
sh_dmae_chan_ld_cleanup(sh_chan, true);
+ if (chan->private) {
+ /* The caller is holding dma_list_mutex */
+ struct sh_dmae_slave *param = chan->private;
+ clear_bit(param->slave_id, sh_dmae_slave_used);
+ }
+
spin_lock_bh(&sh_chan->desc_lock);
list_splice_init(&sh_chan->ld_free, &list);
@@ -297,27 +368,104 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
spin_unlock_bh(&sh_chan->desc_lock);
+ if (descs > 0)
+ pm_runtime_put(sh_chan->dev);
+
list_for_each_entry_safe(desc, _desc, &list, node)
kfree(desc);
}
-static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
- struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
- size_t len, unsigned long flags)
+/**
+ * sh_dmae_add_desc - get, set up and return one transfer descriptor
+ * @sh_chan: DMA channel
+ * @flags: DMA transfer flags
+ * @dest: destination DMA address, incremented when direction equals
+ * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
+ * @src: source DMA address, incremented when direction equals
+ * DMA_TO_DEVICE or DMA_BIDIRECTIONAL
+ * @len: DMA transfer length
+ * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
+ * @direction: needed for slave DMA to decide which address to keep constant,
+ * equals DMA_BIDIRECTIONAL for MEMCPY
+ * Returns the prepared descriptor on success or NULL on failure
+ * Locks: called with desc_lock held
+ */
+static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
+ unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
+ struct sh_desc **first, enum dma_data_direction direction)
{
- struct sh_dmae_chan *sh_chan;
- struct sh_desc *first = NULL, *prev = NULL, *new;
+ struct sh_desc *new;
size_t copy_size;
- LIST_HEAD(tx_list);
- int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
- if (!chan)
+ if (!*len)
return NULL;
- if (!len)
+ /* Allocate the link descriptor from the free list */
+ new = sh_dmae_get_desc(sh_chan);
+ if (!new) {
+ dev_err(sh_chan->dev, "No free link descriptor available\n");
return NULL;
+ }
- sh_chan = to_sh_chan(chan);
+ copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
+
+ new->hw.sar = *src;
+ new->hw.dar = *dest;
+ new->hw.tcr = copy_size;
+
+ if (!*first) {
+ /* First desc */
+ new->async_tx.cookie = -EBUSY;
+ *first = new;
+ } else {
+ /* Other desc - invisible to the user */
+ new->async_tx.cookie = -EINVAL;
+ }
+
+ dev_dbg(sh_chan->dev,
+ "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
+ copy_size, *len, *src, *dest, &new->async_tx,
+ new->async_tx.cookie, sh_chan->xmit_shift);
+
+ new->mark = DESC_PREPARED;
+ new->async_tx.flags = flags;
+ new->direction = direction;
+
+ *len -= copy_size;
+ if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
+ *src += copy_size;
+ if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
+ *dest += copy_size;
+
+ return new;
+}
+
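Because the hardware transfer counter (TCR) is only 24 bits wide (SH_DMA_TCR_MAX), one logical copy may need several descriptors; sh_dmae_prep_sg() below pre-computes the chunk count per SG element the same way the old memcpy path did. A stand-alone sketch of that arithmetic:

#include <stdio.h>
#include <stddef.h>

#define SH_DMA_TCR_MAX 0x00FFFFFFul	/* largest count one descriptor carries */

/* Descriptors needed for len bytes: ceil(len / (TCR_MAX + 1)), written the
 * same way as in the driver. */
static size_t chunks_for(size_t len)
{
	return (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
}

int main(void)
{
	size_t len = 40ul << 20;	/* 40 MiB */
	size_t left = len;

	printf("%zu bytes -> %zu chunk(s)\n", len, chunks_for(len));	/* 3 */

	/* Walk the chunks the way sh_dmae_add_desc() consumes *len */
	while (left) {
		size_t copy = left < SH_DMA_TCR_MAX + 1 ? left : SH_DMA_TCR_MAX + 1;

		printf("  descriptor for %zu bytes\n", copy);
		left -= copy;
	}
	return 0;
}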
+/*
+ * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
+ *
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
+ * converted to scatter-gather to guarantee consistent locking and a correct
+ * list manipulation. For slave DMA, direction carries the usual meaning, and,
+ * logically, the SG list is RAM and the addr variable contains the slave
+ * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
+ * DMA_BIDIRECTIONAL and the SG list contains only one element and points at
+ * the source buffer.
+ */
+static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
+ struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
+ enum dma_data_direction direction, unsigned long flags)
+{
+ struct scatterlist *sg;
+ struct sh_desc *first = NULL, *new = NULL /* compiler... */;
+ LIST_HEAD(tx_list);
+ int chunks = 0;
+ int i;
+
+ if (!sg_len)
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i)
+ chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
+ (SH_DMA_TCR_MAX + 1);
/* Have to lock the whole loop to protect against concurrent release */
spin_lock_bh(&sh_chan->desc_lock);
@@ -333,49 +481,32 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
* only during this function, then they are immediately spliced
* back onto the free list in form of a chain
*/
- do {
- /* Allocate the link descriptor from the free list */
- new = sh_dmae_get_desc(sh_chan);
- if (!new) {
- dev_err(sh_chan->dev,
- "No free memory for link descriptor\n");
- list_for_each_entry(new, &tx_list, node)
- new->mark = DESC_IDLE;
- list_splice(&tx_list, &sh_chan->ld_free);
- spin_unlock_bh(&sh_chan->desc_lock);
- return NULL;
- }
-
- copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
-
- new->hw.sar = dma_src;
- new->hw.dar = dma_dest;
- new->hw.tcr = copy_size;
- if (!first) {
- /* First desc */
- new->async_tx.cookie = -EBUSY;
- first = new;
- } else {
- /* Other desc - invisible to the user */
- new->async_tx.cookie = -EINVAL;
- }
-
- dev_dbg(sh_chan->dev,
- "chaining %u of %u with %p, dst %x, cookie %d\n",
- copy_size, len, &new->async_tx, dma_dest,
- new->async_tx.cookie);
-
- new->mark = DESC_PREPARED;
- new->async_tx.flags = flags;
- new->chunks = chunks--;
-
- prev = new;
- len -= copy_size;
- dma_src += copy_size;
- dma_dest += copy_size;
- /* Insert the link descriptor to the LD ring */
- list_add_tail(&new->node, &tx_list);
- } while (len);
+ for_each_sg(sgl, sg, sg_len, i) {
+ dma_addr_t sg_addr = sg_dma_address(sg);
+ size_t len = sg_dma_len(sg);
+
+ if (!len)
+ goto err_get_desc;
+
+ do {
+ dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
+ i, sg, len, (unsigned long long)sg_addr);
+
+ if (direction == DMA_FROM_DEVICE)
+ new = sh_dmae_add_desc(sh_chan, flags,
+ &sg_addr, addr, &len, &first,
+ direction);
+ else
+ new = sh_dmae_add_desc(sh_chan, flags,
+ addr, &sg_addr, &len, &first,
+ direction);
+ if (!new)
+ goto err_get_desc;
+
+ new->chunks = chunks--;
+ list_add_tail(&new->node, &tx_list);
+ } while (len);
+ }
if (new != first)
new->async_tx.cookie = -ENOSPC;
@@ -386,6 +517,90 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
spin_unlock_bh(&sh_chan->desc_lock);
return &first->async_tx;
+
+err_get_desc:
+ list_for_each_entry(new, &tx_list, node)
+ new->mark = DESC_IDLE;
+ list_splice(&tx_list, &sh_chan->ld_free);
+
+ spin_unlock_bh(&sh_chan->desc_lock);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+ size_t len, unsigned long flags)
+{
+ struct sh_dmae_chan *sh_chan;
+ struct scatterlist sg;
+
+ if (!chan || !len)
+ return NULL;
+
+ chan->private = NULL;
+
+ sh_chan = to_sh_chan(chan);
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
+ offset_in_page(dma_src));
+ sg_dma_address(&sg) = dma_src;
+ sg_dma_len(&sg) = len;
+
+ return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+ flags);
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_data_direction direction, unsigned long flags)
+{
+ struct sh_dmae_slave *param;
+ struct sh_dmae_chan *sh_chan;
+
+ if (!chan)
+ return NULL;
+
+ sh_chan = to_sh_chan(chan);
+ param = chan->private;
+
+ /* Someone calling slave DMA on a public channel? */
+ if (!param || !sg_len) {
+ dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
+ __func__, param, sg_len, param ? param->slave_id : -1);
+ return NULL;
+ }
+
+ /*
+ * if (param != NULL), this is a successfully requested slave channel,
+ * therefore param->config != NULL too.
+ */
+ return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
+ direction, flags);
+}
+
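As the code above shows, the slave path expects a struct sh_dmae_slave in chan->private before prep_slave_sg() is called; in this kernel generation a client typically attaches it from a dma_request_channel() filter function. A hedged client-side sketch of that hand-off (the slave ID is a placeholder, and a real filter would also verify that the offered channel belongs to the right DMAC):

#include <linux/dmaengine.h>
#include <asm/dmaengine.h>	/* struct sh_dmae_slave, SHDMA_SLAVE_* ids */

/* Filter callback: attach our slave descriptor to the channel we accept,
 * so sh_dmae_alloc_chan_resources() can look up its configuration. */
static bool example_filter(struct dma_chan *chan, void *arg)
{
	struct sh_dmae_slave *param = arg;

	chan->private = param;
	return true;
}

static struct dma_chan *example_request_slave_chan(void)
{
	/* static: chan->private must stay valid while the channel is held.
	 * Placeholder ID 0 stands in for a real SHDMA_SLAVE_* value. */
	static struct sh_dmae_slave param = { .slave_id = 0 };
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, example_filter, &param);
}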
+static void sh_dmae_terminate_all(struct dma_chan *chan)
+{
+ struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+
+ if (!chan)
+ return;
+
+ dmae_halt(sh_chan);
+
+ spin_lock_bh(&sh_chan->desc_lock);
+ if (!list_empty(&sh_chan->ld_queue)) {
+ /* Record partial transfer */
+ struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
+ struct sh_desc, node);
+ desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
+ sh_chan->xmit_shift;
+
+ }
+ spin_unlock_bh(&sh_chan->desc_lock);
+
+ sh_dmae_chan_ld_cleanup(sh_chan, true);
}
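terminate_all() records a partial-transfer figure for the aborted descriptor by reading back TCR, which counts in transfer units of 2^xmit_shift bytes (xmit_shift is log2 of the bytes per transfer, per shdma.h). A tiny stand-alone sketch of the unit-to-byte conversion involved, not a literal copy of the driver's expression:

#include <stdio.h>
#include <stdint.h>

/* Bytes already transferred when a descriptor is aborted: the counter was
 * loaded with tcr_programmed transfer units and has counted down to
 * tcr_now; xmit_shift is log2(bytes per transfer unit). */
static uint32_t partial_bytes(uint32_t tcr_programmed, uint32_t tcr_now,
			      int xmit_shift)
{
	return (tcr_programmed - tcr_now) << xmit_shift;
}

int main(void)
{
	/* 4096 units programmed, 1000 left, 32-byte units (shift 5) */
	printf("%u bytes done\n", partial_bytes(4096, 1000, 5));	/* 99072 */
	return 0;
}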
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -419,7 +634,11 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
cookie = tx->cookie;
if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
- BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
+ if (sh_chan->completed_cookie != desc->cookie - 1)
+ dev_dbg(sh_chan->dev,
+ "Completing cookie %d, expected %d\n",
+ desc->cookie,
+ sh_chan->completed_cookie + 1);
sh_chan->completed_cookie = desc->cookie;
}
@@ -483,7 +702,7 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
- struct sh_desc *sd;
+ struct sh_desc *desc;
spin_lock_bh(&sh_chan->desc_lock);
/* DMA work check */
@@ -492,11 +711,14 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
return;
}
- /* Find the first un-transfer desciptor */
- list_for_each_entry(sd, &sh_chan->ld_queue, node)
- if (sd->mark == DESC_SUBMITTED) {
+ /* Find the first not yet transferred descriptor */
+ list_for_each_entry(desc, &sh_chan->ld_queue, node)
+ if (desc->mark == DESC_SUBMITTED) {
+ dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
+ desc->async_tx.cookie, sh_chan->id,
+ desc->hw.tcr, desc->hw.sar, desc->hw.dar);
/* Get the ld start address from ld_queue */
- dmae_set_reg(sh_chan, &sd->hw);
+ dmae_set_reg(sh_chan, &desc->hw);
dmae_start(sh_chan);
break;
}
@@ -518,6 +740,7 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
+ enum dma_status status;
sh_dmae_chan_ld_cleanup(sh_chan, false);
@@ -531,7 +754,27 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
if (used)
*used = last_used;
- return dma_async_is_complete(cookie, last_complete, last_used);
+ spin_lock_bh(&sh_chan->desc_lock);
+
+ status = dma_async_is_complete(cookie, last_complete, last_used);
+
+ /*
+ * If we don't find the cookie on the queue, it has been aborted and we
+ * have to report an error
+ */
+ if (status != DMA_SUCCESS) {
+ struct sh_desc *desc;
+ status = DMA_ERROR;
+ list_for_each_entry(desc, &sh_chan->ld_queue, node)
+ if (desc->cookie == cookie) {
+ status = DMA_IN_PROGRESS;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&sh_chan->desc_lock);
+
+ return status;
}
static irqreturn_t sh_dmae_interrupt(int irq, void *data)
@@ -554,40 +797,32 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
- int err = 0;
struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
+ int i;
- /* IRQ Multi */
- if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
- int cnt = 0;
- switch (irq) {
-#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
- case DMTE6_IRQ:
- cnt++;
-#endif
- case DMTE0_IRQ:
- if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
- disable_irq(irq);
- return IRQ_HANDLED;
+ /* halt the dma controller */
+ sh_dmae_ctl_stop(shdev);
+
+ /* We cannot detect which channel caused the error, so we reset them all */
+ for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+ if (sh_chan) {
+ struct sh_desc *desc;
+ /* Stop the channel */
+ dmae_halt(sh_chan);
+ /* Complete all */
+ list_for_each_entry(desc, &sh_chan->ld_queue, node) {
+ struct dma_async_tx_descriptor *tx = &desc->async_tx;
+ desc->mark = DESC_IDLE;
+ if (tx->callback)
+ tx->callback(tx->callback_param);
}
- default:
- return IRQ_NONE;
- }
- } else {
- /* reset dma controller */
- err = sh_dmae_rst(0);
- if (err)
- return err;
-#ifdef SH_DMAC_BASE1
- if (shdev->pdata.mode & SHDMA_DMAOR1) {
- err = sh_dmae_rst(1);
- if (err)
- return err;
+ list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
}
-#endif
- disable_irq(irq);
- return IRQ_HANDLED;
}
+ sh_dmae_rst(shdev);
+
+ return IRQ_HANDLED;
}
#endif
@@ -596,11 +831,14 @@ static void dmae_do_tasklet(unsigned long data)
struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
struct sh_desc *desc;
u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+ u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
spin_lock(&sh_chan->desc_lock);
list_for_each_entry(desc, &sh_chan->ld_queue, node) {
- if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
- desc->mark == DESC_SUBMITTED) {
+ if (desc->mark == DESC_SUBMITTED &&
+ ((desc->direction == DMA_FROM_DEVICE &&
+ (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
+ (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
desc->async_tx.cookie, &desc->async_tx,
desc->hw.dar);
@@ -615,19 +853,12 @@ static void dmae_do_tasklet(unsigned long data)
sh_dmae_chan_ld_cleanup(sh_chan, false);
}
-static unsigned int get_dmae_irq(unsigned int id)
-{
- unsigned int irq = 0;
- if (id < ARRAY_SIZE(dmte_irq_map))
- irq = dmte_irq_map[id];
- return irq;
-}
-
-static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
+static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
+ int irq, unsigned long flags)
{
int err;
- unsigned int irq = get_dmae_irq(id);
- unsigned long irqflags = IRQF_DISABLED;
+ struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+ struct platform_device *pdev = to_platform_device(shdev->common.dev);
struct sh_dmae_chan *new_sh_chan;
/* alloc channel */
@@ -638,8 +869,13 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
return -ENOMEM;
}
+ /* copy struct dma_device */
+ new_sh_chan->common.device = &shdev->common;
+
new_sh_chan->dev = shdev->common.dev;
new_sh_chan->id = id;
+ new_sh_chan->irq = irq;
+ new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
/* Init DMA tasklet */
tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
@@ -654,29 +890,20 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
INIT_LIST_HEAD(&new_sh_chan->ld_queue);
INIT_LIST_HEAD(&new_sh_chan->ld_free);
- /* copy struct dma_device */
- new_sh_chan->common.device = &shdev->common;
-
/* Add the channel to DMA device channel list */
list_add_tail(&new_sh_chan->common.device_node,
&shdev->common.channels);
shdev->common.chancnt++;
- if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
- irqflags = IRQF_SHARED;
-#if defined(DMTE6_IRQ)
- if (irq >= DMTE6_IRQ)
- irq = DMTE6_IRQ;
- else
-#endif
- irq = DMTE0_IRQ;
- }
-
- snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
- "sh-dmae%d", new_sh_chan->id);
+ if (pdev->id >= 0)
+ snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
+ "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
+ else
+ snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
+ "sh-dma%d", new_sh_chan->id);
/* set up channel irq */
- err = request_irq(irq, &sh_dmae_interrupt, irqflags,
+ err = request_irq(irq, &sh_dmae_interrupt, flags,
new_sh_chan->dev_id, new_sh_chan);
if (err) {
dev_err(shdev->common.dev, "DMA channel %d request_irq error "
@@ -684,11 +911,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
goto err_no_irq;
}
- /* CHCR register control function */
- new_sh_chan->set_chcr = dmae_set_chcr;
- /* DMARS register control function */
- new_sh_chan->set_dmars = dmae_set_dmars;
-
shdev->chan[id] = new_sh_chan;
return 0;
@@ -705,12 +927,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
if (shdev->chan[i]) {
- struct sh_dmae_chan *shchan = shdev->chan[i];
- if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
- free_irq(dmte_irq_map[i], shchan);
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
- list_del(&shchan->common.device_node);
- kfree(shchan);
+ free_irq(sh_chan->irq, sh_chan);
+
+ list_del(&sh_chan->common.device_node);
+ kfree(sh_chan);
shdev->chan[i] = NULL;
}
}
@@ -719,85 +941,164 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
static int __init sh_dmae_probe(struct platform_device *pdev)
{
- int err = 0, cnt, ecnt;
- unsigned long irqflags = IRQF_DISABLED;
-#if defined(CONFIG_CPU_SH4)
- int eirq[] = { DMAE0_IRQ,
-#if defined(DMAE1_IRQ)
- DMAE1_IRQ
-#endif
- };
-#endif
+ struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
+ unsigned long irqflags = IRQF_DISABLED,
+ chan_flag[SH_DMAC_MAX_CHANNELS] = {};
+ int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
+ int err, i, irq_cnt = 0, irqres = 0;
struct sh_dmae_device *shdev;
+ struct resource *chan, *dmars, *errirq_res, *chanirq_res;
/* get platform data */
- if (!pdev->dev.platform_data)
+ if (!pdata || !pdata->channel_num)
+ return -ENODEV;
+
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* The DMARS area is optional; if absent, this controller cannot do slave DMA */
+ dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ /*
+ * IRQ resources:
+ * 1. there must always be at least one IRQ IO-resource. On SH4 it is
+ * the error IRQ, in which case it is the only IRQ in this resource:
+ * start == end. If it is the only IRQ resource, all channels also
+ * use the same IRQ.
+ * 2. DMA channel IRQ resources can be specified one per resource or in
+ * ranges (start != end)
+ * 3. iff all events (channels and, optionally, error) on this
+ * controller use the same IRQ, only one IRQ resource can be
+ * specified, otherwise there must be one IRQ per channel, even if
+ * some of them are equal
+ * 4. if all IRQs on this controller are equal or if some specific IRQs
+ * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
+ * requested with the IRQF_SHARED flag
+ */
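To make the resource conventions above concrete, a hypothetical board file might describe a 6-channel controller like this (all addresses and IRQ numbers are made up for illustration):

#include <linux/platform_device.h>
#include <linux/ioport.h>

/* Hypothetical layout: channel register block, optional DMARS block, one
 * error IRQ (start == end), then one IRQ range covering six channels.
 * Numbers are placeholders, not any real SoC's map. */
static struct resource example_dmac_resources[] = {
	{
		.start	= 0xfe008020,		/* channel registers */
		.end	= 0xfe00808f,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 0xfe009000,		/* DMARS (optional, enables slave DMA) */
		.end	= 0xfe00900b,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 34,			/* error IRQ: first IRQ resource */
		.end	= 34,
		.flags	= IORESOURCE_IRQ,
	}, {
		.start	= 48,			/* channel IRQs given as one range */
		.end	= 53,
		.flags	= IORESOURCE_IRQ,
	},
};

Per rule 4 above, marking an IRQ resource with IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE would make the probe routine request it with IRQF_SHARED.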
+ errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!chan || !errirq_res)
return -ENODEV;
+ if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
+ dev_err(&pdev->dev, "DMAC register region already claimed\n");
+ return -EBUSY;
+ }
+
+ if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
+ dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
+ err = -EBUSY;
+ goto ermrdmars;
+ }
+
+ err = -ENOMEM;
shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
if (!shdev) {
- dev_err(&pdev->dev, "No enough memory\n");
- return -ENOMEM;
+ dev_err(&pdev->dev, "Not enough memory\n");
+ goto ealloc;
+ }
+
+ shdev->chan_reg = ioremap(chan->start, resource_size(chan));
+ if (!shdev->chan_reg)
+ goto emapchan;
+ if (dmars) {
+ shdev->dmars = ioremap(dmars->start, resource_size(dmars));
+ if (!shdev->dmars)
+ goto emapdmars;
}
/* platform data */
- memcpy(&shdev->pdata, pdev->dev.platform_data,
- sizeof(struct sh_dmae_pdata));
+ shdev->pdata = pdata;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
/* reset dma controller */
- err = sh_dmae_rst(0);
+ err = sh_dmae_rst(shdev);
if (err)
goto rst_err;
- /* SH7780/85/23 has DMAOR1 */
- if (shdev->pdata.mode & SHDMA_DMAOR1) {
- err = sh_dmae_rst(1);
- if (err)
- goto rst_err;
- }
-
INIT_LIST_HEAD(&shdev->common.channels);
dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
+ if (dmars)
+ dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
+
shdev->common.device_alloc_chan_resources
= sh_dmae_alloc_chan_resources;
shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
shdev->common.device_is_tx_complete = sh_dmae_is_complete;
shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
+
+ /* Fields compulsory for DMA_SLAVE */
+ shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
+ shdev->common.device_terminate_all = sh_dmae_terminate_all;
+
shdev->common.dev = &pdev->dev;
/* Default transfer size of 32 bytes requires 32-byte alignment */
- shdev->common.copy_align = 5;
+ shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
#if defined(CONFIG_CPU_SH4)
- /* Non Mix IRQ mode SH7722/SH7730 etc... */
- if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
+ chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+
+ if (!chanirq_res)
+ chanirq_res = errirq_res;
+ else
+ irqres++;
+
+ if (chanirq_res == errirq_res ||
+ (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
irqflags = IRQF_SHARED;
- eirq[0] = DMTE0_IRQ;
-#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
- eirq[1] = DMTE6_IRQ;
-#endif
+
+ errirq = errirq_res->start;
+
+ err = request_irq(errirq, sh_dmae_err, irqflags,
+ "DMAC Address Error", shdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA failed requesting irq #%d, error %d\n",
+ errirq, err);
+ goto eirq_err;
}
- for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
- err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
- "DMAC Address Error", shdev);
- if (err) {
- dev_err(&pdev->dev, "DMA device request_irq"
- "error (irq %d) with return %d\n",
- eirq[ecnt], err);
- goto eirq_err;
+#else
+ chanirq_res = errirq_res;
+#endif /* CONFIG_CPU_SH4 */
+
+ if (chanirq_res->start == chanirq_res->end &&
+ !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
+ /* Special case - all multiplexed */
+ for (; irq_cnt < pdata->channel_num; irq_cnt++) {
+ chan_irq[irq_cnt] = chanirq_res->start;
+ chan_flag[irq_cnt] = IRQF_SHARED;
}
+ } else {
+ do {
+ for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
+ if ((errirq_res->flags & IORESOURCE_BITS) ==
+ IORESOURCE_IRQ_SHAREABLE)
+ chan_flag[irq_cnt] = IRQF_SHARED;
+ else
+ chan_flag[irq_cnt] = IRQF_DISABLED;
+ dev_dbg(&pdev->dev,
+ "Found IRQ %d for channel %d\n",
+ i, irq_cnt);
+ chan_irq[irq_cnt++] = i;
+ }
+ chanirq_res = platform_get_resource(pdev,
+ IORESOURCE_IRQ, ++irqres);
+ } while (irq_cnt < pdata->channel_num && chanirq_res);
}
-#endif /* CONFIG_CPU_SH4 */
+
+ if (irq_cnt < pdata->channel_num)
+ goto eirqres;
/* Create DMA Channel */
- for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
- err = sh_dmae_chan_probe(shdev, cnt);
+ for (i = 0; i < pdata->channel_num; i++) {
+ err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
if (err)
goto chan_probe_err;
}
+ pm_runtime_put(&pdev->dev);
+
platform_set_drvdata(pdev, shdev);
dma_async_device_register(&shdev->common);
@@ -805,13 +1106,24 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
chan_probe_err:
sh_dmae_chan_remove(shdev);
-
+eirqres:
+#if defined(CONFIG_CPU_SH4)
+ free_irq(errirq, shdev);
eirq_err:
- for (ecnt-- ; ecnt >= 0; ecnt--)
- free_irq(eirq[ecnt], shdev);
-
+#endif
rst_err:
+ pm_runtime_put(&pdev->dev);
+ if (dmars)
+ iounmap(shdev->dmars);
+emapdmars:
+ iounmap(shdev->chan_reg);
+emapchan:
kfree(shdev);
+ealloc:
+ if (dmars)
+ release_mem_region(dmars->start, resource_size(dmars));
+ermrdmars:
+ release_mem_region(chan->start, resource_size(chan));
return err;
}
@@ -819,36 +1131,39 @@ rst_err:
static int __exit sh_dmae_remove(struct platform_device *pdev)
{
struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+ struct resource *res;
+ int errirq = platform_get_irq(pdev, 0);
dma_async_device_unregister(&shdev->common);
- if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
- free_irq(DMTE0_IRQ, shdev);
-#if defined(DMTE6_IRQ)
- free_irq(DMTE6_IRQ, shdev);
-#endif
- }
+ if (errirq > 0)
+ free_irq(errirq, shdev);
/* channel data remove */
sh_dmae_chan_remove(shdev);
- if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
- free_irq(DMAE0_IRQ, shdev);
-#if defined(DMAE1_IRQ)
- free_irq(DMAE1_IRQ, shdev);
-#endif
- }
+ pm_runtime_disable(&pdev->dev);
+
+ if (shdev->dmars)
+ iounmap(shdev->dmars);
+ iounmap(shdev->chan_reg);
+
kfree(shdev);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
return 0;
}
static void sh_dmae_shutdown(struct platform_device *pdev)
{
struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
- sh_dmae_ctl_stop(0);
- if (shdev->pdata.mode & SHDMA_DMAOR1)
- sh_dmae_ctl_stop(1);
+ sh_dmae_ctl_stop(shdev);
}
static struct platform_driver sh_dmae_driver = {
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 108f1cffb6f..153609a1e96 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -17,22 +17,9 @@
#include <linux/interrupt.h>
#include <linux/list.h>
-#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
-
-struct sh_dmae_regs {
- u32 sar; /* SAR / source address */
- u32 dar; /* DAR / destination address */
- u32 tcr; /* TCR / transfer count */
-};
+#include <asm/dmaengine.h>
-struct sh_desc {
- struct sh_dmae_regs hw;
- struct list_head node;
- struct dma_async_tx_descriptor async_tx;
- dma_cookie_t cookie;
- int chunks;
- int mark;
-};
+#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
struct device;
@@ -45,19 +32,19 @@ struct sh_dmae_chan {
struct device *dev; /* Channel device */
struct tasklet_struct tasklet; /* Tasklet */
int descs_allocated; /* desc count */
+ int xmit_shift; /* log_2(bytes_per_xfer) */
+ int irq;
int id; /* Raw id of this channel */
+ u32 __iomem *base;
char dev_id[16]; /* unique name per DMAC of channel */
-
- /* Set chcr */
- int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
- /* Set DMA resource */
- int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res);
};
struct sh_dmae_device {
struct dma_device common;
- struct sh_dmae_chan *chan[MAX_DMA_CHANNELS];
- struct sh_dmae_pdata pdata;
+ struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS];
+ struct sh_dmae_pdata *pdata;
+ u32 __iomem *chan_reg;
+ u16 __iomem *dmars;
};
#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)