Diffstat (limited to 'drivers/dma/sh')
-rw-r--r--   drivers/dma/sh/Makefile        2
-rw-r--r--   drivers/dma/sh/shdma-base.c  943
-rw-r--r--   drivers/dma/sh/shdma.c       955
-rw-r--r--   drivers/dma/sh/shdma.h        64
4 files changed, 1964 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
new file mode 100644
index 00000000000..54ae9572b0a
--- /dev/null
+++ b/drivers/dma/sh/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SH_DMAE) += shdma-base.o
+obj-$(CONFIG_SH_DMAE) += shdma.o
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
new file mode 100644
index 00000000000..f4cd946d259
--- /dev/null
+++ b/drivers/dma/sh/shdma-base.c
@@ -0,0 +1,943 @@
+/*
+ * Dmaengine driver base library for DMA controllers found on SH-based SoCs
+ *
+ * extracted from shdma.c
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/shdma-base.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+
+/* DMA descriptor control */
+enum shdma_desc_status {
+ DESC_IDLE,
+ DESC_PREPARED,
+ DESC_SUBMITTED,
+ DESC_COMPLETED, /* completed, have to call callback */
+ DESC_WAITING, /* callback called, waiting for ack / re-submit */
+};
+
+#define NR_DESCS_PER_CHANNEL 32
+
+#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
+#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)
+
+/*
+ * For slave DMA we assume that there is a finite number of DMA slaves in the
+ * system, and that each such slave can only use a finite number of channels.
+ * We use slave channel IDs to make sure that no such slave channel ID is
+ * allocated more than once.
+ */
+static unsigned int slave_num = 256;
+module_param(slave_num, uint, 0444);
+
+/* A bitmask with slave_num bits */
+static unsigned long *shdma_slave_used;
+
+/* Called under spin_lock_irq(&schan->chan_lock) */
+static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
+{
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ struct shdma_desc *sdesc;
+
+ /* DMA work check */
+ if (ops->channel_busy(schan))
+ return;
+
+ /* Find the first not transferred descriptor */
+ list_for_each_entry(sdesc, &schan->ld_queue, node)
+ if (sdesc->mark == DESC_SUBMITTED) {
+ ops->start_xfer(schan, sdesc);
+ break;
+ }
+}
+
+static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct shdma_desc *chunk, *c, *desc =
+ container_of(tx, struct shdma_desc, async_tx),
+ *last = desc;
+ struct shdma_chan *schan = to_shdma_chan(tx->chan);
+ dma_async_tx_callback callback = tx->callback;
+ dma_cookie_t cookie;
+ bool power_up;
+
+ spin_lock_irq(&schan->chan_lock);
+
+ power_up = list_empty(&schan->ld_queue);
+
+ cookie = dma_cookie_assign(tx);
+
+ /* Mark all chunks of this descriptor as submitted, move to the queue */
+ list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
+ /*
+ * All chunks are on the global ld_free, so, we have to find
+ * the end of the chain ourselves
+ */
+ if (chunk != desc && (chunk->mark == DESC_IDLE ||
+ chunk->async_tx.cookie > 0 ||
+ chunk->async_tx.cookie == -EBUSY ||
+ &chunk->node == &schan->ld_free))
+ break;
+ chunk->mark = DESC_SUBMITTED;
+ /* Callback goes to the last chunk */
+ chunk->async_tx.callback = NULL;
+ chunk->cookie = cookie;
+ list_move_tail(&chunk->node, &schan->ld_queue);
+ last = chunk;
+
+ dev_dbg(schan->dev, "submit #%d@%p on %d\n",
+ tx->cookie, &last->async_tx, schan->id);
+ }
+
+ last->async_tx.callback = callback;
+ last->async_tx.callback_param = tx->callback_param;
+
+ if (power_up) {
+ int ret;
+ schan->pm_state = SHDMA_PM_BUSY;
+
+ ret = pm_runtime_get(schan->dev);
+
+ spin_unlock_irq(&schan->chan_lock);
+ if (ret < 0)
+ dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
+
+ pm_runtime_barrier(schan->dev);
+
+ spin_lock_irq(&schan->chan_lock);
+
+ /* Have we been reset, while waiting? */
+ if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
+ struct shdma_dev *sdev =
+ to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ dev_dbg(schan->dev, "Bring up channel %d\n",
+ schan->id);
+ /*
+ * TODO: .setup_xfer() might fail on some platforms. Make it
+ * return an error then; on error, remove chunks from the
+ * queue again
+ */
+ ops->setup_xfer(schan, schan->slave_id);
+
+ if (schan->pm_state == SHDMA_PM_PENDING)
+ shdma_chan_xfer_ld_queue(schan);
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+ }
+ } else {
+ /*
+ * Tell .device_issue_pending() not to run the queue, interrupts
+ * will do it anyway
+ */
+ schan->pm_state = SHDMA_PM_PENDING;
+ }
+
+ spin_unlock_irq(&schan->chan_lock);
+
+ return cookie;
+}
+
+/* Called with desc_lock held */
+static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
+{
+ struct shdma_desc *sdesc;
+
+ list_for_each_entry(sdesc, &schan->ld_free, node)
+ if (sdesc->mark != DESC_PREPARED) {
+ BUG_ON(sdesc->mark != DESC_IDLE);
+ list_del(&sdesc->node);
+ return sdesc;
+ }
+
+ return NULL;
+}
+
+static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
+{
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ int ret;
+
+ if (slave_id < 0 || slave_id >= slave_num)
+ return -EINVAL;
+
+ if (test_and_set_bit(slave_id, shdma_slave_used))
+ return -EBUSY;
+
+ ret = ops->set_slave(schan, slave_id, false);
+ if (ret < 0) {
+ clear_bit(slave_id, shdma_slave_used);
+ return ret;
+ }
+
+ schan->slave_id = slave_id;
+
+ return 0;
+}
+
+/*
+ * This is the standard shdma filter function to be used as a replacement for
+ * the "old" method, using the .private pointer. If for some reason you
+ * allocate a channel without slave data, use something like ERR_PTR(-EINVAL)
+ * as a filter parameter. If this filter is used, the slave driver, after
+ * calling dma_request_channel(), will also have to call
+ * dmaengine_slave_config() with .slave_id, .direction, and either .src_addr
+ * or .dst_addr set.
+ * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
+ * capability! If this becomes a requirement, hardware glue drivers using
+ * these services would have to provide their own filters, which would first
+ * check the device driver, similar to how other DMAC drivers, e.g.,
+ * sa11x0-dma.c, do this, and only then, in case of a match, call this common
+ * filter.
+ */
+bool shdma_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ int slave_id = (int)arg;
+ int ret;
+
+ if (slave_id < 0)
+ /* No slave requested - arbitrary channel */
+ return true;
+
+ if (slave_id >= slave_num)
+ return false;
+
+ ret = ops->set_slave(schan, slave_id, true);
+ if (ret < 0)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(shdma_chan_filter);
+
+static int shdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ struct shdma_desc *desc;
+ struct shdma_slave *slave = chan->private;
+ int ret, i;
+
+ /*
+ * This relies on the guarantee from dmaengine that alloc_chan_resources
+ * never runs concurrently with itself or free_chan_resources.
+ */
+ if (slave) {
+ /* Legacy mode: .private is set in filter */
+ ret = shdma_setup_slave(schan, slave->slave_id);
+ if (ret < 0)
+ goto esetslave;
+ } else {
+ schan->slave_id = -EINVAL;
+ }
+
+ schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
+ sdev->desc_size, GFP_KERNEL);
+ if (!schan->desc) {
+ ret = -ENOMEM;
+ goto edescalloc;
+ }
+ schan->desc_num = NR_DESCS_PER_CHANNEL;
+
+ for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
+ desc = ops->embedded_desc(schan->desc, i);
+ dma_async_tx_descriptor_init(&desc->async_tx,
+ &schan->dma_chan);
+ desc->async_tx.tx_submit = shdma_tx_submit;
+ desc->mark = DESC_IDLE;
+
+ list_add(&desc->node, &schan->ld_free);
+ }
+
+ return NR_DESCS_PER_CHANNEL;
+
+edescalloc:
+ if (slave)
+esetslave:
+ clear_bit(slave->slave_id, shdma_slave_used);
+ chan->private = NULL;
+ return ret;
+}
+
+static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
+{
+ struct shdma_desc *desc, *_desc;
+ /* Is the "exposed" head of a chain acked? */
+ bool head_acked = false;
+ dma_cookie_t cookie = 0;
+ dma_async_tx_callback callback = NULL;
+ void *param = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->chan_lock, flags);
+ list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
+ struct dma_async_tx_descriptor *tx = &desc->async_tx;
+
+ BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
+ BUG_ON(desc->mark != DESC_SUBMITTED &&
+ desc->mark != DESC_COMPLETED &&
+ desc->mark != DESC_WAITING);
+
+ /*
+ * queue is ordered, and we use this loop to (1) clean up all
+ * completed descriptors, and to (2) update descriptor flags of
+ * any chunks in a (partially) completed chain
+ */
+ if (!all && desc->mark == DESC_SUBMITTED &&
+ desc->cookie != cookie)
+ break;
+
+ if (tx->cookie > 0)
+ cookie = tx->cookie;
+
+ if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
+ if (schan->dma_chan.completed_cookie != desc->cookie - 1)
+ dev_dbg(schan->dev,
+ "Completing cookie %d, expected %d\n",
+ desc->cookie,
+ schan->dma_chan.completed_cookie + 1);
+ schan->dma_chan.completed_cookie = desc->cookie;
+ }
+
+ /* Call callback on the last chunk */
+ if (desc->mark == DESC_COMPLETED && tx->callback) {
+ desc->mark = DESC_WAITING;
+ callback = tx->callback;
+ param = tx->callback_param;
+ dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
+ tx->cookie, tx, schan->id);
+ BUG_ON(desc->chunks != 1);
+ break;
+ }
+
+ if (tx->cookie > 0 || tx->cookie == -EBUSY) {
+ if (desc->mark == DESC_COMPLETED) {
+ BUG_ON(tx->cookie < 0);
+ desc->mark = DESC_WAITING;
+ }
+ head_acked = async_tx_test_ack(tx);
+ } else {
+ switch (desc->mark) {
+ case DESC_COMPLETED:
+ desc->mark = DESC_WAITING;
+ /* Fall through */
+ case DESC_WAITING:
+ if (head_acked)
+ async_tx_ack(&desc->async_tx);
+ }
+ }
+
+ dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
+ tx, tx->cookie);
+
+ if (((desc->mark == DESC_COMPLETED ||
+ desc->mark == DESC_WAITING) &&
+ async_tx_test_ack(&desc->async_tx)) || all) {
+ /* Remove from ld_queue list */
+ desc->mark = DESC_IDLE;
+
+ list_move(&desc->node, &schan->ld_free);
+
+ if (list_empty(&schan->ld_queue)) {
+ dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
+ pm_runtime_put(schan->dev);
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+ }
+ }
+ }
+
+ if (all && !callback)
+ /*
+ * Terminating and the loop completed normally: forgive
+ * uncompleted cookies
+ */
+ schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
+
+ spin_unlock_irqrestore(&schan->chan_lock, flags);
+
+ if (callback)
+ callback(param);
+
+ return callback;
+}
+
+/*
+ * shdma_chan_ld_cleanup - Clean up link descriptors
+ *
+ * Clean up the ld_queue of the DMA channel.
+ */
+static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
+{
+ while (__ld_cleanup(schan, all))
+ ;
+}
+
+/*
+ * shdma_free_chan_resources - Free all resources of the channel.
+ */
+static void shdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(chan->device);
+ const struct shdma_ops *ops = sdev->ops;
+ LIST_HEAD(list);
+
+ /* Protect against ISR */
+ spin_lock_irq(&schan->chan_lock);
+ ops->halt_channel(schan);
+ spin_unlock_irq(&schan->chan_lock);
+
+ /* Now no new interrupts will occur */
+
+ /* Prepared and not submitted descriptors can still be on the queue */
+ if (!list_empty(&schan->ld_queue))
+ shdma_chan_ld_cleanup(schan, true);
+
+ if (schan->slave_id >= 0) {
+ /* The caller is holding dma_list_mutex */
+ clear_bit(schan->slave_id, shdma_slave_used);
+ chan->private = NULL;
+ }
+
+ spin_lock_irq(&schan->chan_lock);
+
+ list_splice_init(&schan->ld_free, &list);
+ schan->desc_num = 0;
+
+ spin_unlock_irq(&schan->chan_lock);
+
+ kfree(schan->desc);
+}
+
+/**
+ * shdma_add_desc - get, set up and return one transfer descriptor
+ * @schan: DMA channel
+ * @flags: DMA transfer flags
+ * @dst: destination DMA address, incremented when direction equals
+ * DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
+ * @src: source DMA address, incremented when direction equals
+ * DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
+ * @len: DMA transfer length
+ * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
+ * @direction: needed for slave DMA to decide which address to keep constant,
+ * equals DMA_MEM_TO_MEM for MEMCPY
+ * Returns the allocated descriptor or NULL on allocation failure
+ * Locks: called with desc_lock held
+ */
+static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
+ unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
+ struct shdma_desc **first, enum dma_transfer_direction direction)
+{
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ struct shdma_desc *new;
+ size_t copy_size = *len;
+
+ if (!copy_size)
+ return NULL;
+
+ /* Allocate the link descriptor from the free list */
+ new = shdma_get_desc(schan);
+ if (!new) {
+ dev_err(schan->dev, "No free link descriptor available\n");
+ return NULL;
+ }
+
+ ops->desc_setup(schan, new, *src, *dst, &copy_size);
+
+ if (!*first) {
+ /* First desc */
+ new->async_tx.cookie = -EBUSY;
+ *first = new;
+ } else {
+ /* Other desc - invisible to the user */
+ new->async_tx.cookie = -EINVAL;
+ }
+
+ dev_dbg(schan->dev,
+ "chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
+ copy_size, *len, *src, *dst, &new->async_tx,
+ new->async_tx.cookie);
+
+ new->mark = DESC_PREPARED;
+ new->async_tx.flags = flags;
+ new->direction = direction;
+ new->partial = 0;
+
+ *len -= copy_size;
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
+ *src += copy_size;
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
+ *dst += copy_size;
+
+ return new;
+}
+
+/*
+ * shdma_prep_sg - prepare transfer descriptors from an SG list
+ *
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
+ * converted to scatter-gather to guarantee consistent locking and correct
+ * list manipulation. For slave DMA, direction carries the usual meaning, and,
+ * logically, the SG list is RAM and the addr variable contains the slave
+ * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
+ * DMA_MEM_TO_MEM, and the SG list contains only one element pointing at the
+ * source buffer.
+ */
+static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
+ struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct scatterlist *sg;
+ struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
+ LIST_HEAD(tx_list);
+ int chunks = 0;
+ unsigned long irq_flags;
+ int i;
+
+ for_each_sg(sgl, sg, sg_len, i)
+ chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);
+
+ /* Have to lock the whole loop to protect against concurrent release */
+ spin_lock_irqsave(&schan->chan_lock, irq_flags);
+
+ /*
+ * Chaining:
+ * the first descriptor is what the user deals with in all API calls; its
+ * cookie is initially set to -EBUSY and at tx-submit to a positive
+ * number;
+ * if more than one chunk is needed, further chunks have cookie = -EINVAL;
+ * the last chunk, if not equal to the first, has cookie = -ENOSPC;
+ * all chunks are linked onto the tx_list head with their .node heads
+ * only during this function, then they are immediately spliced back
+ * onto the free list in the form of a chain
+ */
+ for_each_sg(sgl, sg, sg_len, i) {
+ dma_addr_t sg_addr = sg_dma_address(sg);
+ size_t len = sg_dma_len(sg);
+
+ if (!len)
+ goto err_get_desc;
+
+ do {
+ dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %llx\n",
+ i, sg, len, (unsigned long long)sg_addr);
+
+ if (direction == DMA_DEV_TO_MEM)
+ new = shdma_add_desc(schan, flags,
+ &sg_addr, addr, &len, &first,
+ direction);
+ else
+ new = shdma_add_desc(schan, flags,
+ addr, &sg_addr, &len, &first,
+ direction);
+ if (!new)
+ goto err_get_desc;
+
+ new->chunks = chunks--;
+ list_add_tail(&new->node, &tx_list);
+ } while (len);
+ }
+
+ if (new != first)
+ new->async_tx.cookie = -ENOSPC;
+
+ /* Put them back on the free list, so they don't get lost */
+ list_splice_tail(&tx_list, &schan->ld_free);
+
+ spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
+
+ return &first->async_tx;
+
+err_get_desc:
+ list_for_each_entry(new, &tx_list, node)
+ new->mark = DESC_IDLE;
+ list_splice(&tx_list, &schan->ld_free);
+
+ spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *shdma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+ size_t len, unsigned long flags)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct scatterlist sg;
+
+ if (!chan || !len)
+ return NULL;
+
+ BUG_ON(!schan->desc_num);
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
+ offset_in_page(dma_src));
+ sg_dma_address(&sg) = dma_src;
+ sg_dma_len(&sg) = len;
+
+ return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
+}
+
+static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags, void *context)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ int slave_id = schan->slave_id;
+ dma_addr_t slave_addr;
+
+ if (!chan)
+ return NULL;
+
+ BUG_ON(!schan->desc_num);
+
+ /* Someone calling slave DMA on a generic channel? */
+ if (slave_id < 0 || !sg_len) {
+ dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
+ __func__, sg_len, slave_id);
+ return NULL;
+ }
+
+ slave_addr = ops->slave_addr(schan);
+
+ return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
+ direction, flags);
+}
+
+static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(chan->device);
+ const struct shdma_ops *ops = sdev->ops;
+ struct dma_slave_config *config;
+ unsigned long flags;
+ int ret;
+
+ if (!chan)
+ return -EINVAL;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&schan->chan_lock, flags);
+ ops->halt_channel(schan);
+
+ if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+ /* Record partial transfer */
+ struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+ struct shdma_desc, node);
+ desc->partial = ops->get_partial(schan, desc);
+ }
+
+ spin_unlock_irqrestore(&schan->chan_lock, flags);
+
+ shdma_chan_ld_cleanup(schan, true);
+ break;
+ case DMA_SLAVE_CONFIG:
+ /*
+ * So far only .slave_id is used, but the slave drivers are
+ * encouraged to also set a transfer direction and an address.
+ */
+ if (!arg)
+ return -EINVAL;
+ /*
+ * We could lock this, but you shouldn't be configuring the
+ * channel while using it...
+ */
+ config = (struct dma_slave_config *)arg;
+ ret = shdma_setup_slave(schan, config->slave_id);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void shdma_issue_pending(struct dma_chan *chan)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+
+ spin_lock_irq(&schan->chan_lock);
+ if (schan->pm_state == SHDMA_PM_ESTABLISHED)
+ shdma_chan_xfer_ld_queue(schan);
+ else
+ schan->pm_state = SHDMA_PM_PENDING;
+ spin_unlock_irq(&schan->chan_lock);
+}
+
+static enum dma_status shdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ enum dma_status status;
+ unsigned long flags;
+
+ shdma_chan_ld_cleanup(schan, false);
+
+ spin_lock_irqsave(&schan->chan_lock, flags);
+
+ status = dma_cookie_status(chan, cookie, txstate);
+
+ /*
+ * If we don't find the cookie on the queue, it has been aborted and we
+ * have to report an error
+ */
+ if (status != DMA_SUCCESS) {
+ struct shdma_desc *sdesc;
+ status = DMA_ERROR;
+ list_for_each_entry(sdesc, &schan->ld_queue, node)
+ if (sdesc->cookie == cookie) {
+ status = DMA_IN_PROGRESS;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&schan->chan_lock, flags);
+
+ return status;
+}
+
+/* Called from error IRQ or NMI */
+bool shdma_reset(struct shdma_dev *sdev)
+{
+ const struct shdma_ops *ops = sdev->ops;
+ struct shdma_chan *schan;
+ unsigned int handled = 0;
+ int i;
+
+ /* Reset all channels */
+ shdma_for_each_chan(schan, sdev, i) {
+ struct shdma_desc *sdesc;
+ LIST_HEAD(dl);
+
+ if (!schan)
+ continue;
+
+ spin_lock(&schan->chan_lock);
+
+ /* Stop the channel */
+ ops->halt_channel(schan);
+
+ list_splice_init(&schan->ld_queue, &dl);
+
+ if (!list_empty(&dl)) {
+ dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
+ pm_runtime_put(schan->dev);
+ }
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+
+ spin_unlock(&schan->chan_lock);
+
+ /* Complete all */
+ list_for_each_entry(sdesc, &dl, node) {
+ struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
+ sdesc->mark = DESC_IDLE;
+ if (tx->callback)
+ tx->callback(tx->callback_param);
+ }
+
+ spin_lock(&schan->chan_lock);
+ list_splice(&dl, &schan->ld_free);
+ spin_unlock(&schan->chan_lock);
+
+ handled++;
+ }
+
+ return !!handled;
+}
+EXPORT_SYMBOL(shdma_reset);
+
+static irqreturn_t chan_irq(int irq, void *dev)
+{
+ struct shdma_chan *schan = dev;
+ const struct shdma_ops *ops =
+ to_shdma_dev(schan->dma_chan.device)->ops;
+ irqreturn_t ret;
+
+ spin_lock(&schan->chan_lock);
+
+ ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
+
+ spin_unlock(&schan->chan_lock);
+
+ return ret;
+}
+
+static irqreturn_t chan_irqt(int irq, void *dev)
+{
+ struct shdma_chan *schan = dev;
+ const struct shdma_ops *ops =
+ to_shdma_dev(schan->dma_chan.device)->ops;
+ struct shdma_desc *sdesc;
+
+ spin_lock_irq(&schan->chan_lock);
+ list_for_each_entry(sdesc, &schan->ld_queue, node) {
+ if (sdesc->mark == DESC_SUBMITTED &&
+ ops->desc_completed(schan, sdesc)) {
+ dev_dbg(schan->dev, "done #%d@%p\n",
+ sdesc->async_tx.cookie, &sdesc->async_tx);
+ sdesc->mark = DESC_COMPLETED;
+ break;
+ }
+ }
+ /* Next desc */
+ shdma_chan_xfer_ld_queue(schan);
+ spin_unlock_irq(&schan->chan_lock);
+
+ shdma_chan_ld_cleanup(schan, false);
+
+ return IRQ_HANDLED;
+}
+
+int shdma_request_irq(struct shdma_chan *schan, int irq,
+ unsigned long flags, const char *name)
+{
+ int ret = request_threaded_irq(irq, chan_irq, chan_irqt,
+ flags, name, schan);
+
+ schan->irq = ret < 0 ? ret : irq;
+
+ return ret;
+}
+EXPORT_SYMBOL(shdma_request_irq);
+
+void shdma_free_irq(struct shdma_chan *schan)
+{
+ if (schan->irq >= 0)
+ free_irq(schan->irq, schan);
+}
+EXPORT_SYMBOL(shdma_free_irq);
+
+void shdma_chan_probe(struct shdma_dev *sdev,
+ struct shdma_chan *schan, int id)
+{
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+
+ /* reference struct dma_device */
+ schan->dma_chan.device = &sdev->dma_dev;
+ dma_cookie_init(&schan->dma_chan);
+
+ schan->dev = sdev->dma_dev.dev;
+ schan->id = id;
+
+ if (!schan->max_xfer_len)
+ schan->max_xfer_len = PAGE_SIZE;
+
+ spin_lock_init(&schan->chan_lock);
+
+ /* Init descriptor management lists */
+ INIT_LIST_HEAD(&schan->ld_queue);
+ INIT_LIST_HEAD(&schan->ld_free);
+
+ /* Add the channel to DMA device channel list */
+ list_add_tail(&schan->dma_chan.device_node,
+ &sdev->dma_dev.channels);
+ sdev->schan[sdev->dma_dev.chancnt++] = schan;
+}
+EXPORT_SYMBOL(shdma_chan_probe);
+
+void shdma_chan_remove(struct shdma_chan *schan)
+{
+ list_del(&schan->dma_chan.device_node);
+}
+EXPORT_SYMBOL(shdma_chan_remove);
+
+int shdma_init(struct device *dev, struct shdma_dev *sdev,
+ int chan_num)
+{
+ struct dma_device *dma_dev = &sdev->dma_dev;
+
+ /*
+ * Require all callbacks for now; they can trivially be made optional
+ * later as required
+ */
+ if (!sdev->ops ||
+ !sdev->desc_size ||
+ !sdev->ops->embedded_desc ||
+ !sdev->ops->start_xfer ||
+ !sdev->ops->setup_xfer ||
+ !sdev->ops->set_slave ||
+ !sdev->ops->desc_setup ||
+ !sdev->ops->slave_addr ||
+ !sdev->ops->channel_busy ||
+ !sdev->ops->halt_channel ||
+ !sdev->ops->desc_completed)
+ return -EINVAL;
+
+ sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
+ if (!sdev->schan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* Common and MEMCPY operations */
+ dma_dev->device_alloc_chan_resources
+ = shdma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = shdma_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
+ dma_dev->device_tx_status = shdma_tx_status;
+ dma_dev->device_issue_pending = shdma_issue_pending;
+
+ /* Fields compulsory for DMA_SLAVE */
+ dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
+ dma_dev->device_control = shdma_control;
+
+ dma_dev->dev = dev;
+
+ return 0;
+}
+EXPORT_SYMBOL(shdma_init);
+
+void shdma_cleanup(struct shdma_dev *sdev)
+{
+ kfree(sdev->schan);
+}
+EXPORT_SYMBOL(shdma_cleanup);
+
+static int __init shdma_enter(void)
+{
+ shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
+ sizeof(long), GFP_KERNEL);
+ if (!shdma_slave_used)
+ return -ENOMEM;
+ return 0;
+}
+module_init(shdma_enter);
+
+static void __exit shdma_exit(void)
+{
+ kfree(shdma_slave_used);
+}
+module_exit(shdma_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SH-DMA driver base library");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
new file mode 100644
index 00000000000..f41bcc5267f
--- /dev/null
+++ b/drivers/dma/sh/shdma.c
@@ -0,0 +1,955 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * based on drivers/dma/fsldma.c
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * - The SuperH DMAC does not have a hardware DMA chain mode.
+ * - The maximum DMA transfer size is 16MB.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
+#include <linux/notifier.h>
+#include <linux/kdebug.h>
+#include <linux/spinlock.h>
+#include <linux/rculist.h>
+
+#include "../dmaengine.h"
+#include "shdma.h"
+
+#define SH_DMAE_DRV_NAME "sh-dma-engine"
+
+/* Default MEMCPY transfer size = 2^2 = 4 bytes */
+#define LOG2_DEFAULT_XFER_SIZE 2
+#define SH_DMA_SLAVE_NUMBER 256
+#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
+
+/*
+ * Used for write-side mutual exclusion for the global device list,
+ * read-side synchronization by way of RCU, and per-controller data.
+ */
+static DEFINE_SPINLOCK(sh_dmae_lock);
+static LIST_HEAD(sh_dmae_devices);
+
+static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, shdev->chan_reg +
+ shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
+}
+
+static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
+{
+ __raw_writel(data, sh_dc->base + reg / sizeof(u32));
+}
+
+static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
+{
+ return __raw_readl(sh_dc->base + reg / sizeof(u32));
+}
+
+static u16 dmaor_read(struct sh_dmae_device *shdev)
+{
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ return __raw_readl(addr);
+ else
+ return __raw_readw(addr);
+}
+
+static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
+{
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ __raw_writel(data, addr);
+ else
+ __raw_writew(data, addr);
+}
+
+static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
+}
+
+static u32 chcr_read(struct sh_dmae_chan *sh_dc)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
+}
+
+/*
+ * Reset DMA controller
+ *
+ * SH7780 has two DMAOR registers
+ */
+static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
+{
+ unsigned short dmaor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sh_dmae_lock, flags);
+
+ dmaor = dmaor_read(shdev);
+ dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
+
+ spin_unlock_irqrestore(&sh_dmae_lock, flags);
+}
+
+static int sh_dmae_rst(struct sh_dmae_device *shdev)
+{
+ unsigned short dmaor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sh_dmae_lock, flags);
+
+ dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+
+ if (shdev->pdata->chclr_present) {
+ int i;
+ for (i = 0; i < shdev->pdata->channel_num; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+ if (sh_chan)
+ chclr_write(sh_chan, 0);
+ }
+ }
+
+ dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
+
+ dmaor = dmaor_read(shdev);
+
+ spin_unlock_irqrestore(&sh_dmae_lock, flags);
+
+ if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
+ dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
+ return -EIO;
+ }
+ if (shdev->pdata->dmaor_init & ~dmaor)
+ dev_warn(shdev->shdma_dev.dma_dev.dev,
+ "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
+ dmaor, shdev->pdata->dmaor_init);
+ return 0;
+}
+
+static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
+{
+ u32 chcr = chcr_read(sh_chan);
+
+ if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
+ return true; /* working */
+
+ return false; /* waiting */
+}
+
+static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
+ ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
+
+ if (cnt >= pdata->ts_shift_num)
+ cnt = 0;
+
+ return pdata->ts_shift[cnt];
+}
+
+static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ int i;
+
+ for (i = 0; i < pdata->ts_shift_num; i++)
+ if (pdata->ts_shift[i] == l2size)
+ break;
+
+ if (i == pdata->ts_shift_num)
+ i = 0;
+
+ return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
+ ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
+}
+
+static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
+{
+ sh_dmae_writel(sh_chan, hw->sar, SAR);
+ sh_dmae_writel(sh_chan, hw->dar, DAR);
+ sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
+}
+
+static void dmae_start(struct sh_dmae_chan *sh_chan)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
+
+ if (shdev->pdata->needs_tend_set)
+ sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
+
+ chcr |= CHCR_DE | shdev->chcr_ie_bit;
+ chcr_write(sh_chan, chcr & ~CHCR_TE);
+}
+
+static void dmae_init(struct sh_dmae_chan *sh_chan)
+{
+ /*
+ * Default configuration for dual address memory-memory transfer.
+ * 0x400 represents auto-request.
+ */
+ u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
+ LOG2_DEFAULT_XFER_SIZE);
+ sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
+ chcr_write(sh_chan, chcr);
+}
+
+static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
+{
+ /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
+ if (dmae_is_busy(sh_chan))
+ return -EBUSY;
+
+ sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
+ chcr_write(sh_chan, val);
+
+ return 0;
+}
+
+static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
+ u16 __iomem *addr = shdev->dmars;
+ unsigned int shift = chan_pdata->dmars_bit;
+
+ if (dmae_is_busy(sh_chan))
+ return -EBUSY;
+
+ if (pdata->no_dmars)
+ return 0;
+
+ /* In the case of a missing DMARS resource, use the first memory window */
+ if (!addr)
+ addr = (u16 __iomem *)shdev->chan_reg;
+ addr += chan_pdata->dmars / sizeof(u16);
+
+ __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
+ addr);
+
+ return 0;
+}
+
+static void sh_dmae_start_xfer(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+ dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
+ sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
+ sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
+ /* Get the ld start address from ld_queue */
+ dmae_set_reg(sh_chan, &sh_desc->hw);
+ dmae_start(sh_chan);
+}
+
+static bool sh_dmae_channel_busy(struct shdma_chan *schan)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ return dmae_is_busy(sh_chan);
+}
+
+static void sh_dmae_setup_xfer(struct shdma_chan *schan,
+ int slave_id)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+
+ if (slave_id >= 0) {
+ const struct sh_dmae_slave_config *cfg =
+ sh_chan->config;
+
+ dmae_set_dmars(sh_chan, cfg->mid_rid);
+ dmae_set_chcr(sh_chan, cfg->chcr);
+ } else {
+ dmae_init(sh_chan);
+ }
+}
+
+static const struct sh_dmae_slave_config *dmae_find_slave(
+ struct sh_dmae_chan *sh_chan, int slave_id)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_slave_config *cfg;
+ int i;
+
+ if (slave_id >= SH_DMA_SLAVE_NUMBER)
+ return NULL;
+
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->slave_id == slave_id)
+ return cfg;
+
+ return NULL;
+}
+
+static int sh_dmae_set_slave(struct shdma_chan *schan,
+ int slave_id, bool try)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
+ if (!cfg)
+ return -ENODEV;
+
+ if (!try)
+ sh_chan->config = cfg;
+
+ return 0;
+}
+
+static void dmae_halt(struct sh_dmae_chan *sh_chan)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
+
+ chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
+ chcr_write(sh_chan, chcr);
+}
+
+static int sh_dmae_desc_setup(struct shdma_chan *schan,
+ struct shdma_desc *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+
+ if (*len > schan->max_xfer_len)
+ *len = schan->max_xfer_len;
+
+ sh_desc->hw.sar = src;
+ sh_desc->hw.dar = dst;
+ sh_desc->hw.tcr = *len;
+
+ return 0;
+}
+
+static void sh_dmae_halt(struct shdma_chan *schan)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ dmae_halt(sh_chan);
+}
+
+static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+
+ if (!(chcr_read(sh_chan) & CHCR_TE))
+ return false;
+
+ /* DMA stop */
+ dmae_halt(sh_chan);
+
+ return true;
+}
+
+static size_t sh_dmae_get_partial(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+ return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
+ sh_chan->xmit_shift;
+}
+
+/* Called from error IRQ or NMI */
+static bool sh_dmae_reset(struct sh_dmae_device *shdev)
+{
+ bool ret;
+
+ /* halt the dma controller */
+ sh_dmae_ctl_stop(shdev);
+
+ /* We cannot detect which channel caused the error, so we have to reset all */
+ ret = shdma_reset(&shdev->shdma_dev);
+
+ sh_dmae_rst(shdev);
+
+ return ret;
+}
+
+static irqreturn_t sh_dmae_err(int irq, void *data)
+{
+ struct sh_dmae_device *shdev = data;
+
+ if (!(dmaor_read(shdev) & DMAOR_AE))
+ return IRQ_NONE;
+
+ sh_dmae_reset(shdev);
+ return IRQ_HANDLED;
+}
+
+static bool sh_dmae_desc_completed(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan,
+ struct sh_dmae_chan, shdma_chan);
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+ u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+ u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
+
+ return (sdesc->direction == DMA_DEV_TO_MEM &&
+ (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
+ (sdesc->direction != DMA_DEV_TO_MEM &&
+ (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
+}
+
+static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
+{
+ /* Fast path out if NMIF is not asserted for this controller */
+ if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
+ return false;
+
+ return sh_dmae_reset(shdev);
+}
+
+static int sh_dmae_nmi_handler(struct notifier_block *self,
+ unsigned long cmd, void *data)
+{
+ struct sh_dmae_device *shdev;
+ int ret = NOTIFY_DONE;
+ bool triggered;
+
+ /*
+ * Only concern ourselves with NMI events.
+ *
+ * Normally we would check the die chain value, but as this needs
+ * to be architecture independent, check for NMI context instead.
+ */
+ if (!in_nmi())
+ return NOTIFY_DONE;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
+ /*
+ * Only stop if one of the controllers has NMIF asserted,
+ * we do not want to interfere with regular address error
+ * handling or NMI events that don't concern the DMACs.
+ */
+ triggered = sh_dmae_nmi_notify(shdev);
+ if (triggered)
+ ret = NOTIFY_OK;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
+ .notifier_call = sh_dmae_nmi_handler,
+
+ /* Run before NMI debug handler and KGDB */
+ .priority = 1,
+};
+
+static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
+ int irq, unsigned long flags)
+{
+ const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+ struct shdma_dev *sdev = &shdev->shdma_dev;
+ struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
+ struct sh_dmae_chan *sh_chan;
+ struct shdma_chan *schan;
+ int err;
+
+ sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
+ if (!sh_chan) {
+ dev_err(sdev->dma_dev.dev,
+ "No free memory for allocating dma channels!\n");
+ return -ENOMEM;
+ }
+
+ schan = &sh_chan->shdma_chan;
+ schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
+
+ shdma_chan_probe(sdev, schan, id);
+
+ sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
+
+ /* set up channel irq */
+ if (pdev->id >= 0)
+ snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+ "sh-dmae%d.%d", pdev->id, id);
+ else
+ snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+ "sh-dma%d", id);
+
+ err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
+ if (err) {
+ dev_err(sdev->dma_dev.dev,
+ "DMA channel %d request_irq error %d\n",
+ id, err);
+ goto err_no_irq;
+ }
+
+ shdev->chan[id] = sh_chan;
+ return 0;
+
+err_no_irq:
+ /* remove from dmaengine device node */
+ shdma_chan_remove(schan);
+ kfree(sh_chan);
+ return err;
+}
+
+static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
+{
+ struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
+ struct shdma_chan *schan;
+ int i;
+
+ shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
+ struct sh_dmae_chan *sh_chan = container_of(schan,
+ struct sh_dmae_chan, shdma_chan);
+ BUG_ON(!schan);
+
+ shdma_free_irq(&sh_chan->shdma_chan);
+
+ shdma_chan_remove(schan);
+ kfree(sh_chan);
+ }
+ dma_dev->chancnt = 0;
+}
+
+static void sh_dmae_shutdown(struct platform_device *pdev)
+{
+ struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+ sh_dmae_ctl_stop(shdev);
+}
+
+static int sh_dmae_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int sh_dmae_runtime_resume(struct device *dev)
+{
+ struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+ return sh_dmae_rst(shdev);
+}
+
+#ifdef CONFIG_PM
+static int sh_dmae_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int sh_dmae_resume(struct device *dev)
+{
+ struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+ int i, ret;
+
+ ret = sh_dmae_rst(shdev);
+ if (ret < 0)
+ dev_err(dev, "Failed to reset!\n");
+
+ for (i = 0; i < shdev->pdata->channel_num; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+
+ if (!sh_chan->shdma_chan.desc_num)
+ continue;
+
+ if (sh_chan->shdma_chan.slave_id >= 0) {
+ const struct sh_dmae_slave_config *cfg = sh_chan->config;
+ dmae_set_dmars(sh_chan, cfg->mid_rid);
+ dmae_set_chcr(sh_chan, cfg->chcr);
+ } else {
+ dmae_init(sh_chan);
+ }
+ }
+
+ return 0;
+}
+#else
+#define sh_dmae_suspend NULL
+#define sh_dmae_resume NULL
+#endif
+
+const struct dev_pm_ops sh_dmae_pm = {
+ .suspend = sh_dmae_suspend,
+ .resume = sh_dmae_resume,
+ .runtime_suspend = sh_dmae_runtime_suspend,
+ .runtime_resume = sh_dmae_runtime_resume,
+};
+
+static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan,
+ struct sh_dmae_chan, shdma_chan);
+
+ /*
+ * Implicit BUG_ON(!sh_chan->config)
+ * This is an exclusive slave DMA operation; it may only be called after a
+ * successful slave configuration.
+ */
+ return sh_chan->config->addr;
+}
+
+static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
+{
+ return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops sh_dmae_shdma_ops = {
+ .desc_completed = sh_dmae_desc_completed,
+ .halt_channel = sh_dmae_halt,
+ .channel_busy = sh_dmae_channel_busy,
+ .slave_addr = sh_dmae_slave_addr,
+ .desc_setup = sh_dmae_desc_setup,
+ .set_slave = sh_dmae_set_slave,
+ .setup_xfer = sh_dmae_setup_xfer,
+ .start_xfer = sh_dmae_start_xfer,
+ .embedded_desc = sh_dmae_embedded_desc,
+ .chan_irq = sh_dmae_chan_irq,
+ .get_partial = sh_dmae_get_partial,
+};
+
+static int __devinit sh_dmae_probe(struct platform_device *pdev)
+{
+ struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
+ unsigned long irqflags = IRQF_DISABLED,
+ chan_flag[SH_DMAE_MAX_CHANNELS] = {};
+ int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
+ int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
+ struct sh_dmae_device *shdev;
+ struct dma_device *dma_dev;
+ struct resource *chan, *dmars, *errirq_res, *chanirq_res;
+
+ /* get platform data */
+ if (!pdata || !pdata->channel_num)
+ return -ENODEV;
+
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* DMARS area is optional */
+ dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ /*
+ * IRQ resources:
+ * 1. there must always be at least one IRQ IO-resource. On SH4 it is
+ * the error IRQ, in which case it is the only IRQ in this resource:
+ * start == end. If it is the only IRQ resource, all channels also
+ * use the same IRQ.
+ * 2. DMA channel IRQ resources can be specified one per resource or in
+ * ranges (start != end)
+ * 3. iff all events (channels and, optionally, error) on this
+ * controller use the same IRQ, only one IRQ resource can be
+ * specified, otherwise there must be one IRQ per channel, even if
+ * some of them are equal
+ * 4. if all IRQs on this controller are equal or if some specific IRQs
+ * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
+ * requested with the IRQF_SHARED flag
+ */
+ errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!chan || !errirq_res)
+ return -ENODEV;
+
+ if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
+ dev_err(&pdev->dev, "DMAC register region already claimed\n");
+ return -EBUSY;
+ }
+
+ if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
+ dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
+ err = -EBUSY;
+ goto ermrdmars;
+ }
+
+ err = -ENOMEM;
+ shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
+ if (!shdev) {
+ dev_err(&pdev->dev, "Not enough memory\n");
+ goto ealloc;
+ }
+
+ dma_dev = &shdev->shdma_dev.dma_dev;
+
+ shdev->chan_reg = ioremap(chan->start, resource_size(chan));
+ if (!shdev->chan_reg)
+ goto emapchan;
+ if (dmars) {
+ shdev->dmars = ioremap(dmars->start, resource_size(dmars));
+ if (!shdev->dmars)
+ goto emapdmars;
+ }
+
+ if (!pdata->slave_only)
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ if (pdata->slave && pdata->slave_num)
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ /* Default transfer size of 4 bytes requires 4-byte alignment */
+ dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
+
+ shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
+ shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
+ err = shdma_init(&pdev->dev, &shdev->shdma_dev,
+ pdata->channel_num);
+ if (err < 0)
+ goto eshdma;
+
+ /* platform data */
+ shdev->pdata = pdev->dev.platform_data;
+
+ if (pdata->chcr_offset)
+ shdev->chcr_offset = pdata->chcr_offset;
+ else
+ shdev->chcr_offset = CHCR;
+
+ if (pdata->chcr_ie_bit)
+ shdev->chcr_ie_bit = pdata->chcr_ie_bit;
+ else
+ shdev->chcr_ie_bit = CHCR_IE;
+
+ platform_set_drvdata(pdev, shdev);
+
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_get_sync(&pdev->dev);
+ if (err < 0)
+ dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
+
+ spin_lock_irq(&sh_dmae_lock);
+ list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
+ spin_unlock_irq(&sh_dmae_lock);
+
+ /* reset dma controller - only needed as a test */
+ err = sh_dmae_rst(shdev);
+ if (err)
+ goto rst_err;
+
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+ chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+
+ if (!chanirq_res)
+ chanirq_res = errirq_res;
+ else
+ irqres++;
+
+ if (chanirq_res == errirq_res ||
+ (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
+ irqflags = IRQF_SHARED;
+
+ errirq = errirq_res->start;
+
+ err = request_irq(errirq, sh_dmae_err, irqflags,
+ "DMAC Address Error", shdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA failed requesting irq #%d, error %d\n",
+ errirq, err);
+ goto eirq_err;
+ }
+
+#else
+ chanirq_res = errirq_res;
+#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
+
+ if (chanirq_res->start == chanirq_res->end &&
+ !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
+ /* Special case - all multiplexed */
+ for (; irq_cnt < pdata->channel_num; irq_cnt++) {
+ if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
+ chan_irq[irq_cnt] = chanirq_res->start;
+ chan_flag[irq_cnt] = IRQF_SHARED;
+ } else {
+ irq_cap = 1;
+ break;
+ }
+ }
+ } else {
+ do {
+ for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
+ if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
+ irq_cap = 1;
+ break;
+ }
+
+ if ((errirq_res->flags & IORESOURCE_BITS) ==
+ IORESOURCE_IRQ_SHAREABLE)
+ chan_flag[irq_cnt] = IRQF_SHARED;
+ else
+ chan_flag[irq_cnt] = IRQF_DISABLED;
+ dev_dbg(&pdev->dev,
+ "Found IRQ %d for channel %d\n",
+ i, irq_cnt);
+ chan_irq[irq_cnt++] = i;
+ }
+
+ if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
+ break;
+
+ chanirq_res = platform_get_resource(pdev,
+ IORESOURCE_IRQ, ++irqres);
+ } while (irq_cnt < pdata->channel_num && chanirq_res);
+ }
+
+ /* Create DMA Channel */
+ for (i = 0; i < irq_cnt; i++) {
+ err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
+ if (err)
+ goto chan_probe_err;
+ }
+
+ if (irq_cap)
+ dev_notice(&pdev->dev, "Attempting to register %d DMA "
+ "channels when a maximum of %d are supported.\n",
+ pdata->channel_num, SH_DMAE_MAX_CHANNELS);
+
+ pm_runtime_put(&pdev->dev);
+
+ err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
+ if (err < 0)
+ goto edmadevreg;
+
+ return err;
+
+edmadevreg:
+ pm_runtime_get(&pdev->dev);
+
+chan_probe_err:
+ sh_dmae_chan_remove(shdev);
+
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+ free_irq(errirq, shdev);
+eirq_err:
+#endif
+rst_err:
+ spin_lock_irq(&sh_dmae_lock);
+ list_del_rcu(&shdev->node);
+ spin_unlock_irq(&sh_dmae_lock);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ platform_set_drvdata(pdev, NULL);
+ shdma_cleanup(&shdev->shdma_dev);
+eshdma:
+ if (dmars)
+ iounmap(shdev->dmars);
+emapdmars:
+ iounmap(shdev->chan_reg);
+ synchronize_rcu();
+emapchan:
+ kfree(shdev);
+ealloc:
+ if (dmars)
+ release_mem_region(dmars->start, resource_size(dmars));
+ermrdmars:
+ release_mem_region(chan->start, resource_size(chan));
+
+ return err;
+}
+
+static int __devexit sh_dmae_remove(struct platform_device *pdev)
+{
+ struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+ struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
+ struct resource *res;
+ int errirq = platform_get_irq(pdev, 0);
+
+ dma_async_device_unregister(dma_dev);
+
+ if (errirq > 0)
+ free_irq(errirq, shdev);
+
+ spin_lock_irq(&sh_dmae_lock);
+ list_del_rcu(&shdev->node);
+ spin_unlock_irq(&sh_dmae_lock);
+
+ pm_runtime_disable(&pdev->dev);
+
+ sh_dmae_chan_remove(shdev);
+ shdma_cleanup(&shdev->shdma_dev);
+
+ if (shdev->dmars)
+ iounmap(shdev->dmars);
+ iounmap(shdev->chan_reg);
+
+ platform_set_drvdata(pdev, NULL);
+
+ synchronize_rcu();
+ kfree(shdev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ return 0;
+}
+
+static struct platform_driver sh_dmae_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .pm = &sh_dmae_pm,
+ .name = SH_DMAE_DRV_NAME,
+ },
+ .remove = __devexit_p(sh_dmae_remove),
+ .shutdown = sh_dmae_shutdown,
+};
+
+static int __init sh_dmae_init(void)
+{
+ /* Wire up NMI handling */
+ int err = register_die_notifier(&sh_dmae_nmi_notifier);
+ if (err)
+ return err;
+
+ return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
+}
+module_init(sh_dmae_init);
+
+static void __exit sh_dmae_exit(void)
+{
+ platform_driver_unregister(&sh_dmae_driver);
+
+ unregister_die_notifier(&sh_dmae_nmi_notifier);
+}
+module_exit(sh_dmae_exit);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
+MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
new file mode 100644
index 00000000000..9314e93225d
--- /dev/null
+++ b/drivers/dma/sh/shdma.h
@@ -0,0 +1,64 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#ifndef __DMA_SHDMA_H
+#define __DMA_SHDMA_H
+
+#include <linux/sh_dma.h>
+#include <linux/shdma-base.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+
+#define SH_DMAE_MAX_CHANNELS 20
+#define SH_DMAE_TCR_MAX 0x00FFFFFF /* 16MB */
+
+struct device;
+
+struct sh_dmae_chan {
+ struct shdma_chan shdma_chan;
+ const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
+ int xmit_shift; /* log_2(bytes_per_xfer) */
+ u32 __iomem *base;
+ char dev_id[16]; /* unique channel name per DMAC */
+ int pm_error;
+};
+
+struct sh_dmae_device {
+ struct shdma_dev shdma_dev;
+ struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
+ struct sh_dmae_pdata *pdata;
+ struct list_head node;
+ u32 __iomem *chan_reg;
+ u16 __iomem *dmars;
+ unsigned int chcr_offset;
+ u32 chcr_ie_bit;
+};
+
+struct sh_dmae_regs {
+ u32 sar; /* SAR / source address */
+ u32 dar; /* DAR / destination address */
+ u32 tcr; /* TCR / transfer count */
+};
+
+struct sh_dmae_desc {
+ struct sh_dmae_regs hw;
+ struct shdma_desc shdma_desc;
+};
+
+#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan)
+#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
+#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
+#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
+ struct sh_dmae_device, shdma_dev.dma_dev)
+
+#endif /* __DMA_SHDMA_H */