Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig       |  11
-rw-r--r--  drivers/dma/Makefile      |   2
-rw-r--r--  drivers/dma/amba-pl08x.c  | 941
-rw-r--r--  drivers/dma/omap-dma.c    | 669
-rw-r--r--  drivers/dma/sa11x0-dma.c  | 388
-rw-r--r--  drivers/dma/virt-dma.c    | 123
-rw-r--r--  drivers/dma/virt-dma.h    | 152
7 files changed, 1622 insertions, 664 deletions
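
The conversion below follows one common pattern in all three drivers: the driver embeds struct virt_dma_chan in its per-channel state and struct virt_dma_desc in its descriptor, registers a desc_free callback before vchan_init(), returns prepared descriptors through vchan_tx_prep(), and on completion calls vchan_cookie_complete() and vchan_next_desc() under vc.lock. A minimal sketch of that shape is given here for orientation; the foo_* names and stub bodies are illustrative only — just the vchan_* calls and the embedded structures come from the virt-dma helpers this patch introduces.

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

/* Driver-private channel: embed the virtual channel, not a bare dma_chan. */
struct foo_chan {
	struct virt_dma_chan vc;	/* provides vc.chan, vc.lock, descriptor lists */
	struct foo_desc *at;		/* descriptor currently on the hardware */
};

/* Driver-private descriptor: embed virt_dma_desc instead of a raw tx descriptor. */
struct foo_desc {
	struct virt_dma_desc vd;
	/* ... hardware-specific fields (LLIs, sg list, ...) ... */
};

static inline struct foo_chan *to_foo_chan(struct dma_chan *chan)
{
	return container_of(chan, struct foo_chan, vc.chan);
}

/* Called by the virt-dma layer to free a completed or terminated descriptor. */
static void foo_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct foo_desc, vd));
}

/* prep: allocate a descriptor and hand it to the virt-dma layer. */
static struct dma_async_tx_descriptor *foo_prep(struct dma_chan *chan,
						unsigned long flags)
{
	struct foo_chan *fc = to_foo_chan(chan);
	struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

	if (!d)
		return NULL;
	/* ... fill in hardware-specific fields ... */
	return vchan_tx_prep(&fc->vc, &d->vd, flags);
}

/*
 * Completion path (IRQ/tasklet): report the finished descriptor and start
 * the next one queued on the virtual channel, if any.
 */
static void foo_complete(struct foo_chan *fc)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&fc->vc.lock, flags);
	if (fc->at) {
		vchan_cookie_complete(&fc->at->vd);
		fc->at = NULL;
	}
	vd = vchan_next_desc(&fc->vc);
	if (vd) {
		list_del(&vd->node);
		fc->at = container_of(vd, struct foo_desc, vd);
		/* ... program the hardware from fc->at ... */
	}
	spin_unlock_irqrestore(&fc->vc.lock, flags);
}

/* Registration: one vchan_init() per channel, with the free callback set first. */
static void foo_chan_init(struct foo_chan *fc, struct dma_device *ddev)
{
	fc->vc.desc_free = foo_desc_free;
	vchan_init(&fc->vc, ddev);
}
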
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d45cf1bcbde..d06ea2950dd 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -53,6 +53,7 @@ config AMBA_PL08X bool "ARM PrimeCell PL080 or PL081 support" depends on ARM_AMBA && EXPERIMENTAL select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS help Platform has a PL08x DMAC device which can provide DMA engine support @@ -269,6 +270,7 @@ config DMA_SA11X0 tristate "SA-11x0 DMA support" depends on ARCH_SA1100 select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS help Support the DMA engine found on Intel StrongARM SA-1100 and SA-1110 SoCs. This DMA engine can only be used with on-chip @@ -284,9 +286,18 @@ config MMP_TDMA Say Y here if you enabled MMP ADMA, otherwise say N. +config DMA_OMAP + tristate "OMAP DMA support" + depends on ARCH_OMAP + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + config DMA_ENGINE bool +config DMA_VIRTUAL_CHANNELS + tristate + comment "DMA Clients" depends on DMA_ENGINE diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 640356add0a..4cf6b128ab9 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -2,6 +2,7 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG obj-$(CONFIG_DMA_ENGINE) += dmaengine.o +obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o obj-$(CONFIG_NET_DMA) += iovlock.o obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o obj-$(CONFIG_DMATEST) += dmatest.o @@ -30,3 +31,4 @@ obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o +obj-$(CONFIG_DMA_OMAP) += omap-dma.o diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 49ecbbb8932..6fbeebb9486 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -86,10 +86,12 @@ #include <asm/hardware/pl080.h> #include "dmaengine.h" +#include "virt-dma.h" #define DRIVER_NAME "pl08xdmac" static struct amba_driver pl08x_amba_driver; +struct pl08x_driver_data; /** * struct vendor_data - vendor-specific config parameters for PL08x derivatives @@ -119,6 +121,123 @@ struct pl08x_lli { }; /** + * struct pl08x_bus_data - information of source or destination + * busses for a transfer + * @addr: current address + * @maxwidth: the maximum width of a transfer on this bus + * @buswidth: the width of this bus in bytes: 1, 2 or 4 + */ +struct pl08x_bus_data { + dma_addr_t addr; + u8 maxwidth; + u8 buswidth; +}; + +/** + * struct pl08x_phy_chan - holder for the physical channels + * @id: physical index to this channel + * @lock: a lock to use when altering an instance of this struct + * @serving: the virtual channel currently being served by this physical + * channel + * @locked: channel unavailable for the system, e.g. 
dedicated to secure + * world + */ +struct pl08x_phy_chan { + unsigned int id; + void __iomem *base; + spinlock_t lock; + struct pl08x_dma_chan *serving; + bool locked; +}; + +/** + * struct pl08x_sg - structure containing data per sg + * @src_addr: src address of sg + * @dst_addr: dst address of sg + * @len: transfer len in bytes + * @node: node for txd's dsg_list + */ +struct pl08x_sg { + dma_addr_t src_addr; + dma_addr_t dst_addr; + size_t len; + struct list_head node; +}; + +/** + * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor + * @vd: virtual DMA descriptor + * @dsg_list: list of children sg's + * @llis_bus: DMA memory address (physical) start for the LLIs + * @llis_va: virtual memory address start for the LLIs + * @cctl: control reg values for current txd + * @ccfg: config reg values for current txd + * @done: this marks completed descriptors, which should not have their + * mux released. + */ +struct pl08x_txd { + struct virt_dma_desc vd; + struct list_head dsg_list; + dma_addr_t llis_bus; + struct pl08x_lli *llis_va; + /* Default cctl value for LLIs */ + u32 cctl; + /* + * Settings to be put into the physical channel when we + * trigger this txd. Other registers are in llis_va[0]. + */ + u32 ccfg; + bool done; +}; + +/** + * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel + * states + * @PL08X_CHAN_IDLE: the channel is idle + * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport + * channel and is running a transfer on it + * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport + * channel, but the transfer is currently paused + * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport + * channel to become available (only pertains to memcpy channels) + */ +enum pl08x_dma_chan_state { + PL08X_CHAN_IDLE, + PL08X_CHAN_RUNNING, + PL08X_CHAN_PAUSED, + PL08X_CHAN_WAITING, +}; + +/** + * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel + * @vc: wrappped virtual channel + * @phychan: the physical channel utilized by this channel, if there is one + * @name: name of channel + * @cd: channel platform data + * @runtime_addr: address for RX/TX according to the runtime config + * @at: active transaction on this channel + * @lock: a lock for this channel data + * @host: a pointer to the host (internal use) + * @state: whether the channel is idle, paused, running etc + * @slave: whether this channel is a device (slave) or for memcpy + * @signal: the physical DMA request signal which this channel is using + * @mux_use: count of descriptors using this DMA request signal setting + */ +struct pl08x_dma_chan { + struct virt_dma_chan vc; + struct pl08x_phy_chan *phychan; + const char *name; + const struct pl08x_channel_data *cd; + struct dma_slave_config cfg; + struct pl08x_txd *at; + struct pl08x_driver_data *host; + enum pl08x_dma_chan_state state; + bool slave; + int signal; + unsigned mux_use; +}; + +/** * struct pl08x_driver_data - the local state holder for the PL08x * @slave: slave engine for this instance * @memcpy: memcpy engine for this instance @@ -128,7 +247,6 @@ struct pl08x_lli { * @pd: platform data passed in from the platform/machine * @phy_chans: array of data for the physical channels * @pool: a pool for the LLI descriptors - * @pool_ctr: counter of LLIs in the pool * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI * fetches * @mem_buses: set to indicate memory transfers on AHB2. 
@@ -143,10 +261,8 @@ struct pl08x_driver_data { struct pl08x_platform_data *pd; struct pl08x_phy_chan *phy_chans; struct dma_pool *pool; - int pool_ctr; u8 lli_buses; u8 mem_buses; - spinlock_t lock; }; /* @@ -162,12 +278,51 @@ struct pl08x_driver_data { static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) { - return container_of(chan, struct pl08x_dma_chan, chan); + return container_of(chan, struct pl08x_dma_chan, vc.chan); } static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) { - return container_of(tx, struct pl08x_txd, tx); + return container_of(tx, struct pl08x_txd, vd.tx); +} + +/* + * Mux handling. + * + * This gives us the DMA request input to the PL08x primecell which the + * peripheral described by the channel data will be routed to, possibly + * via a board/SoC specific external MUX. One important point to note + * here is that this does not depend on the physical channel. + */ +static int pl08x_request_mux(struct pl08x_dma_chan *plchan) +{ + const struct pl08x_platform_data *pd = plchan->host->pd; + int ret; + + if (plchan->mux_use++ == 0 && pd->get_signal) { + ret = pd->get_signal(plchan->cd); + if (ret < 0) { + plchan->mux_use = 0; + return ret; + } + + plchan->signal = ret; + } + return 0; +} + +static void pl08x_release_mux(struct pl08x_dma_chan *plchan) +{ + const struct pl08x_platform_data *pd = plchan->host->pd; + + if (plchan->signal >= 0) { + WARN_ON(plchan->mux_use == 0); + + if (--plchan->mux_use == 0 && pd->put_signal) { + pd->put_signal(plchan->cd, plchan->signal); + plchan->signal = -1; + } + } } /* @@ -189,20 +344,25 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) * been set when the LLIs were constructed. Poke them into the hardware * and start the transfer. 
*/ -static void pl08x_start_txd(struct pl08x_dma_chan *plchan, - struct pl08x_txd *txd) +static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) { struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_phy_chan *phychan = plchan->phychan; - struct pl08x_lli *lli = &txd->llis_va[0]; + struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc); + struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); + struct pl08x_lli *lli; u32 val; + list_del(&txd->vd.node); + plchan->at = txd; /* Wait for channel inactive */ while (pl08x_phy_channel_busy(phychan)) cpu_relax(); + lli = &txd->llis_va[0]; + dev_vdbg(&pl08x->adev->dev, "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", @@ -311,10 +471,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) { struct pl08x_phy_chan *ch; struct pl08x_txd *txd; - unsigned long flags; size_t bytes = 0; - spin_lock_irqsave(&plchan->lock, flags); ch = plchan->phychan; txd = plchan->at; @@ -354,18 +512,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) } } - /* Sum up all queued transactions */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *txdi; - list_for_each_entry(txdi, &plchan->pend_list, node) { - struct pl08x_sg *dsg; - list_for_each_entry(dsg, &txd->dsg_list, node) - bytes += dsg->len; - } - } - - spin_unlock_irqrestore(&plchan->lock, flags); - return bytes; } @@ -391,7 +537,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, if (!ch->locked && !ch->serving) { ch->serving = virt_chan; - ch->signal = -1; spin_unlock_irqrestore(&ch->lock, flags); break; } @@ -404,25 +549,114 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, return NULL; } - pm_runtime_get_sync(&pl08x->adev->dev); return ch; } +/* Mark the physical channel as free. Note, this write is atomic. */ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, struct pl08x_phy_chan *ch) { - unsigned long flags; + ch->serving = NULL; +} + +/* + * Try to allocate a physical channel. When successful, assign it to + * this virtual channel, and initiate the next descriptor. The + * virtual channel lock must be held at this point. + */ +static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) +{ + struct pl08x_driver_data *pl08x = plchan->host; + struct pl08x_phy_chan *ch; - spin_lock_irqsave(&ch->lock, flags); + ch = pl08x_get_phy_channel(pl08x, plchan); + if (!ch) { + dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); + plchan->state = PL08X_CHAN_WAITING; + return; + } - /* Stop the channel and clear its interrupts */ - pl08x_terminate_phy_chan(pl08x, ch); + dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n", + ch->id, plchan->name); - pm_runtime_put(&pl08x->adev->dev); + plchan->phychan = ch; + plchan->state = PL08X_CHAN_RUNNING; + pl08x_start_next_txd(plchan); +} - /* Mark it as free */ - ch->serving = NULL; - spin_unlock_irqrestore(&ch->lock, flags); +static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch, + struct pl08x_dma_chan *plchan) +{ + struct pl08x_driver_data *pl08x = plchan->host; + + dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n", + ch->id, plchan->name); + + /* + * We do this without taking the lock; we're really only concerned + * about whether this pointer is NULL or not, and we're guaranteed + * that this will only be called when it _already_ is non-NULL. 
+ */ + ch->serving = plchan; + plchan->phychan = ch; + plchan->state = PL08X_CHAN_RUNNING; + pl08x_start_next_txd(plchan); +} + +/* + * Free a physical DMA channel, potentially reallocating it to another + * virtual channel if we have any pending. + */ +static void pl08x_phy_free(struct pl08x_dma_chan *plchan) +{ + struct pl08x_driver_data *pl08x = plchan->host; + struct pl08x_dma_chan *p, *next; + + retry: + next = NULL; + + /* Find a waiting virtual channel for the next transfer. */ + list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node) + if (p->state == PL08X_CHAN_WAITING) { + next = p; + break; + } + + if (!next) { + list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node) + if (p->state == PL08X_CHAN_WAITING) { + next = p; + break; + } + } + + /* Ensure that the physical channel is stopped */ + pl08x_terminate_phy_chan(pl08x, plchan->phychan); + + if (next) { + bool success; + + /* + * Eww. We know this isn't going to deadlock + * but lockdep probably doesn't. + */ + spin_lock(&next->vc.lock); + /* Re-check the state now that we have the lock */ + success = next->state == PL08X_CHAN_WAITING; + if (success) + pl08x_phy_reassign_start(plchan->phychan, next); + spin_unlock(&next->vc.lock); + + /* If the state changed, try to find another channel */ + if (!success) + goto retry; + } else { + /* No more jobs, so free up the physical channel */ + pl08x_put_phy_channel(pl08x, plchan->phychan); + } + + plchan->phychan = NULL; + plchan->state = PL08X_CHAN_IDLE; } /* @@ -585,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, return 0; } - pl08x->pool_ctr++; - bd.txd = txd; bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; cctl = txd->cctl; @@ -802,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, return num_llis; } -/* You should call this with the struct pl08x lock held */ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, struct pl08x_txd *txd) { struct pl08x_sg *dsg, *_dsg; - /* Free the LLI */ if (txd->llis_va) dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); - pl08x->pool_ctr--; - list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { list_del(&dsg->node); kfree(dsg); @@ -822,133 +1050,75 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, kfree(txd); } -static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, - struct pl08x_dma_chan *plchan) +static void pl08x_unmap_buffers(struct pl08x_txd *txd) { - struct pl08x_txd *txdi = NULL; - struct pl08x_txd *next; - - if (!list_empty(&plchan->pend_list)) { - list_for_each_entry_safe(txdi, - next, &plchan->pend_list, node) { - list_del(&txdi->node); - pl08x_free_txd(pl08x, txdi); + struct device *dev = txd->vd.tx.chan->device->dev; + struct pl08x_sg *dsg; + + if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_single(dev, dsg->src_addr, dsg->len, + DMA_TO_DEVICE); + else { + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_page(dev, dsg->src_addr, dsg->len, + DMA_TO_DEVICE); } } + if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_single(dev, dsg->dst_addr, dsg->len, + DMA_FROM_DEVICE); + else + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_page(dev, dsg->dst_addr, dsg->len, + DMA_FROM_DEVICE); + } } -/* - * The DMA ENGINE API - */ -static int 
pl08x_alloc_chan_resources(struct dma_chan *chan) +static void pl08x_desc_free(struct virt_dma_desc *vd) { - return 0; -} + struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); + struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); -static void pl08x_free_chan_resources(struct dma_chan *chan) -{ + if (!plchan->slave) + pl08x_unmap_buffers(txd); + + if (!txd->done) + pl08x_release_mux(plchan); + + pl08x_free_txd(plchan->host, txd); } -/* - * This should be called with the channel plchan->lock held - */ -static int prep_phy_channel(struct pl08x_dma_chan *plchan, - struct pl08x_txd *txd) +static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, + struct pl08x_dma_chan *plchan) { - struct pl08x_driver_data *pl08x = plchan->host; - struct pl08x_phy_chan *ch; - int ret; - - /* Check if we already have a channel */ - if (plchan->phychan) { - ch = plchan->phychan; - goto got_channel; - } + LIST_HEAD(head); + struct pl08x_txd *txd; - ch = pl08x_get_phy_channel(pl08x, plchan); - if (!ch) { - /* No physical channel available, cope with it */ - dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); - return -EBUSY; - } + vchan_get_all_descriptors(&plchan->vc, &head); - /* - * OK we have a physical channel: for memcpy() this is all we - * need, but for slaves the physical signals may be muxed! - * Can the platform allow us to use this channel? - */ - if (plchan->slave && pl08x->pd->get_signal) { - ret = pl08x->pd->get_signal(plchan); - if (ret < 0) { - dev_dbg(&pl08x->adev->dev, - "unable to use physical channel %d for transfer on %s due to platform restrictions\n", - ch->id, plchan->name); - /* Release physical channel & return */ - pl08x_put_phy_channel(pl08x, ch); - return -EBUSY; - } - ch->signal = ret; + while (!list_empty(&head)) { + txd = list_first_entry(&head, struct pl08x_txd, vd.node); + list_del(&txd->vd.node); + pl08x_desc_free(&txd->vd); } - - plchan->phychan = ch; - dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", - ch->id, - ch->signal, - plchan->name); - -got_channel: - /* Assign the flow control signal to this channel */ - if (txd->direction == DMA_MEM_TO_DEV) - txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT; - else if (txd->direction == DMA_DEV_TO_MEM) - txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT; - - plchan->phychan_hold++; - - return 0; } -static void release_phy_channel(struct pl08x_dma_chan *plchan) +/* + * The DMA ENGINE API + */ +static int pl08x_alloc_chan_resources(struct dma_chan *chan) { - struct pl08x_driver_data *pl08x = plchan->host; - - if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) { - pl08x->pd->put_signal(plchan); - plchan->phychan->signal = -1; - } - pl08x_put_phy_channel(pl08x, plchan->phychan); - plchan->phychan = NULL; + return 0; } -static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) +static void pl08x_free_chan_resources(struct dma_chan *chan) { - struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); - struct pl08x_txd *txd = to_pl08x_txd(tx); - unsigned long flags; - dma_cookie_t cookie; - - spin_lock_irqsave(&plchan->lock, flags); - cookie = dma_cookie_assign(tx); - - /* Put this onto the pending list */ - list_add_tail(&txd->node, &plchan->pend_list); - - /* - * If there was no physical channel available for this memcpy, - * stack the request up and indicate that the channel is waiting - * for a free physical channel. 
- */ - if (!plchan->slave && !plchan->phychan) { - /* Do this memcpy whenever there is a channel ready */ - plchan->state = PL08X_CHAN_WAITING; - plchan->waiting = txd; - } else { - plchan->phychan_hold--; - } - - spin_unlock_irqrestore(&plchan->lock, flags); - - return cookie; + /* Ensure all queued descriptors are freed */ + vchan_free_chan_resources(to_virt_chan(chan)); } static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( @@ -968,23 +1138,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + struct virt_dma_desc *vd; + unsigned long flags; enum dma_status ret; + size_t bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_SUCCESS) return ret; /* + * There's no point calculating the residue if there's + * no txstate to store the value. + */ + if (!txstate) { + if (plchan->state == PL08X_CHAN_PAUSED) + ret = DMA_PAUSED; + return ret; + } + + spin_lock_irqsave(&plchan->vc.lock, flags); + ret = dma_cookie_status(chan, cookie, txstate); + if (ret != DMA_SUCCESS) { + vd = vchan_find_desc(&plchan->vc, cookie); + if (vd) { + /* On the issued list, so hasn't been processed yet */ + struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); + struct pl08x_sg *dsg; + + list_for_each_entry(dsg, &txd->dsg_list, node) + bytes += dsg->len; + } else { + bytes = pl08x_getbytes_chan(plchan); + } + } + spin_unlock_irqrestore(&plchan->vc.lock, flags); + + /* * This cookie not complete yet * Get number of bytes left in the active transactions and queue */ - dma_set_residue(txstate, pl08x_getbytes_chan(plchan)); + dma_set_residue(txstate, bytes); - if (plchan->state == PL08X_CHAN_PAUSED) - return DMA_PAUSED; + if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS) + ret = DMA_PAUSED; /* Whether waiting or running, we're in progress */ - return DMA_IN_PROGRESS; + return ret; } /* PrimeCell DMA extension */ @@ -1080,38 +1280,14 @@ static u32 pl08x_burst(u32 maxburst) return burst_sizes[i].reg; } -static int dma_set_runtime_config(struct dma_chan *chan, - struct dma_slave_config *config) +static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, + enum dma_slave_buswidth addr_width, u32 maxburst) { - struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); - struct pl08x_driver_data *pl08x = plchan->host; - enum dma_slave_buswidth addr_width; - u32 width, burst, maxburst; - u32 cctl = 0; - - if (!plchan->slave) - return -EINVAL; - - /* Transfer direction */ - plchan->runtime_direction = config->direction; - if (config->direction == DMA_MEM_TO_DEV) { - addr_width = config->dst_addr_width; - maxburst = config->dst_maxburst; - } else if (config->direction == DMA_DEV_TO_MEM) { - addr_width = config->src_addr_width; - maxburst = config->src_maxburst; - } else { - dev_err(&pl08x->adev->dev, - "bad runtime_config: alien transfer direction\n"); - return -EINVAL; - } + u32 width, burst, cctl = 0; width = pl08x_width(addr_width); - if (width == ~0) { - dev_err(&pl08x->adev->dev, - "bad runtime_config: alien address width\n"); - return -EINVAL; - } + if (width == ~0) + return ~0; cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; @@ -1128,28 +1304,23 @@ static int dma_set_runtime_config(struct dma_chan *chan, cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; - plchan->device_fc = config->device_fc; + return pl08x_cctl(cctl); +} - if (plchan->runtime_direction == DMA_DEV_TO_MEM) { - 
plchan->src_addr = config->src_addr; - plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | - pl08x_select_bus(plchan->cd->periph_buses, - pl08x->mem_buses); - } else { - plchan->dst_addr = config->dst_addr; - plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR | - pl08x_select_bus(pl08x->mem_buses, - plchan->cd->periph_buses); - } +static int dma_set_runtime_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); - dev_dbg(&pl08x->adev->dev, - "configured channel %s (%s) for %s, data width %d, " - "maxburst %d words, LE, CCTL=0x%08x\n", - dma_chan_name(chan), plchan->name, - (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", - addr_width, - maxburst, - cctl); + if (!plchan->slave) + return -EINVAL; + + /* Reject definitely invalid configurations */ + if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || + config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return -EINVAL; + + plchan->cfg = *config; return 0; } @@ -1163,95 +1334,19 @@ static void pl08x_issue_pending(struct dma_chan *chan) struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); unsigned long flags; - spin_lock_irqsave(&plchan->lock, flags); - /* Something is already active, or we're waiting for a channel... */ - if (plchan->at || plchan->state == PL08X_CHAN_WAITING) { - spin_unlock_irqrestore(&plchan->lock, flags); - return; - } - - /* Take the first element in the queue and execute it */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *next; - - next = list_first_entry(&plchan->pend_list, - struct pl08x_txd, - node); - list_del(&next->node); - plchan->state = PL08X_CHAN_RUNNING; - - pl08x_start_txd(plchan, next); + spin_lock_irqsave(&plchan->vc.lock, flags); + if (vchan_issue_pending(&plchan->vc)) { + if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING) + pl08x_phy_alloc_and_start(plchan); } - - spin_unlock_irqrestore(&plchan->lock, flags); -} - -static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, - struct pl08x_txd *txd) -{ - struct pl08x_driver_data *pl08x = plchan->host; - unsigned long flags; - int num_llis, ret; - - num_llis = pl08x_fill_llis_for_desc(pl08x, txd); - if (!num_llis) { - spin_lock_irqsave(&plchan->lock, flags); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - return -EINVAL; - } - - spin_lock_irqsave(&plchan->lock, flags); - - /* - * See if we already have a physical channel allocated, - * else this is the time to try to get one. - */ - ret = prep_phy_channel(plchan, txd); - if (ret) { - /* - * No physical channel was available. - * - * memcpy transfers can be sorted out at submission time. - * - * Slave transfers may have been denied due to platform - * channel muxing restrictions. Since there is no guarantee - * that this will ever be resolved, and the signal must be - * acquired AFTER acquiring the physical channel, we will let - * them be NACK:ed with -EBUSY here. The drivers can retry - * the prep() call if they are eager on doing this using DMA. - */ - if (plchan->slave) { - pl08x_free_txd_list(pl08x, plchan); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - return -EBUSY; - } - } else - /* - * Else we're all set, paused and ready to roll, status - * will switch to PL08X_CHAN_RUNNING when we call - * issue_pending(). If there is something running on the - * channel already we don't change its state. 
- */ - if (plchan->state == PL08X_CHAN_IDLE) - plchan->state = PL08X_CHAN_PAUSED; - - spin_unlock_irqrestore(&plchan->lock, flags); - - return 0; + spin_unlock_irqrestore(&plchan->vc.lock, flags); } -static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, - unsigned long flags) +static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) { struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); if (txd) { - dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); - txd->tx.flags = flags; - txd->tx.tx_submit = pl08x_tx_submit; - INIT_LIST_HEAD(&txd->node); INIT_LIST_HEAD(&txd->dsg_list); /* Always enable error and terminal interrupts */ @@ -1274,7 +1369,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( struct pl08x_sg *dsg; int ret; - txd = pl08x_get_txd(plchan, flags); + txd = pl08x_get_txd(plchan); if (!txd) { dev_err(&pl08x->adev->dev, "%s no memory for descriptor\n", __func__); @@ -1290,14 +1385,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( } list_add_tail(&dsg->node, &txd->dsg_list); - txd->direction = DMA_NONE; dsg->src_addr = src; dsg->dst_addr = dest; dsg->len = len; /* Set platform data for m2m */ txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; - txd->cctl = pl08x->pd->memcpy_channel.cctl & + txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy & ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); /* Both to be incremented or the code will break */ @@ -1307,11 +1401,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( txd->cctl |= pl08x_select_bus(pl08x->mem_buses, pl08x->mem_buses); - ret = pl08x_prep_channel_resources(plchan, txd); - if (ret) + ret = pl08x_fill_llis_for_desc(plchan->host, txd); + if (!ret) { + pl08x_free_txd(pl08x, txd); return NULL; + } - return &txd->tx; + return vchan_tx_prep(&plchan->vc, &txd->vd, flags); } static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( @@ -1324,36 +1420,40 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( struct pl08x_txd *txd; struct pl08x_sg *dsg; struct scatterlist *sg; + enum dma_slave_buswidth addr_width; dma_addr_t slave_addr; int ret, tmp; + u8 src_buses, dst_buses; + u32 maxburst, cctl; dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", __func__, sg_dma_len(sgl), plchan->name); - txd = pl08x_get_txd(plchan, flags); + txd = pl08x_get_txd(plchan); if (!txd) { dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); return NULL; } - if (direction != plchan->runtime_direction) - dev_err(&pl08x->adev->dev, "%s DMA setup does not match " - "the direction configured for the PrimeCell\n", - __func__); - /* * Set up addresses, the PrimeCell configured address * will take precedence since this may configure the * channel target address dynamically at runtime. 
*/ - txd->direction = direction; - if (direction == DMA_MEM_TO_DEV) { - txd->cctl = plchan->dst_cctl; - slave_addr = plchan->dst_addr; + cctl = PL080_CONTROL_SRC_INCR; + slave_addr = plchan->cfg.dst_addr; + addr_width = plchan->cfg.dst_addr_width; + maxburst = plchan->cfg.dst_maxburst; + src_buses = pl08x->mem_buses; + dst_buses = plchan->cd->periph_buses; } else if (direction == DMA_DEV_TO_MEM) { - txd->cctl = plchan->src_cctl; - slave_addr = plchan->src_addr; + cctl = PL080_CONTROL_DST_INCR; + slave_addr = plchan->cfg.src_addr; + addr_width = plchan->cfg.src_addr_width; + maxburst = plchan->cfg.src_maxburst; + src_buses = plchan->cd->periph_buses; + dst_buses = pl08x->mem_buses; } else { pl08x_free_txd(pl08x, txd); dev_err(&pl08x->adev->dev, @@ -1361,7 +1461,17 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( return NULL; } - if (plchan->device_fc) + cctl |= pl08x_get_cctl(plchan, addr_width, maxburst); + if (cctl == ~0) { + pl08x_free_txd(pl08x, txd); + dev_err(&pl08x->adev->dev, + "DMA slave configuration botched?\n"); + return NULL; + } + + txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses); + + if (plchan->cfg.device_fc) tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : PL080_FLOW_PER2MEM_PER; else @@ -1370,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; + ret = pl08x_request_mux(plchan); + if (ret < 0) { + pl08x_free_txd(pl08x, txd); + dev_dbg(&pl08x->adev->dev, + "unable to mux for transfer on %s due to platform restrictions\n", + plchan->name); + return NULL; + } + + dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n", + plchan->signal, plchan->name); + + /* Assign the flow control signal to this channel */ + if (direction == DMA_MEM_TO_DEV) + txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT; + else + txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; + for_each_sg(sgl, sg, sg_len, tmp) { dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); if (!dsg) { + pl08x_release_mux(plchan); pl08x_free_txd(pl08x, txd); dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", __func__); @@ -1390,11 +1519,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( } } - ret = pl08x_prep_channel_resources(plchan, txd); - if (ret) + ret = pl08x_fill_llis_for_desc(plchan->host, txd); + if (!ret) { + pl08x_release_mux(plchan); + pl08x_free_txd(pl08x, txd); return NULL; + } - return &txd->tx; + return vchan_tx_prep(&plchan->vc, &txd->vd, flags); } static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, @@ -1415,9 +1547,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, * Anything succeeds on channels with no physical allocation and * no queued transfers. 
*/ - spin_lock_irqsave(&plchan->lock, flags); + spin_lock_irqsave(&plchan->vc.lock, flags); if (!plchan->phychan && !plchan->at) { - spin_unlock_irqrestore(&plchan->lock, flags); + spin_unlock_irqrestore(&plchan->vc.lock, flags); return 0; } @@ -1426,18 +1558,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, plchan->state = PL08X_CHAN_IDLE; if (plchan->phychan) { - pl08x_terminate_phy_chan(pl08x, plchan->phychan); - /* * Mark physical channel as free and free any slave * signal */ - release_phy_channel(plchan); - plchan->phychan_hold = 0; + pl08x_phy_free(plchan); } /* Dequeue jobs and free LLIs */ if (plchan->at) { - pl08x_free_txd(pl08x, plchan->at); + pl08x_desc_free(&plchan->at->vd); plchan->at = NULL; } /* Dequeue jobs not yet fired as well */ @@ -1457,7 +1586,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, break; } - spin_unlock_irqrestore(&plchan->lock, flags); + spin_unlock_irqrestore(&plchan->vc.lock, flags); return ret; } @@ -1494,123 +1623,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); } -static void pl08x_unmap_buffers(struct pl08x_txd *txd) -{ - struct device *dev = txd->tx.chan->device->dev; - struct pl08x_sg *dsg; - - if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - else { - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - } - } - if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - else - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - } -} - -static void pl08x_tasklet(unsigned long data) -{ - struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; - struct pl08x_driver_data *pl08x = plchan->host; - struct pl08x_txd *txd; - unsigned long flags; - - spin_lock_irqsave(&plchan->lock, flags); - - txd = plchan->at; - plchan->at = NULL; - - if (txd) { - /* Update last completed */ - dma_cookie_complete(&txd->tx); - } - - /* If a new descriptor is queued, set it up plchan->at is NULL here */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *next; - - next = list_first_entry(&plchan->pend_list, - struct pl08x_txd, - node); - list_del(&next->node); - - pl08x_start_txd(plchan, next); - } else if (plchan->phychan_hold) { - /* - * This channel is still in use - we have a new txd being - * prepared and will soon be queued. Don't give up the - * physical channel. - */ - } else { - struct pl08x_dma_chan *waiting = NULL; - - /* - * No more jobs, so free up the physical channel - * Free any allocated signal on slave transfers too - */ - release_phy_channel(plchan); - plchan->state = PL08X_CHAN_IDLE; - - /* - * And NOW before anyone else can grab that free:d up - * physical channel, see if there is some memcpy pending - * that seriously needs to start because of being stacked - * up while we were choking the physical channels with data. 
- */ - list_for_each_entry(waiting, &pl08x->memcpy.channels, - chan.device_node) { - if (waiting->state == PL08X_CHAN_WAITING && - waiting->waiting != NULL) { - int ret; - - /* This should REALLY not fail now */ - ret = prep_phy_channel(waiting, - waiting->waiting); - BUG_ON(ret); - waiting->phychan_hold--; - waiting->state = PL08X_CHAN_RUNNING; - waiting->waiting = NULL; - pl08x_issue_pending(&waiting->chan); - break; - } - } - } - - spin_unlock_irqrestore(&plchan->lock, flags); - - if (txd) { - dma_async_tx_callback callback = txd->tx.callback; - void *callback_param = txd->tx.callback_param; - - /* Don't try to unmap buffers on slave channels */ - if (!plchan->slave) - pl08x_unmap_buffers(txd); - - /* Free the descriptor */ - spin_lock_irqsave(&plchan->lock, flags); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - - /* Callback to signal completion */ - if (callback) - callback(callback_param); - } -} - static irqreturn_t pl08x_irq(int irq, void *dev) { struct pl08x_driver_data *pl08x = dev; @@ -1635,6 +1647,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) /* Locate physical channel */ struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; struct pl08x_dma_chan *plchan = phychan->serving; + struct pl08x_txd *tx; if (!plchan) { dev_err(&pl08x->adev->dev, @@ -1643,8 +1656,29 @@ static irqreturn_t pl08x_irq(int irq, void *dev) continue; } - /* Schedule tasklet on this channel */ - tasklet_schedule(&plchan->tasklet); + spin_lock(&plchan->vc.lock); + tx = plchan->at; + if (tx) { + plchan->at = NULL; + /* + * This descriptor is done, release its mux + * reservation. + */ + pl08x_release_mux(plchan); + tx->done = true; + vchan_cookie_complete(&tx->vd); + + /* + * And start the next descriptor (if any), + * otherwise free this channel. 
+ */ + if (vchan_next_desc(&plchan->vc)) + pl08x_start_next_txd(plchan); + else + pl08x_phy_free(plchan); + } + spin_unlock(&plchan->vc.lock); + mask |= (1 << i); } } @@ -1654,16 +1688,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev) static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) { - u32 cctl = pl08x_cctl(chan->cd->cctl); - chan->slave = true; chan->name = chan->cd->bus_id; - chan->src_addr = chan->cd->addr; - chan->dst_addr = chan->cd->addr; - chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | - pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); - chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | - pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); + chan->cfg.src_addr = chan->cd->addr; + chan->cfg.dst_addr = chan->cd->addr; } /* @@ -1693,6 +1721,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, chan->host = pl08x; chan->state = PL08X_CHAN_IDLE; + chan->signal = -1; if (slave) { chan->cd = &pl08x->pd->slave_channels[i]; @@ -1705,26 +1734,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, return -ENOMEM; } } - if (chan->cd->circular_buffer) { - dev_err(&pl08x->adev->dev, - "channel %s: circular buffers not supported\n", - chan->name); - kfree(chan); - continue; - } dev_dbg(&pl08x->adev->dev, "initialize virtual channel \"%s\"\n", chan->name); - chan->chan.device = dmadev; - dma_cookie_init(&chan->chan); - - spin_lock_init(&chan->lock); - INIT_LIST_HEAD(&chan->pend_list); - tasklet_init(&chan->tasklet, pl08x_tasklet, - (unsigned long) chan); - - list_add_tail(&chan->chan.device_node, &dmadev->channels); + chan->vc.desc_free = pl08x_desc_free; + vchan_init(&chan->vc, dmadev); } dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", i, slave ? 
"slave" : "memcpy"); @@ -1737,8 +1752,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev) struct pl08x_dma_chan *next; list_for_each_entry_safe(chan, - next, &dmadev->channels, chan.device_node) { - list_del(&chan->chan.device_node); + next, &dmadev->channels, vc.chan.device_node) { + list_del(&chan->vc.chan.device_node); kfree(chan); } } @@ -1791,7 +1806,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "\nPL08x virtual memcpy channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); - list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { + list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1799,7 +1814,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "\nPL08x virtual slave channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); - list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { + list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1851,9 +1866,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_pl08x; } - pm_runtime_set_active(&adev->dev); - pm_runtime_enable(&adev->dev); - /* Initialize memcpy engine */ dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); pl08x->memcpy.dev = &adev->dev; @@ -1903,8 +1915,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_lli_pool; } - spin_lock_init(&pl08x->lock); - pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); if (!pl08x->base) { ret = -ENOMEM; @@ -1942,7 +1952,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) ch->id = i; ch->base = pl08x->base + PL080_Cx_BASE(i); spin_lock_init(&ch->lock); - ch->signal = -1; /* * Nomadik variants can have channels that are locked @@ -2007,7 +2016,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) amba_part(adev), amba_rev(adev), (unsigned long long)adev->res.start, adev->irq[0]); - pm_runtime_put(&adev->dev); return 0; out_no_slave_reg: @@ -2026,9 +2034,6 @@ out_no_ioremap: dma_pool_destroy(pl08x->pool); out_no_lli_pool: out_no_platdata: - pm_runtime_put(&adev->dev); - pm_runtime_disable(&adev->dev); - kfree(pl08x); out_no_pl08x: amba_release_regions(adev); diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c new file mode 100644 index 00000000000..ae056182613 --- /dev/null +++ b/drivers/dma/omap-dma.c @@ -0,0 +1,669 @@ +/* + * OMAP DMAengine support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/omap-dma.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "virt-dma.h" +#include <plat/dma.h> + +struct omap_dmadev { + struct dma_device ddev; + spinlock_t lock; + struct tasklet_struct task; + struct list_head pending; +}; + +struct omap_chan { + struct virt_dma_chan vc; + struct list_head node; + + struct dma_slave_config cfg; + unsigned dma_sig; + bool cyclic; + + int dma_ch; + struct omap_desc *desc; + unsigned sgidx; +}; + +struct omap_sg { + dma_addr_t addr; + uint32_t en; /* number of elements (24-bit) */ + uint32_t fn; /* number of frames (16-bit) */ +}; + +struct omap_desc { + struct virt_dma_desc vd; + enum dma_transfer_direction dir; + dma_addr_t dev_addr; + + int16_t fi; /* for OMAP_DMA_SYNC_PACKET */ + uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */ + uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */ + uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */ + uint8_t periph_port; /* Peripheral port */ + + unsigned sglen; + struct omap_sg sg[0]; +}; + +static const unsigned es_bytes[] = { + [OMAP_DMA_DATA_TYPE_S8] = 1, + [OMAP_DMA_DATA_TYPE_S16] = 2, + [OMAP_DMA_DATA_TYPE_S32] = 4, +}; + +static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) +{ + return container_of(d, struct omap_dmadev, ddev); +} + +static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct omap_chan, vc.chan); +} + +static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct omap_desc, vd.tx); +} + +static void omap_dma_desc_free(struct virt_dma_desc *vd) +{ + kfree(container_of(vd, struct omap_desc, vd)); +} + +static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, + unsigned idx) +{ + struct omap_sg *sg = d->sg + idx; + + if (d->dir == DMA_DEV_TO_MEM) + omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, + OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); + else + omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, + OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); + + omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn, + d->sync_mode, c->dma_sig, d->sync_type); + + omap_start_dma(c->dma_ch); +} + +static void omap_dma_start_desc(struct omap_chan *c) +{ + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + struct omap_desc *d; + + if (!vd) { + c->desc = NULL; + return; + } + + list_del(&vd->node); + + c->desc = d = to_omap_dma_desc(&vd->tx); + c->sgidx = 0; + + if (d->dir == DMA_DEV_TO_MEM) + omap_set_dma_src_params(c->dma_ch, d->periph_port, + OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); + else + omap_set_dma_dest_params(c->dma_ch, d->periph_port, + OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); + + omap_dma_start_sg(c, d, 0); +} + +static void omap_dma_callback(int ch, u16 status, void *data) +{ + struct omap_chan *c = data; + struct omap_desc *d; + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + d = c->desc; + if (d) { + if (!c->cyclic) { + if (++c->sgidx < d->sglen) { + omap_dma_start_sg(c, d, c->sgidx); + } else { + omap_dma_start_desc(c); + vchan_cookie_complete(&d->vd); + } + } else { + vchan_cyclic_callback(&d->vd); + } + } + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +/* + * This callback schedules all pending channels. 
We could be more + * clever here by postponing allocation of the real DMA channels to + * this point, and freeing them when our virtual channel becomes idle. + * + * We would then need to deal with 'all channels in-use' + */ +static void omap_dma_sched(unsigned long data) +{ + struct omap_dmadev *d = (struct omap_dmadev *)data; + LIST_HEAD(head); + + spin_lock_irq(&d->lock); + list_splice_tail_init(&d->pending, &head); + spin_unlock_irq(&d->lock); + + while (!list_empty(&head)) { + struct omap_chan *c = list_first_entry(&head, + struct omap_chan, node); + + spin_lock_irq(&c->vc.lock); + list_del_init(&c->node); + omap_dma_start_desc(c); + spin_unlock_irq(&c->vc.lock); + } +} + +static int omap_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + + dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); + + return omap_request_dma(c->dma_sig, "DMA engine", + omap_dma_callback, c, &c->dma_ch); +} + +static void omap_dma_free_chan_resources(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + + vchan_free_chan_resources(&c->vc); + omap_free_dma(c->dma_ch); + + dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); +} + +static size_t omap_dma_sg_size(struct omap_sg *sg) +{ + return sg->en * sg->fn; +} + +static size_t omap_dma_desc_size(struct omap_desc *d) +{ + unsigned i; + size_t size; + + for (size = i = 0; i < d->sglen; i++) + size += omap_dma_sg_size(&d->sg[i]); + + return size * es_bytes[d->es]; +} + +static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr) +{ + unsigned i; + size_t size, es_size = es_bytes[d->es]; + + for (size = i = 0; i < d->sglen; i++) { + size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size; + + if (size) + size += this_size; + else if (addr >= d->sg[i].addr && + addr < d->sg[i].addr + this_size) + size += d->sg[i].addr + this_size - addr; + } + return size; +} + +static enum dma_status omap_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + struct virt_dma_desc *vd; + enum dma_status ret; + unsigned long flags; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_SUCCESS || !txstate) + return ret; + + spin_lock_irqsave(&c->vc.lock, flags); + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx)); + } else if (c->desc && c->desc->vd.tx.cookie == cookie) { + struct omap_desc *d = c->desc; + dma_addr_t pos; + + if (d->dir == DMA_MEM_TO_DEV) + pos = omap_get_dma_src_pos(c->dma_ch); + else if (d->dir == DMA_DEV_TO_MEM) + pos = omap_get_dma_dst_pos(c->dma_ch); + else + pos = 0; + + txstate->residue = omap_dma_desc_size_pos(d, pos); + } else { + txstate->residue = 0; + } + spin_unlock_irqrestore(&c->vc.lock, flags); + + return ret; +} + +static void omap_dma_issue_pending(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc) && !c->desc) { + struct omap_dmadev *d = to_omap_dma_dev(chan->device); + spin_lock(&d->lock); + if (list_empty(&c->node)) + list_add_tail(&c->node, &d->pending); + spin_unlock(&d->lock); + tasklet_schedule(&d->task); + } + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, + enum dma_transfer_direction dir, 
unsigned long tx_flags, void *context) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct scatterlist *sgent; + struct omap_desc *d; + dma_addr_t dev_addr; + unsigned i, j = 0, es, en, frame_bytes, sync_type; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + burst = c->cfg.src_maxburst; + sync_type = OMAP_DMA_SRC_SYNC; + } else if (dir == DMA_MEM_TO_DEV) { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + burst = c->cfg.dst_maxburst; + sync_type = OMAP_DMA_DST_SYNC; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = OMAP_DMA_DATA_TYPE_S8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = OMAP_DMA_DATA_TYPE_S16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = OMAP_DMA_DATA_TYPE_S32; + break; + default: /* not reached */ + return NULL; + } + + /* Now allocate and setup the descriptor. */ + d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dir = dir; + d->dev_addr = dev_addr; + d->es = es; + d->sync_mode = OMAP_DMA_SYNC_FRAME; + d->sync_type = sync_type; + d->periph_port = OMAP_DMA_PORT_TIPB; + + /* + * Build our scatterlist entries: each contains the address, + * the number of elements (EN) in each frame, and the number of + * frames (FN). Number of bytes for this entry = ES * EN * FN. + * + * Burst size translates to number of elements with frame sync. + * Note: DMA engine defines burst to be the number of dev-width + * transfers. + */ + en = burst; + frame_bytes = es_bytes[es] * en; + for_each_sg(sgl, sgent, sglen, i) { + d->sg[j].addr = sg_dma_address(sgent); + d->sg[j].en = en; + d->sg[j].fn = sg_dma_len(sgent) / frame_bytes; + j++; + } + + d->sglen = j; + + return vchan_tx_prep(&c->vc, &d->vd, tx_flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction dir, void *context) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct omap_desc *d; + dma_addr_t dev_addr; + unsigned es, sync_type; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + burst = c->cfg.src_maxburst; + sync_type = OMAP_DMA_SRC_SYNC; + } else if (dir == DMA_MEM_TO_DEV) { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + burst = c->cfg.dst_maxburst; + sync_type = OMAP_DMA_DST_SYNC; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = OMAP_DMA_DATA_TYPE_S8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = OMAP_DMA_DATA_TYPE_S16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = OMAP_DMA_DATA_TYPE_S32; + break; + default: /* not reached */ + return NULL; + } + + /* Now allocate and setup the descriptor. 
*/ + d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dir = dir; + d->dev_addr = dev_addr; + d->fi = burst; + d->es = es; + d->sync_mode = OMAP_DMA_SYNC_PACKET; + d->sync_type = sync_type; + d->periph_port = OMAP_DMA_PORT_MPUI; + d->sg[0].addr = buf_addr; + d->sg[0].en = period_len / es_bytes[es]; + d->sg[0].fn = buf_len / period_len; + d->sglen = 1; + + if (!c->cyclic) { + c->cyclic = true; + omap_dma_link_lch(c->dma_ch, c->dma_ch); + omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ); + omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ); + } + + if (!cpu_class_is_omap1()) { + omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); + omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); + } + + return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); +} + +static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) +{ + if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || + cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return -EINVAL; + + memcpy(&c->cfg, cfg, sizeof(c->cfg)); + + return 0; +} + +static int omap_dma_terminate_all(struct omap_chan *c) +{ + struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&c->vc.lock, flags); + + /* Prevent this channel being scheduled */ + spin_lock(&d->lock); + list_del_init(&c->node); + spin_unlock(&d->lock); + + /* + * Stop DMA activity: we assume the callback will not be called + * after omap_stop_dma() returns (even if it does, it will see + * c->desc is NULL and exit.) + */ + if (c->desc) { + c->desc = NULL; + omap_stop_dma(c->dma_ch); + } + + if (c->cyclic) { + c->cyclic = false; + omap_dma_unlink_lch(c->dma_ch, c->dma_ch); + } + + vchan_get_all_descriptors(&c->vc, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); + + return 0; +} + +static int omap_dma_pause(struct omap_chan *c) +{ + /* FIXME: not supported by platform private API */ + return -EINVAL; +} + +static int omap_dma_resume(struct omap_chan *c) +{ + /* FIXME: not supported by platform private API */ + return -EINVAL; +} + +static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + int ret; + + switch (cmd) { + case DMA_SLAVE_CONFIG: + ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); + break; + + case DMA_TERMINATE_ALL: + ret = omap_dma_terminate_all(c); + break; + + case DMA_PAUSE: + ret = omap_dma_pause(c); + break; + + case DMA_RESUME: + ret = omap_dma_resume(c); + break; + + default: + ret = -ENXIO; + break; + } + + return ret; +} + +static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) +{ + struct omap_chan *c; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + + c->dma_sig = dma_sig; + c->vc.desc_free = omap_dma_desc_free; + vchan_init(&c->vc, &od->ddev); + INIT_LIST_HEAD(&c->node); + + od->ddev.chancnt++; + + return 0; +} + +static void omap_dma_free(struct omap_dmadev *od) +{ + tasklet_kill(&od->task); + while (!list_empty(&od->ddev.channels)) { + struct omap_chan *c = list_first_entry(&od->ddev.channels, + struct omap_chan, vc.chan.device_node); + + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); + kfree(c); + } + kfree(od); +} + +static int omap_dma_probe(struct platform_device *pdev) +{ + struct omap_dmadev *od; + int rc, i; + + od = kzalloc(sizeof(*od), GFP_KERNEL); + if (!od) + return -ENOMEM; + + 
dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); + dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); + od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; + od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; + od->ddev.device_tx_status = omap_dma_tx_status; + od->ddev.device_issue_pending = omap_dma_issue_pending; + od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; + od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; + od->ddev.device_control = omap_dma_control; + od->ddev.dev = &pdev->dev; + INIT_LIST_HEAD(&od->ddev.channels); + INIT_LIST_HEAD(&od->pending); + spin_lock_init(&od->lock); + + tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); + + for (i = 0; i < 127; i++) { + rc = omap_dma_chan_init(od, i); + if (rc) { + omap_dma_free(od); + return rc; + } + } + + rc = dma_async_device_register(&od->ddev); + if (rc) { + pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", + rc); + omap_dma_free(od); + } else { + platform_set_drvdata(pdev, od); + } + + dev_info(&pdev->dev, "OMAP DMA engine driver\n"); + + return rc; +} + +static int omap_dma_remove(struct platform_device *pdev) +{ + struct omap_dmadev *od = platform_get_drvdata(pdev); + + dma_async_device_unregister(&od->ddev); + omap_dma_free(od); + + return 0; +} + +static struct platform_driver omap_dma_driver = { + .probe = omap_dma_probe, + .remove = omap_dma_remove, + .driver = { + .name = "omap-dma-engine", + .owner = THIS_MODULE, + }, +}; + +bool omap_dma_filter_fn(struct dma_chan *chan, void *param) +{ + if (chan->device->dev->driver == &omap_dma_driver.driver) { + struct omap_chan *c = to_omap_dma_chan(chan); + unsigned req = *(unsigned *)param; + + return req == c->dma_sig; + } + return false; +} +EXPORT_SYMBOL_GPL(omap_dma_filter_fn); + +static struct platform_device *pdev; + +static const struct platform_device_info omap_dma_dev_info = { + .name = "omap-dma-engine", + .id = -1, + .dma_mask = DMA_BIT_MASK(32), +}; + +static int omap_dma_init(void) +{ + int rc = platform_driver_register(&omap_dma_driver); + + if (rc == 0) { + pdev = platform_device_register_full(&omap_dma_dev_info); + if (IS_ERR(pdev)) { + platform_driver_unregister(&omap_dma_driver); + rc = PTR_ERR(pdev); + } + } + return rc; +} +subsys_initcall(omap_dma_init); + +static void __exit omap_dma_exit(void) +{ + platform_device_unregister(pdev); + platform_driver_unregister(&omap_dma_driver); +} +module_exit(omap_dma_exit); + +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index ec78ccef913..f5a73606217 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -21,6 +21,8 @@ #include <linux/slab.h> #include <linux/spinlock.h> +#include "virt-dma.h" + #define NR_PHY_CHAN 6 #define DMA_ALIGN 3 #define DMA_MAX_SIZE 0x1fff @@ -72,12 +74,13 @@ struct sa11x0_dma_sg { }; struct sa11x0_dma_desc { - struct dma_async_tx_descriptor tx; + struct virt_dma_desc vd; + u32 ddar; size_t size; + unsigned period; + bool cyclic; - /* maybe protected by c->lock */ - struct list_head node; unsigned sglen; struct sa11x0_dma_sg sg[0]; }; @@ -85,15 +88,11 @@ struct sa11x0_dma_desc { struct sa11x0_dma_phy; struct sa11x0_dma_chan { - struct dma_chan chan; - spinlock_t lock; - dma_cookie_t lc; + struct virt_dma_chan vc; - /* protected by c->lock */ + /* protected by c->vc.lock */ struct sa11x0_dma_phy *phy; enum dma_status status; - struct list_head desc_submitted; - struct list_head desc_issued; /* protected by d->lock */ struct list_head node; 
@@ -109,7 +108,7 @@ struct sa11x0_dma_phy { struct sa11x0_dma_chan *vchan; - /* Protected by c->lock */ + /* Protected by c->vc.lock */ unsigned sg_load; struct sa11x0_dma_desc *txd_load; unsigned sg_done; @@ -127,13 +126,12 @@ struct sa11x0_dma_dev { spinlock_t lock; struct tasklet_struct task; struct list_head chan_pending; - struct list_head desc_complete; struct sa11x0_dma_phy phy[NR_PHY_CHAN]; }; static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) { - return container_of(chan, struct sa11x0_dma_chan, chan); + return container_of(chan, struct sa11x0_dma_chan, vc.chan); } static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) @@ -141,27 +139,26 @@ static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) return container_of(dmadev, struct sa11x0_dma_dev, slave); } -static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx) +static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) { - return container_of(tx, struct sa11x0_dma_desc, tx); + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + + return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL; } -static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) +static void sa11x0_dma_free_desc(struct virt_dma_desc *vd) { - if (list_empty(&c->desc_issued)) - return NULL; - - return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node); + kfree(container_of(vd, struct sa11x0_dma_desc, vd)); } static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) { - list_del(&txd->node); + list_del(&txd->vd.node); p->txd_load = txd; p->sg_load = 0; dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n", - p->num, txd, txd->tx.cookie, txd->ddar); + p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar); } static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, @@ -183,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, return; if (p->sg_load == txd->sglen) { - struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); + if (!txd->cyclic) { + struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); - /* - * We have reached the end of the current descriptor. - * Peek at the next descriptor, and if compatible with - * the current, start processing it. - */ - if (txn && txn->ddar == txd->ddar) { - txd = txn; - sa11x0_dma_start_desc(p, txn); + /* + * We have reached the end of the current descriptor. + * Peek at the next descriptor, and if compatible with + * the current, start processing it. 
+ */ + if (txn && txn->ddar == txd->ddar) { + txd = txn; + sa11x0_dma_start_desc(p, txn); + } else { + p->txd_load = NULL; + return; + } } else { - p->txd_load = NULL; - return; + /* Cyclic: reset back to beginning */ + p->sg_load = 0; } } @@ -229,21 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd = p->txd_done; if (++p->sg_done == txd->sglen) { - struct sa11x0_dma_dev *d = p->dev; - - dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n", - p->num, p->txd_done, p->txd_done->tx.cookie); - - c->lc = txd->tx.cookie; + if (!txd->cyclic) { + vchan_cookie_complete(&txd->vd); - spin_lock(&d->lock); - list_add_tail(&txd->node, &d->desc_complete); - spin_unlock(&d->lock); + p->sg_done = 0; + p->txd_done = p->txd_load; - p->sg_done = 0; - p->txd_done = p->txd_load; + if (!p->txd_done) + tasklet_schedule(&p->dev->task); + } else { + if ((p->sg_done % txd->period) == 0) + vchan_cyclic_callback(&txd->vd); - tasklet_schedule(&d->task); + /* Cyclic: reset back to beginning */ + p->sg_done = 0; + } } sa11x0_dma_start_sg(p, c); @@ -280,7 +282,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) if (c) { unsigned long flags; - spin_lock_irqsave(&c->lock, flags); + spin_lock_irqsave(&c->vc.lock, flags); /* * Now that we're holding the lock, check that the vchan * really is associated with this pchan before touching the @@ -294,7 +296,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) if (dcsr & DCSR_DONEB) sa11x0_dma_complete(p, c); } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); } return IRQ_HANDLED; @@ -332,28 +334,15 @@ static void sa11x0_dma_tasklet(unsigned long arg) struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg; struct sa11x0_dma_phy *p; struct sa11x0_dma_chan *c; - struct sa11x0_dma_desc *txd, *txn; - LIST_HEAD(head); unsigned pch, pch_alloc = 0; dev_dbg(d->slave.dev, "tasklet enter\n"); - /* Get the completed tx descriptors */ - spin_lock_irq(&d->lock); - list_splice_init(&d->desc_complete, &head); - spin_unlock_irq(&d->lock); - - list_for_each_entry(txd, &head, node) { - c = to_sa11x0_dma_chan(txd->tx.chan); - - dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n", - c, txd, txd->tx.cookie); - - spin_lock_irq(&c->lock); + list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) { + spin_lock_irq(&c->vc.lock); p = c->phy; - if (p) { - if (!p->txd_done) - sa11x0_dma_start_txd(c); + if (p && !p->txd_done) { + sa11x0_dma_start_txd(c); if (!p->txd_done) { /* No current txd associated with this channel */ dev_dbg(d->slave.dev, "pchan %u: free\n", p->num); @@ -363,7 +352,7 @@ static void sa11x0_dma_tasklet(unsigned long arg) p->vchan = NULL; } } - spin_unlock_irq(&c->lock); + spin_unlock_irq(&c->vc.lock); } spin_lock_irq(&d->lock); @@ -380,7 +369,7 @@ static void sa11x0_dma_tasklet(unsigned long arg) /* Mark this channel allocated */ p->vchan = c; - dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c); + dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); } } spin_unlock_irq(&d->lock); @@ -390,42 +379,18 @@ static void sa11x0_dma_tasklet(unsigned long arg) p = &d->phy[pch]; c = p->vchan; - spin_lock_irq(&c->lock); + spin_lock_irq(&c->vc.lock); c->phy = p; sa11x0_dma_start_txd(c); - spin_unlock_irq(&c->lock); + spin_unlock_irq(&c->vc.lock); } } - /* Now free the completed tx descriptor, and call their callbacks */ - list_for_each_entry_safe(txd, txn, &head, node) { - dma_async_tx_callback callback = txd->tx.callback; - void *callback_param = 
txd->tx.callback_param; - - dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n", - txd, txd->tx.cookie); - - kfree(txd); - - if (callback) - callback(callback_param); - } - dev_dbg(d->slave.dev, "tasklet exit\n"); } -static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head) -{ - struct sa11x0_dma_desc *txd, *txn; - - list_for_each_entry_safe(txd, txn, head, node) { - dev_dbg(d->slave.dev, "txd %p: freeing\n", txd); - kfree(txd); - } -} - static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) { return 0; @@ -436,18 +401,12 @@ static void sa11x0_dma_free_chan_resources(struct dma_chan *chan) struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); unsigned long flags; - LIST_HEAD(head); - spin_lock_irqsave(&c->lock, flags); - spin_lock(&d->lock); + spin_lock_irqsave(&d->lock, flags); list_del_init(&c->node); - spin_unlock(&d->lock); - - list_splice_tail_init(&c->desc_submitted, &head); - list_splice_tail_init(&c->desc_issued, &head); - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&d->lock, flags); - sa11x0_dma_desc_free(d, &head); + vchan_free_chan_resources(&c->vc); } static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) @@ -472,33 +431,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); struct sa11x0_dma_phy *p; - struct sa11x0_dma_desc *txd; - dma_cookie_t last_used, last_complete; + struct virt_dma_desc *vd; unsigned long flags; enum dma_status ret; - size_t bytes = 0; - - last_used = c->chan.cookie; - last_complete = c->lc; - ret = dma_async_is_complete(cookie, last_complete, last_used); - if (ret == DMA_SUCCESS) { - dma_set_tx_state(state, last_complete, last_used, 0); + ret = dma_cookie_status(&c->vc.chan, cookie, state); + if (ret == DMA_SUCCESS) return ret; - } - spin_lock_irqsave(&c->lock, flags); + if (!state) + return c->status; + + spin_lock_irqsave(&c->vc.lock, flags); p = c->phy; - ret = c->status; - if (p) { - dma_addr_t addr = sa11x0_dma_pos(p); - dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); + /* + * If the cookie is on our issue queue, then the residue is + * its total size. 
+ */ + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size; + } else if (!p) { + state->residue = 0; + } else { + struct sa11x0_dma_desc *txd; + size_t bytes = 0; - txd = p->txd_done; + if (p->txd_done && p->txd_done->vd.tx.cookie == cookie) + txd = p->txd_done; + else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie) + txd = p->txd_load; + else + txd = NULL; + + ret = c->status; if (txd) { + dma_addr_t addr = sa11x0_dma_pos(p); unsigned i; + dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); + for (i = 0; i < txd->sglen; i++) { dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", i, txd->sg[i].addr, txd->sg[i].len); @@ -521,17 +494,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, bytes += txd->sg[i].len; } } - if (txd != p->txd_load && p->txd_load) - bytes += p->txd_load->size; - } - list_for_each_entry(txd, &c->desc_issued, node) { - bytes += txd->size; + state->residue = bytes; } - spin_unlock_irqrestore(&c->lock, flags); - - dma_set_tx_state(state, last_complete, last_used, bytes); + spin_unlock_irqrestore(&c->vc.lock, flags); - dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes); + dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue); return ret; } @@ -547,40 +514,20 @@ static void sa11x0_dma_issue_pending(struct dma_chan *chan) struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); unsigned long flags; - spin_lock_irqsave(&c->lock, flags); - list_splice_tail_init(&c->desc_submitted, &c->desc_issued); - if (!list_empty(&c->desc_issued)) { - spin_lock(&d->lock); - if (!c->phy && list_empty(&c->node)) { - list_add_tail(&c->node, &d->chan_pending); - tasklet_schedule(&d->task); - dev_dbg(d->slave.dev, "vchan %p: issued\n", c); + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc)) { + if (!c->phy) { + spin_lock(&d->lock); + if (list_empty(&c->node)) { + list_add_tail(&c->node, &d->chan_pending); + tasklet_schedule(&d->task); + dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); + } + spin_unlock(&d->lock); } - spin_unlock(&d->lock); } else - dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c); - spin_unlock_irqrestore(&c->lock, flags); -} - -static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan); - struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx); - unsigned long flags; - - spin_lock_irqsave(&c->lock, flags); - c->chan.cookie += 1; - if (c->chan.cookie < 0) - c->chan.cookie = 1; - txd->tx.cookie = c->chan.cookie; - - list_add_tail(&txd->node, &c->desc_submitted); - spin_unlock_irqrestore(&c->lock, flags); - - dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n", - c, txd, txd->tx.cookie); - - return txd->tx.cookie; + dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); + spin_unlock_irqrestore(&c->vc.lock, flags); } static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( @@ -596,7 +543,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( /* SA11x0 channels can only operate in their native direction */ if (dir != (c->ddar & DDAR_RW ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", - c, c->ddar, dir); + &c->vc, c->ddar, dir); return NULL; } @@ -612,14 +559,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; if (addr & DMA_ALIGN) { dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", - c, addr); + &c->vc, addr); return NULL; } } txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); if (!txd) { - dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c); + dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); return NULL; } @@ -655,17 +602,73 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( } while (len); } - dma_async_tx_descriptor_init(&txd->tx, &c->chan); - txd->tx.flags = flags; - txd->tx.tx_submit = sa11x0_dma_tx_submit; txd->ddar = c->ddar; txd->size = size; txd->sglen = j; dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", - c, txd, txd->size, txd->sglen); + &c->vc, &txd->vd, txd->size, txd->sglen); - return &txd->tx; + return vchan_tx_prep(&c->vc, &txd->vd, flags); +} + +static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, + enum dma_transfer_direction dir, void *context) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + struct sa11x0_dma_desc *txd; + unsigned i, j, k, sglen, sgperiod; + + /* SA11x0 channels can only operate in their native direction */ + if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { + dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", + &c->vc, c->ddar, dir); + return NULL; + } + + sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN); + sglen = size * sgperiod / period; + + /* Do not allow zero-sized txds */ + if (sglen == 0) + return NULL; + + txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC); + if (!txd) { + dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); + return NULL; + } + + for (i = k = 0; i < size / period; i++) { + size_t tlen, len = period; + + for (j = 0; j < sgperiod; j++, k++) { + tlen = len; + + if (tlen > DMA_MAX_SIZE) { + unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN); + tlen = (tlen / mult) & ~DMA_ALIGN; + } + + txd->sg[k].addr = addr; + txd->sg[k].len = tlen; + addr += tlen; + len -= tlen; + } + + WARN_ON(len != 0); + } + + WARN_ON(k != sglen); + + txd->ddar = c->ddar; + txd->size = size; + txd->sglen = sglen; + txd->cyclic = 1; + txd->period = sgperiod; + + return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) @@ -695,8 +698,8 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c if (maxburst == 8) ddar |= DDAR_BS; - dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", - c, addr, width, maxburst); + dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", + &c->vc, addr, width, maxburst); c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; @@ -718,16 +721,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); case DMA_TERMINATE_ALL: - dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c); + dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); /* Clear 
the tx descriptor lists */ - spin_lock_irqsave(&c->lock, flags); - list_splice_tail_init(&c->desc_submitted, &head); - list_splice_tail_init(&c->desc_issued, &head); + spin_lock_irqsave(&c->vc.lock, flags); + vchan_get_all_descriptors(&c->vc, &head); p = c->phy; if (p) { - struct sa11x0_dma_desc *txd, *txn; - dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); /* vchan is assigned to a pchan - stop the channel */ writel(DCSR_RUN | DCSR_IE | @@ -735,17 +735,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, DCSR_STRTB | DCSR_DONEB, p->base + DMA_DCSR_C); - list_for_each_entry_safe(txd, txn, &d->desc_complete, node) - if (txd->tx.chan == &c->chan) - list_move(&txd->node, &head); - if (p->txd_load) { if (p->txd_load != p->txd_done) - list_add_tail(&p->txd_load->node, &head); + list_add_tail(&p->txd_load->vd.node, &head); p->txd_load = NULL; } if (p->txd_done) { - list_add_tail(&p->txd_done->node, &head); + list_add_tail(&p->txd_done->vd.node, &head); p->txd_done = NULL; } c->phy = NULL; @@ -754,14 +750,14 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_unlock(&d->lock); tasklet_schedule(&d->task); } - spin_unlock_irqrestore(&c->lock, flags); - sa11x0_dma_desc_free(d, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); ret = 0; break; case DMA_PAUSE: - dev_dbg(d->slave.dev, "vchan %p: pause\n", c); - spin_lock_irqsave(&c->lock, flags); + dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); + spin_lock_irqsave(&c->vc.lock, flags); if (c->status == DMA_IN_PROGRESS) { c->status = DMA_PAUSED; @@ -774,26 +770,26 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_unlock(&d->lock); } } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); ret = 0; break; case DMA_RESUME: - dev_dbg(d->slave.dev, "vchan %p: resume\n", c); - spin_lock_irqsave(&c->lock, flags); + dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); + spin_lock_irqsave(&c->vc.lock, flags); if (c->status == DMA_PAUSED) { c->status = DMA_IN_PROGRESS; p = c->phy; if (p) { writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); - } else if (!list_empty(&c->desc_issued)) { + } else if (!list_empty(&c->vc.desc_issued)) { spin_lock(&d->lock); list_add_tail(&c->node, &d->chan_pending); spin_unlock(&d->lock); } } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); ret = 0; break; @@ -853,15 +849,13 @@ static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev, return -ENOMEM; } - c->chan.device = dmadev; c->status = DMA_IN_PROGRESS; c->ddar = chan_desc[i].ddar; c->name = chan_desc[i].name; - spin_lock_init(&c->lock); - INIT_LIST_HEAD(&c->desc_submitted); - INIT_LIST_HEAD(&c->desc_issued); INIT_LIST_HEAD(&c->node); - list_add_tail(&c->chan.device_node, &dmadev->channels); + + c->vc.desc_free = sa11x0_dma_free_desc; + vchan_init(&c->vc, dmadev); } return dma_async_device_register(dmadev); @@ -890,8 +884,9 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev) { struct sa11x0_dma_chan *c, *cn; - list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) { - list_del(&c->chan.device_node); + list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) { + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); kfree(c); } } @@ -915,7 +910,6 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev) spin_lock_init(&d->lock); INIT_LIST_HEAD(&d->chan_pending); - 
INIT_LIST_HEAD(&d->desc_complete); d->base = ioremap(res->start, resource_size(res)); if (!d->base) { @@ -947,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev) } dma_cap_set(DMA_SLAVE, d->slave.cap_mask); + dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; + d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic; ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); if (ret) { dev_warn(d->slave.dev, "failed to register slave async device: %d\n", diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c new file mode 100644 index 00000000000..6f80432a3f0 --- /dev/null +++ b/drivers/dma/virt-dma.c @@ -0,0 +1,123 @@ +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/device.h> +#include <linux/dmaengine.h> +#include <linux/module.h> +#include <linux/spinlock.h> + +#include "virt-dma.h" + +static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct virt_dma_desc, tx); +} + +dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct virt_dma_chan *vc = to_virt_chan(tx->chan); + struct virt_dma_desc *vd = to_virt_desc(tx); + unsigned long flags; + dma_cookie_t cookie; + + spin_lock_irqsave(&vc->lock, flags); + cookie = dma_cookie_assign(tx); + + list_add_tail(&vd->node, &vc->desc_submitted); + spin_unlock_irqrestore(&vc->lock, flags); + + dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", + vc, vd, cookie); + + return cookie; +} +EXPORT_SYMBOL_GPL(vchan_tx_submit); + +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, + dma_cookie_t cookie) +{ + struct virt_dma_desc *vd; + + list_for_each_entry(vd, &vc->desc_issued, node) + if (vd->tx.cookie == cookie) + return vd; + + return NULL; +} +EXPORT_SYMBOL_GPL(vchan_find_desc); + +/* + * This tasklet handles the completion of a DMA descriptor by + * calling its callback and freeing it. 
+ */ +static void vchan_complete(unsigned long arg) +{ + struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; + struct virt_dma_desc *vd; + dma_async_tx_callback cb = NULL; + void *cb_data = NULL; + LIST_HEAD(head); + + spin_lock_irq(&vc->lock); + list_splice_tail_init(&vc->desc_completed, &head); + vd = vc->cyclic; + if (vd) { + vc->cyclic = NULL; + cb = vd->tx.callback; + cb_data = vd->tx.callback_param; + } + spin_unlock_irq(&vc->lock); + + if (cb) + cb(cb_data); + + while (!list_empty(&head)) { + vd = list_first_entry(&head, struct virt_dma_desc, node); + cb = vd->tx.callback; + cb_data = vd->tx.callback_param; + + list_del(&vd->node); + + vc->desc_free(vd); + + if (cb) + cb(cb_data); + } +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) +{ + while (!list_empty(head)) { + struct virt_dma_desc *vd = list_first_entry(head, + struct virt_dma_desc, node); + list_del(&vd->node); + dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); + vc->desc_free(vd); + } +} +EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); + +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) +{ + dma_cookie_init(&vc->chan); + + spin_lock_init(&vc->lock); + INIT_LIST_HEAD(&vc->desc_submitted); + INIT_LIST_HEAD(&vc->desc_issued); + INIT_LIST_HEAD(&vc->desc_completed); + + tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); + + vc->chan.device = dmadev; + list_add_tail(&vc->chan.device_node, &dmadev->channels); +} +EXPORT_SYMBOL_GPL(vchan_init); + +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h new file mode 100644 index 00000000000..85c19d63f9f --- /dev/null +++ b/drivers/dma/virt-dma.h @@ -0,0 +1,152 @@ +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef VIRT_DMA_H +#define VIRT_DMA_H + +#include <linux/dmaengine.h> +#include <linux/interrupt.h> + +#include "dmaengine.h" + +struct virt_dma_desc { + struct dma_async_tx_descriptor tx; + /* protected by vc.lock */ + struct list_head node; +}; + +struct virt_dma_chan { + struct dma_chan chan; + struct tasklet_struct task; + void (*desc_free)(struct virt_dma_desc *); + + spinlock_t lock; + + /* protected by vc.lock */ + struct list_head desc_submitted; + struct list_head desc_issued; + struct list_head desc_completed; + + struct virt_dma_desc *cyclic; +}; + +static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) +{ + return container_of(chan, struct virt_dma_chan, chan); +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev); +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t); + +/** + * vchan_tx_prep - prepare a descriptor + * vc: virtual channel allocating this descriptor + * vd: virtual descriptor to prepare + * tx_flags: flags argument passed in to prepare function + */ +static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc, + struct virt_dma_desc *vd, unsigned long tx_flags) +{ + extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); + + dma_async_tx_descriptor_init(&vd->tx, &vc->chan); + vd->tx.flags = tx_flags; + vd->tx.tx_submit = vchan_tx_submit; + + return &vd->tx; +} + +/** + * vchan_issue_pending - move submitted descriptors to issued list + * vc: virtual channel to update + * + * vc.lock must be held by caller + */ +static inline bool vchan_issue_pending(struct virt_dma_chan *vc) +{ + list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); + return !list_empty(&vc->desc_issued); +} + +/** + * vchan_cookie_complete - report completion of a descriptor + * vd: virtual descriptor to update + * + * vc.lock must be held by caller + */ +static inline void vchan_cookie_complete(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + dma_cookie_complete(&vd->tx); + dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", + vd, vd->tx.cookie); + list_add_tail(&vd->node, &vc->desc_completed); + + tasklet_schedule(&vc->task); +} + +/** + * vchan_cyclic_callback - report the completion of a period + * vd: virtual descriptor + */ +static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + vc->cyclic = vd; + tasklet_schedule(&vc->task); +} + +/** + * vchan_next_desc - peek at the next descriptor to be processed + * vc: virtual channel to obtain descriptor from + * + * vc.lock must be held by caller + */ +static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) +{ + if (list_empty(&vc->desc_issued)) + return NULL; + + return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node); +} + +/** + * vchan_get_all_descriptors - obtain all submitted and issued descriptors + * vc: virtual channel to get descriptors from + * head: list of descriptors found + * + * vc.lock must be held by caller + * + * Removes all submitted and issued descriptors from internal lists, and + * provides a list of all descriptors found + */ +static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, + struct list_head *head) +{ + list_splice_tail_init(&vc->desc_submitted, head); + list_splice_tail_init(&vc->desc_issued, head); + list_splice_tail_init(&vc->desc_completed, 
head); +} + +static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) +{ + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&vc->lock, flags); + vchan_get_all_descriptors(vc, &head); + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); +} + +#endif
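Taken together, the helpers in virt-dma.h define one descriptor lifecycle for all converted drivers: vchan_tx_prep() at prepare time, vchan_issue_pending() and vchan_next_desc() when work is started, vchan_cookie_complete() from the interrupt handler (the client callback then runs in the channel tasklet), and vchan_get_all_descriptors() plus vchan_dma_desc_free_list() on terminate. A hedged continuation of the hypothetical "foo" sketch above shows those hot paths; foo_hw_start() and foo_hw_stop() are stand-ins for real register programming, not kernel APIs.

/* Continuation of the illustrative "foo" sketch above. */
static void foo_hw_start(struct foo_chan *c) { /* program controller from c->active->ctrl */ }
static void foo_hw_stop(struct foo_chan *c) { /* halt the controller */ }

/* Called with c->vc.lock held: take the next issued descriptor, if any. */
static void foo_start_next(struct foo_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!vd) {
		c->active = NULL;
		return;
	}
	list_del(&vd->node);		/* the hardware now owns it */
	c->active = to_foo_desc(vd);
	foo_hw_start(c);
}

static struct dma_async_tx_descriptor *foo_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct foo_chan *c = to_foo_chan(chan);
	struct foo_desc *d = kzalloc(sizeof(*d), GFP_ATOMIC);

	if (!d)
		return NULL;
	/* ... translate sgl/dir into d->ctrl (hardware specific, omitted) ... */
	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static void foo_issue_pending(struct dma_chan *chan)
{
	struct foo_chan *c = to_foo_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->active)
		foo_start_next(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_chan *c = data;

	spin_lock(&c->vc.lock);
	if (c->active) {
		/* Callback and kfree happen later, in the virt-dma tasklet. */
		vchan_cookie_complete(&c->active->vd);
		foo_start_next(c);
	}
	spin_unlock(&c->vc.lock);
	return IRQ_HANDLED;
}

static int foo_terminate_all(struct foo_chan *c)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);
	foo_hw_stop(c);
	vchan_get_all_descriptors(&c->vc, &head);
	if (c->active) {
		list_add_tail(&c->active->vd.node, &head);
		c->active = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);
	return 0;
}

The same split is visible in the omap-dma and sa11x0 conversions above: the descriptor lists, cookie handling and completion tasklet live entirely in virt-dma, while the drivers keep only the hardware programming and the mapping between issued descriptors and physical channels.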