author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-26 09:24:48 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-26 09:24:48 -0800 |
commit | 5115f3c19d17851aaff5a857f55b4a019c908775 (patch) | |
tree | 0d02cf01e12e86365f4f5e3b234f986daef181a7 /include/linux/dmaengine.h | |
parent | c41b3810c09e60664433548c5218cc6ece6a8903 (diff) | |
parent | 17166a3b6e88b93189e6be5f7e1335a3cc4fa965 (diff) | |
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
"This is fairly big pull by my standards as I had missed last merge
window. So we have the support for device tree for slave-dmaengine,
large updates to dw_dmac driver from Andy for reusing on different
architectures. Along with this we have fixes on bunch of the drivers"
Fix up trivial conflicts, usually due to #include lines moving next to
each other.
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (111 commits)
Revert "ARM: SPEAr13xx: Pass DW DMAC platform data from DT"
ARM: dts: pl330: Add #dma-cells for generic dma binding support
DMA: PL330: Register the DMA controller with the generic DMA helpers
DMA: PL330: Add xlate function
DMA: PL330: Add new pl330 filter for DT case.
dma: tegra20-apb-dma: remove unnecessary assignment
edma: do not waste memory for dma_mask
dma: coh901318: set residue only if dma is in progress
dma: coh901318: avoid unbalanced locking
dmaengine.h: remove redundant else keyword
dma: of-dma: protect list write operation by spin_lock
dmaengine: ste_dma40: do not remove descriptors for cyclic transfers
dma: of-dma.c: fix memory leakage
dw_dmac: apply default dma_mask if needed
dmaengine: ioat - fix spare sparse complain
dmaengine: move drivers/of/dma.c -> drivers/dma/of-dma.c
ioatdma: fix race between updating ioat->head and IOAT_COMPLETION_PENDING
dw_dmac: add support for Lynxpoint DMA controllers
dw_dmac: return proper residue value
dw_dmac: fill individual length of descriptor
...
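The pull message above highlights device-tree support for slave-dmaengine; the header change shown below likewise declares the new dma_request_slave_channel() lookup. As a hedged sketch only (the channel name "rx", the FIFO address, and the burst/width settings are illustrative assumptions, not taken from this pull), a consumer driver might use it roughly like this:

```c
/* Illustrative consumer-side sketch; names and parameters are assumptions. */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int example_start_rx_dma(struct device *dev, dma_addr_t fifo_addr,
				dma_addr_t buf, size_t len)
{
	struct dma_chan *chan;
	struct dma_slave_config cfg = { };
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Look the channel up by name via the DT "dmas"/"dma-names" properties. */
	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		return -ENODEV;

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.src_addr = fifo_addr;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 4;
	if (dmaengine_slave_config(chan, &cfg))
		goto err_release;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		goto err_release;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		goto err_release;

	dma_async_issue_pending(chan);
	return 0;

err_release:
	dma_release_channel(chan);
	return -EIO;
}
```

If no matching "dmas"/"dma-names" entry exists, the lookup returns NULL and a driver would typically fall back to the filter-based dma_request_channel().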
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r-- | include/linux/dmaengine.h | 48 |
1 file changed, 24 insertions, 24 deletions
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index d3201e438d1..f5939999cb6 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -608,7 +608,10 @@ static inline int dmaengine_device_control(struct dma_chan *chan,
 					    enum dma_ctrl_cmd cmd,
 					    unsigned long arg)
 {
-	return chan->device->device_control(chan, cmd, arg);
+	if (chan->device->device_control)
+		return chan->device->device_control(chan, cmd, arg);
+
+	return -ENOSYS;
 }
 
 static inline int dmaengine_slave_config(struct dma_chan *chan,
@@ -618,6 +621,11 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
 			(unsigned long)config);
 }
 
+static inline bool is_slave_direction(enum dma_transfer_direction direction)
+{
+	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+}
+
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
 	struct dma_chan *chan, dma_addr_t buf, size_t len,
 	enum dma_transfer_direction dir, unsigned long flags)
@@ -660,6 +668,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
 						period_len, dir, flags, NULL);
 }
 
+static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
+	struct dma_chan *chan, struct dma_interleaved_template *xt,
+	unsigned long flags)
+{
+	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
 	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
@@ -849,20 +864,6 @@ static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
 	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
 }
 
-#define first_dma_cap(mask) __first_dma_cap(&(mask))
-static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
-{
-	return min_t(int, DMA_TX_TYPE_END,
-		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
-}
-
-#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
-static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
-{
-	return min_t(int, DMA_TX_TYPE_END,
-		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
-}
-
 #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
 static inline void
 __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
@@ -891,9 +892,7 @@ __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
 }
 
 #define for_each_dma_cap_mask(cap, mask) \
-	for ((cap) = first_dma_cap(mask);	\
-		(cap) < DMA_TX_TYPE_END;	\
-		(cap) = next_dma_cap((cap), (mask)))
+	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
 
 /**
  * dma_async_issue_pending - flush pending transactions to HW
@@ -907,8 +906,6 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
 	chan->device->device_issue_pending(chan);
 }
 
-#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
-
 /**
  * dma_async_is_tx_complete - poll for transaction completion
  * @chan: DMA channel
@@ -934,16 +931,13 @@ static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
 	return status;
 }
 
-#define dma_async_memcpy_complete(chan, cookie, last, used)\
-	dma_async_is_tx_complete(chan, cookie, last, used)
-
 /**
  * dma_async_is_complete - test a cookie against chan state
  * @cookie: transaction identifier to test status of
  * @last_complete: last know completed transaction
  * @last_used: last cookie value handed out
  *
- * dma_async_is_complete() is used in dma_async_memcpy_complete()
+ * dma_async_is_complete() is used in dma_async_is_tx_complete()
 * the test logic is separated for lightweight testing of multiple cookies
 */
 static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
@@ -974,6 +968,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel(struct device *dev, char *name);
 void dma_release_channel(struct dma_chan *chan);
 #else
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
@@ -988,6 +983,11 @@ static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
 {
 	return NULL;
 }
+static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
+							 char *name)
+{
+	return NULL;
+}
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }
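Two of the additions in these hunks are small inline helpers: is_slave_direction(), which providers can use to sanity-check the direction argument of their prep callbacks, and dmaengine_prep_interleaved_dma(), which lets clients reach device_prep_interleaved_dma() without dereferencing chan->device themselves. The sketch below is illustrative only; the example_* function names and the single-chunk, strided-source geometry are assumptions, not code from this merge.

```c
/* Illustrative sketches; not taken from any driver in this pull. */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Provider side: reject directions that are not slave transfers. */
static struct dma_async_tx_descriptor *
example_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		      unsigned int sg_len, enum dma_transfer_direction dir,
		      unsigned long flags, void *context)
{
	if (!is_slave_direction(dir))
		return NULL;

	/* ... build and return the hardware descriptor here ... */
	return NULL;
}

/* Client side: one frame of 'lines' lines, a single chunk per line,
 * with a gap (icg) on the source side to express the stride. */
static struct dma_async_tx_descriptor *
example_prep_strided_copy(struct dma_chan *chan, dma_addr_t src,
			  dma_addr_t dst, size_t line_len, size_t lines,
			  size_t src_stride)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = true;		/* honour sgl[0].icg on the source */
	xt->dst_sgl = false;
	xt->numf = lines;
	xt->frame_size = 1;
	xt->sgl[0].size = line_len;
	xt->sgl[0].icg = src_stride - line_len;

	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);
	return desc;
}
```

Note also that dmaengine_device_control() now returns -ENOSYS when a driver does not implement device_control, so callers of dmaengine_terminate_all(), dmaengine_pause() and dmaengine_resume() should be prepared to see that error code instead of a NULL dereference.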