author     Linus Torvalds <torvalds@linux-foundation.org>   2013-11-20 13:20:24 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-11-20 13:20:24 -0800
commit     e6d69a60b77a6ea8d5f9d41765c7571bb8d45531 (patch)
tree       4ea3fe7c49a864da2ce7ffb51a703661826dc15d /include/linux
parent     5a1efc6e68a095917277459091fafba6a6baef17 (diff)
parent     df12a3178d340319b1955be6b973a4eb84aff754 (diff)
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine changes from Vinod Koul:
 "This brings the following for slave dmaengine:
  - Change the dma notification flag to DMA_COMPLETE from DMA_SUCCESS,
    since dmaengine can only transfer data, not verify the validity of
    the transfers (a status-check sketch follows this message)
  - Bunch of fixes across drivers:
      - cppi41 driver fixes from Daniel
      - 8 channel freescale dma engine support and updated bindings from
        Hongbo
      - mxs-dma fixes and cleanup by Markus
  - DMAengine updates from Dan:
      - Bartlomiej and Dan finalized a rework of the dma address unmap
        implementation (a usage sketch follows the diff below)
      - In the course of testing the unmap rework, a collection of
        enhancements to dmatest fell out: notably basic performance
        statistics, and fixed / enhanced test control through the new
        module parameters 'run', 'wait', 'noverify', and 'verbose'.
        Thanks to Andriy and Linus [Walleij] for their review.
      - Testing the raid-related corner cases of the unmap rework
        triggered bugs in the recently added 16-source operation
        support in the ioatdma driver.
      - Some minor fixes / cleanups to mv_xor and ioatdma"
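With the rename, status checks in client code compare against DMA_COMPLETE rather than DMA_SUCCESS. A minimal sketch of such a poll, assuming `chan` and `cookie` come from an earlier dmaengine_submit() on that channel (the helper name is illustrative, not something from this tree):

```c
#include <linux/dmaengine.h>

/* Illustrative helper, not from this tree: poll a transfer previously
 * submitted on 'chan' and identified by 'cookie'. */
static bool sketch_copy_done(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status = dmaengine_tx_status(chan, cookie, NULL);

        /* DMA_COMPLETE only says the engine finished moving the data;
         * per the rename above, it makes no claim about data validity. */
        return status == DMA_COMPLETE;
}
```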
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (99 commits)
dma: mv_xor: Fix mis-usage of mmio 'base' and 'high_base' registers
dma: mv_xor: Remove unneeded NULL address check
ioat: fix ioat3_irq_reinit
ioat: kill msix_single_vector support
raid6test: add new corner case for ioatdma driver
ioatdma: clean up sed pool kmem_cache
ioatdma: fix selection of 16 vs 8 source path
ioatdma: fix sed pool selection
ioatdma: Fix bug in selftest after removal of DMA_MEMSET.
dmatest: verbose mode
dmatest: convert to dmaengine_unmap_data
dmatest: add a 'wait' parameter
dmatest: add basic performance metrics
dmatest: add support for skipping verification and random data setup
dmatest: use pseudo random numbers
dmatest: support xor-only, or pq-only channels in tests
dmatest: restore ability to start test at module load and init
dmatest: cleanup redundant "dmatest: " prefixes
dmatest: replace stored results mechanism, with uniform messages
Revert "dmatest: append verify result to results"
...
Diffstat (limited to 'include/linux')

-rw-r--r--  include/linux/dmaengine.h           | 76
-rw-r--r--  include/linux/platform_data/edma.h  |  8

2 files changed, 60 insertions, 24 deletions
```diff
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 0bc72753410..41cf0c39928 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie)
 
 /**
  * enum dma_status - DMA transaction status
- * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_COMPLETE: transaction completed
  * @DMA_IN_PROGRESS: transaction not yet processed
  * @DMA_PAUSED: transaction is paused
  * @DMA_ERROR: transaction failed
  */
 enum dma_status {
-        DMA_SUCCESS,
+        DMA_COMPLETE,
         DMA_IN_PROGRESS,
         DMA_PAUSED,
         DMA_ERROR,
@@ -171,12 +171,6 @@ struct dma_interleaved_template {
  * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
  *  acknowledges receipt, i.e. has has a chance to establish any dependency
  *  chains
- * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
- * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
- * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
- *  (if not set, do the source dma-unmapping as page)
- * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
- *  (if not set, do the destination dma-unmapping as page)
  * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
  * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
  * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -188,14 +182,10 @@ struct dma_interleaved_template {
 enum dma_ctrl_flags {
         DMA_PREP_INTERRUPT = (1 << 0),
         DMA_CTRL_ACK = (1 << 1),
-        DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
-        DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
-        DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
-        DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
-        DMA_PREP_PQ_DISABLE_P = (1 << 6),
-        DMA_PREP_PQ_DISABLE_Q = (1 << 7),
-        DMA_PREP_CONTINUE = (1 << 8),
-        DMA_PREP_FENCE = (1 << 9),
+        DMA_PREP_PQ_DISABLE_P = (1 << 2),
+        DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+        DMA_PREP_CONTINUE = (1 << 4),
+        DMA_PREP_FENCE = (1 << 5),
 };
 
 /**
@@ -413,6 +403,17 @@ void dma_chan_cleanup(struct kref *kref);
 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+        u8 to_cnt;
+        u8 from_cnt;
+        u8 bidi_cnt;
+        struct device *dev;
+        struct kref kref;
+        size_t len;
+        dma_addr_t addr[0];
+};
+
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
  * ---dma generic offload fields---
@@ -438,6 +439,7 @@ struct dma_async_tx_descriptor {
         dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
         dma_async_tx_callback callback;
         void *callback_param;
+        struct dmaengine_unmap_data *unmap;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
         struct dma_async_tx_descriptor *next;
         struct dma_async_tx_descriptor *parent;
@@ -445,6 +447,40 @@ struct dma_async_tx_descriptor {
 #endif
 };
 
+#ifdef CONFIG_DMA_ENGINE
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+                                 struct dmaengine_unmap_data *unmap)
+{
+        kref_get(&unmap->kref);
+        tx->unmap = unmap;
+}
+
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+#else
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+                                 struct dmaengine_unmap_data *unmap)
+{
+}
+static inline struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+        return NULL;
+}
+static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+}
+#endif
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+        if (tx->unmap) {
+                dmaengine_unmap_put(tx->unmap);
+                tx->unmap = NULL;
+        }
+}
+
 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {
@@ -979,10 +1015,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 {
         if (last_complete <= last_used) {
                 if ((cookie <= last_complete) || (cookie > last_used))
-                        return DMA_SUCCESS;
+                        return DMA_COMPLETE;
         } else {
                 if ((cookie <= last_complete) && (cookie > last_used))
-                        return DMA_SUCCESS;
+                        return DMA_COMPLETE;
         }
         return DMA_IN_PROGRESS;
 }
@@ -1013,11 +1049,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ
 }
 static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 {
-        return DMA_SUCCESS;
+        return DMA_COMPLETE;
 }
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
-        return DMA_SUCCESS;
+        return DMA_COMPLETE;
 }
 static inline void dma_issue_pending_all(void)
 {
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index 179fb91bb5f..f50821cb64b 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -67,10 +67,10 @@ struct edmacc_param {
 #define ITCCHEN BIT(23)
 
 /*ch_status paramater of callback function possible values*/
-#define DMA_COMPLETE 1
-#define DMA_CC_ERROR 2
-#define DMA_TC1_ERROR 3
-#define DMA_TC2_ERROR 4
+#define EDMA_DMA_COMPLETE 1
+#define EDMA_DMA_CC_ERROR 2
+#define EDMA_DMA_TC1_ERROR 3
+#define EDMA_DMA_TC2_ERROR 4
 
 enum address_mode {
         INCR = 0,
```
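The unmap rework above replaces the old DMA_COMPL_SKIP_*_UNMAP flags with reference-counted dmaengine_unmap_data that travels with the descriptor: the submitter records the mapped addresses, attaches the structure with dma_set_unmap(), and the core drops the mappings via dma_descriptor_unmap() when the descriptor retires. A minimal client-side sketch of that pattern, assuming a memcpy-capable channel and single-page buffers, with error handling trimmed (the helper name is illustrative, not something from this tree):

```c
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative client-side helper, not from this tree: copy 'len' bytes
 * (assumed to fit within one page) from 'src' to 'dst' on 'chan', letting
 * the dmaengine core unmap the buffers when the descriptor completes. */
static dma_cookie_t sketch_dma_memcpy(struct dma_chan *chan,
                                      struct page *dst, struct page *src,
                                      size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_unmap_data *unmap;
        dma_cookie_t cookie;

        /* Room for two addresses: one source, one destination. */
        unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
        if (!unmap)
                return -ENOMEM;

        unmap->to_cnt = 1;
        unmap->addr[0] = dma_map_page(dev->dev, src, 0, len, DMA_TO_DEVICE);
        unmap->from_cnt = 1;
        unmap->addr[1] = dma_map_page(dev->dev, dst, 0, len, DMA_FROM_DEVICE);
        unmap->len = len;

        tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
                                         len, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx) {
                dmaengine_unmap_put(unmap);     /* unmaps both pages */
                return -EIO;
        }

        dma_set_unmap(tx, unmap);       /* descriptor takes its own reference */
        cookie = dmaengine_submit(tx);
        dmaengine_unmap_put(unmap);     /* drop the submitter's reference */

        dma_async_issue_pending(chan);
        return cookie;
}
```

Because dma_set_unmap() takes its own kref, the submitter can drop its reference right after submission; whichever side releases the last reference triggers the actual dma-unmapping.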