Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r-- | include/linux/dmaengine.h | 54 |
1 file changed, 51 insertions, 3 deletions
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index cb286b1acdb..0bc72753410 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -38,7 +38,10 @@ typedef s32 dma_cookie_t;
 #define DMA_MIN_COOKIE	1
 #define DMA_MAX_COOKIE	INT_MAX
 
-#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
+static inline int dma_submit_error(dma_cookie_t cookie)
+{
+	return cookie < 0 ? cookie : 0;
+}
 
 /**
  * enum dma_status - DMA transaction status
@@ -370,6 +373,25 @@ struct dma_slave_config {
 	unsigned int slave_id;
 };
 
+/* struct dma_slave_caps - expose capabilities of a slave channel only
+ *
+ * @src_addr_widths: bit mask of src addr widths the channel supports
+ * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
+ * @directions: bit mask of slave direction the channel supported
+ *  since the enum dma_transfer_direction is not defined as bits for each
+ *  type of direction, the dma controller should fill (1 << <TYPE>) and same
+ *  should be checked by controller as well
+ * @cmd_pause: true, if pause and thereby resume is supported
+ * @cmd_terminate: true, if terminate cmd is supported
+ */
+struct dma_slave_caps {
+	u32 src_addr_widths;
+	u32 dstn_addr_widths;
+	u32 directions;
+	bool cmd_pause;
+	bool cmd_terminate;
+};
+
 static inline const char *dma_chan_name(struct dma_chan *chan)
 {
 	return dev_name(&chan->dev->device);
@@ -532,6 +554,7 @@ struct dma_tx_state {
  *	struct with auxiliary transfer status information, otherwise the call
  *	will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
+ * @device_slave_caps: return the slave channel capabilities
  */
 struct dma_device {
 
@@ -597,6 +620,7 @@ struct dma_device {
 					    dma_cookie_t cookie,
 					    struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
+	int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
 };
 
 static inline int dmaengine_device_control(struct dma_chan *chan,
@@ -670,6 +694,21 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
 	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
 }
 
+static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+{
+	if (!chan || !caps)
+		return -EINVAL;
+
+	/* check if the channel supports slave transactions */
+	if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
+		return -ENXIO;
+
+	if (chan->device->device_slave_caps)
+		return chan->device->device_slave_caps(chan, caps);
+
+	return -ENXIO;
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
 	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
@@ -958,8 +997,9 @@ dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used,
 	}
 }
 
-enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 #ifdef CONFIG_DMA_ENGINE
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
@@ -967,6 +1007,14 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 void dma_release_channel(struct dma_chan *chan);
 #else
+static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
+{
+	return NULL;
+}
+static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+	return DMA_SUCCESS;
+}
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
 	return DMA_SUCCESS;
@@ -994,7 +1042,7 @@ static inline void dma_release_channel(struct dma_chan *chan)
 int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
 struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
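
For context, a minimal usage sketch of the capability interface added above, not part of this patch: a controller driver filling in the new device_slave_caps callback, and a client querying it through dma_get_slave_caps(). The foo_* names are hypothetical placeholders, and the width/direction values are illustrative; the only assumptions beyond this diff are standard kernel definitions (BIT(), enum dma_slave_buswidth, enum dma_transfer_direction, pr_info()).

/*
 * Hypothetical usage sketch -- not part of this patch.  "foo" names are
 * placeholders for a real DMA controller / client driver.
 */
#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/kernel.h>

/*
 * Provider side: report what the slave channels can do.  Address widths are
 * assumed to be encoded as (1 << enum dma_slave_buswidth), following the
 * (1 << <TYPE>) convention documented for @directions in the new struct.
 */
static int foo_dma_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	caps->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	caps->cmd_pause = true;
	caps->cmd_terminate = true;
	return 0;
}
/* hooked up in probe(), e.g.: fdev->ddev.device_slave_caps = foo_dma_slave_caps; */

/*
 * Client side: check a channel's capabilities before configuring a
 * memory-to-device transfer on it.
 */
static int foo_check_tx_chan(struct dma_chan *chan)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;	/* -EINVAL or -ENXIO: caps not available */

	if (!(caps.directions & BIT(DMA_MEM_TO_DEV)))
		return -EINVAL;	/* channel cannot do mem-to-dev slave transfers */

	if (!caps.cmd_pause)
		pr_info("%s: pause/resume not supported\n", dma_chan_name(chan));

	return 0;
}

Note that dma_get_slave_caps() returns -ENXIO both when the channel is not a DMA_SLAVE channel and when the controller driver does not implement device_slave_caps, so a client cannot tell those two cases apart from the error code alone.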