author		Dan Williams <dan.j.williams@intel.com>	2008-02-02 19:49:58 -0700
committer	Dan Williams <dan.j.williams@intel.com>	2008-02-06 10:12:18 -0700
commit		d4c56f97ff21df405d0cebe11f49e3c3c79662b5 (patch)
tree		e6b0de433d7c985982ac12815998242a786d87b2 /drivers/dma
parent		0036731c88fdb5bf4f04a796a30b5e445fc57f54 (diff)
async_tx: replace 'int_en' with operation preparation flags
Pass a full set of flags to drivers' per-operation 'prep' routines.
Currently the only flag passed is DMA_PREP_INTERRUPT. The expectation is
that arch-specific async_tx_find_channel() implementations can exploit this
capability to find the best channel for an operation.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Shannon Nelson <shannon.nelson@intel.com>
Reviewed-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
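[Editor's illustration, not part of the commit] A minimal client-side sketch of the interface this patch creates: a dmaengine user requests a completion interrupt on a memcpy by setting DMA_PREP_INTERRUPT in the new flags argument. The channel, DMA addresses, callback, and context names here are assumptions for the example, not code from this patch.

#include <linux/dmaengine.h>

/*
 * Hedged sketch: submit one DMA memcpy and ask for an interrupt on
 * completion via the flags argument. "chan", "dma_dest", "dma_src",
 * "len", "my_callback", and "my_ctx" are assumed to be set up by the
 * caller; only the flags usage is the point here.
 */
static dma_cookie_t submit_memcpy_with_irq(struct dma_chan *chan,
					   dma_addr_t dma_dest,
					   dma_addr_t dma_src, size_t len,
					   dma_async_tx_callback my_callback,
					   void *my_ctx)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
						  len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = my_callback;	/* invoked from the IRQ/tasklet path */
	tx->callback_param = my_ctx;

	return tx->tx_submit(tx);	/* queue the descriptor; returns a cookie */
}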
Diffstat (limited to 'drivers/dma')
 drivers/dma/ioat_dma.c |  4 ++--
 drivers/dma/iop-adma.c | 20 ++++++++++----------
 2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 5bcfc55a277..dff38accc5c 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -701,7 +701,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 						dma_addr_t dma_dest,
 						dma_addr_t dma_src,
 						size_t len,
-						int int_en)
+						unsigned long flags)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *new;
@@ -724,7 +724,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 						dma_addr_t dma_dest,
 						dma_addr_t dma_src,
 						size_t len,
-						int int_en)
+						unsigned long flags)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *new;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index eda841c6069..3986d54492b 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -537,7 +537,7 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
-	dma_addr_t dma_src, size_t len, int int_en)
+	dma_addr_t dma_src, size_t len, unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -555,7 +555,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_memcpy(grp_start, int_en);
+		iop_desc_init_memcpy(grp_start, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
@@ -569,7 +569,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 
 static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
-	int value, size_t len, int int_en)
+	int value, size_t len, unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -587,7 +587,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_memset(grp_start, int_en);
+		iop_desc_init_memset(grp_start, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_block_fill_val(grp_start, value);
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
@@ -602,7 +602,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 	dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
-	int int_en)
+	unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -613,15 +613,15 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
 
 	dev_dbg(iop_chan->device->common.dev,
-		"%s src_cnt: %d len: %u int_en: %d\n",
-		__FUNCTION__, src_cnt, len, int_en);
+		"%s src_cnt: %d len: %u flags: %lx\n",
+		__FUNCTION__, src_cnt, len, flags);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_xor(grp_start, src_cnt, int_en);
+		iop_desc_init_xor(grp_start, src_cnt, flags);
 		iop_desc_set_byte_count(grp_start, iop_chan, len);
 		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 		sw_desc->unmap_src_cnt = src_cnt;
@@ -638,7 +638,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 	unsigned int src_cnt, size_t len, u32 *result,
-	int int_en)
+	unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -655,7 +655,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 	if (sw_desc) {
 		grp_start = sw_desc->group_head;
-		iop_desc_init_zero_sum(grp_start, src_cnt, int_en);
+		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
 		iop_desc_set_zero_sum_byte_count(grp_start, len);
 		grp_start->xor_check_result = result;
 		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
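[Editor's illustration, not part of the commit] On the driver side, the conversion amounts to replacing a boolean test with a bit test. The sketch below is hypothetical (the struct, field, and bit names are invented for illustration and do not come from iop-adma or ioat): a descriptor-init helper consumes the flags word where it previously took the bare int_en integer.

#include <linux/dmaengine.h>
#include <linux/types.h>

/* Hypothetical hardware descriptor, for illustration only. */
struct my_hw_desc {
	u32 ctrl;
};

#define MY_HW_INT_EN	(1u << 3)	/* hypothetical interrupt-enable bit */

/*
 * Before this patch a helper like this took "int int_en"; now it takes
 * the operation-preparation flags and tests the DMA_PREP_INTERRUPT bit,
 * leaving room for future flags without another prototype change.
 */
static void my_desc_init_memcpy(struct my_hw_desc *desc, unsigned long flags)
{
	desc->ctrl = 0;
	if (flags & DMA_PREP_INTERRUPT)
		desc->ctrl |= MY_HW_INT_EN;
}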