From 65bc3ffe8c067e387fe5557bc3ea5071071f6af9 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Thu, 5 Jun 2008 23:26:11 -0700
Subject: async_tx: fix async_memset compile error

commit 636bdeaa 'dmaengine: ack to flags: make use of the unused bits in
the 'ack' field' missed an ->ack conversion in
crypto/async_tx/async_memset.c

Signed-off-by: Dan Williams
---
 crypto/async_tx/async_memset.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index f5ff3906b03..27a97dc90a7 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -76,7 +76,7 @@ async_memset(struct page *dest, int val, unsigned int offset,
 		/* if ack is already set then we cannot be sure
 		 * we are referring to the correct operation
 		 */
-		BUG_ON(depend_tx->ack);
+		BUG_ON(async_tx_test_ack(depend_tx));
 		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 			panic("%s: DMA_ERROR waiting for depend_tx\n",
 				__func__);

From 20fc190b0ef58bf8b3b0bff9de122083956f82ec Mon Sep 17 00:00:00 2001
From: Li Zefan
Date: Thu, 17 Jul 2008 17:59:47 -0700
Subject: async_tx: list_for_each_entry_rcu() cleanup

On the RCU update side, don't use list_for_each_entry_rcu().

Signed-off-by: Li Zefan
Signed-off-by: Dan Williams
---
 crypto/async_tx/async_tx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index c6e772fc5cc..9325c61208a 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -294,7 +294,7 @@ dma_channel_add_remove(struct dma_client *client,
 	case DMA_RESOURCE_REMOVED:
 		found = 0;
 		spin_lock_irqsave(&async_tx_lock, flags);
-		list_for_each_entry_rcu(ref, &async_tx_master_list, node)
+		list_for_each_entry(ref, &async_tx_master_list, node)
 			if (ref->chan == chan) {
 				/* permit backing devices to go away */
 				dma_chan_put(ref->chan);
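The rule behind Li Zefan's one-liner generalizes: list_for_each_entry_rcu() and its read barriers are only needed in read-side critical sections; an updater that already holds the list's lock can use the plain iterator. A kernel-context sketch of the two sides, reusing the names from the patch above (not a standalone program; do_something_ro() is a hypothetical placeholder):

	/* read side: must use the _rcu iterator inside rcu_read_lock() */
	rcu_read_lock();
	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
		do_something_ro(ref);	/* hypothetical read-only work */
	rcu_read_unlock();

	/* update side: already serialized by async_tx_lock, so the plain
	 * iterator suffices -- this is what the patch switches to
	 */
	spin_lock_irqsave(&async_tx_lock, flags);
	list_for_each_entry(ref, &async_tx_master_list, node)
		if (ref->chan == chan) {
			list_del_rcu(&ref->node);
			break;
		}
	spin_unlock_irqrestore(&async_tx_lock, flags);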
From 1e55db2d6bdef92abc981b68673564e63c80da4d Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Wed, 16 Jul 2008 19:44:56 -0700
Subject: async_tx: ensure the xor destination buffer remains dma-mapped

When the number of source buffers for an xor operation exceeds the
hardware channel maximum, async_xor creates a chain of dependent
operations.  The result of one operation is reused as an input to the
next to continue the xor calculation.  The destination buffer should
remain mapped for the duration of the entire chain.  To provide this
guarantee the code must no longer be allowed to fall back to the
synchronous path, as this will preclude the buffer from being unmapped,
i.e. the dma-driver will potentially miss the descriptor with
!DMA_COMPL_SKIP_DEST_UNMAP.

Cc: Neil Brown
Signed-off-by: Dan Williams
---
 crypto/async_tx/async_xor.c | 244 ++++++++++++++++++++------------------------
 1 file changed, 113 insertions(+), 131 deletions(-)

diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 3a0dddca5a1..1fcf45ac81e 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -35,72 +35,118 @@
  * when CONFIG_DMA_ENGINE=n
  */
 static __always_inline struct dma_async_tx_descriptor *
-do_async_xor(struct dma_device *device,
-	     struct dma_chan *chan, struct page *dest, struct page **src_list,
-	     unsigned int offset, unsigned int src_cnt, size_t len,
-	     enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
-	     dma_async_tx_callback cb_fn, void *cb_param)
+do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
+	     unsigned int offset, int src_cnt, size_t len,
+	     enum async_tx_flags flags,
+	     struct dma_async_tx_descriptor *depend_tx,
+	     dma_async_tx_callback cb_fn, void *cb_param)
 {
-	dma_addr_t dma_dest;
+	struct dma_device *dma = chan->device;
 	dma_addr_t *dma_src = (dma_addr_t *) src_list;
-	struct dma_async_tx_descriptor *tx;
+	struct dma_async_tx_descriptor *tx = NULL;
+	int src_off = 0;
 	int i;
-	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
-
-	pr_debug("%s: len: %zu\n", __func__, len);
-
-	dma_dest = dma_map_page(device->dev, dest, offset, len,
-				DMA_FROM_DEVICE);
+	dma_async_tx_callback _cb_fn;
+	void *_cb_param;
+	enum async_tx_flags async_flags;
+	enum dma_ctrl_flags dma_flags;
+	int xor_src_cnt;
+	dma_addr_t dma_dest;
 
+	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
 	for (i = 0; i < src_cnt; i++)
-		dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
+		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
 					  len, DMA_TO_DEVICE);
 
-	/* Since we have clobbered the src_list we are committed
-	 * to doing this asynchronously.  Drivers force forward progress
-	 * in case they can not provide a descriptor
-	 */
-	tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
-					 dma_prep_flags);
-	if (!tx) {
-		if (depend_tx)
+	while (src_cnt) {
+		async_flags = flags;
+		dma_flags = 0;
+		xor_src_cnt = min(src_cnt, dma->max_xor);
+		/* if we are submitting additional xors, leave the chain open,
+		 * clear the callback parameters, and leave the destination
+		 * buffer mapped
+		 */
+		if (src_cnt > xor_src_cnt) {
+			async_flags &= ~ASYNC_TX_ACK;
+			dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
+			_cb_fn = NULL;
+			_cb_param = NULL;
+		} else {
+			_cb_fn = cb_fn;
+			_cb_param = cb_param;
+		}
+		if (_cb_fn)
+			dma_flags |= DMA_PREP_INTERRUPT;
+
+		/* Since we have clobbered the src_list we are committed
+		 * to doing this asynchronously.  Drivers force forward
+		 * progress in case they can not provide a descriptor
+		 */
+		tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
+					      xor_src_cnt, len, dma_flags);
+
+		if (unlikely(!tx && depend_tx))
 			dma_wait_for_async_tx(depend_tx);
 
-		while (!tx)
-			tx = device->device_prep_dma_xor(chan, dma_dest,
-							 dma_src, src_cnt, len,
-							 dma_prep_flags);
-	}
+		/* spin wait for the preceding transactions to complete */
+		while (unlikely(!tx))
+			tx = dma->device_prep_dma_xor(chan, dma_dest,
+						      &dma_src[src_off],
+						      xor_src_cnt, len,
+						      dma_flags);
+
+		async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
+				_cb_param);
+
+		depend_tx = tx;
+		flags |= ASYNC_TX_DEP_ACK;
+
+		if (src_cnt > xor_src_cnt) {
+			/* drop completed sources */
+			src_cnt -= xor_src_cnt;
+			src_off += xor_src_cnt;
 
-	async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+			/* use the intermediate result as a source */
+			dma_src[--src_off] = dma_dest;
+			src_cnt++;
+		} else
+			break;
+	}
 
 	return tx;
 }
 
 static void
 do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
-	    unsigned int src_cnt, size_t len, enum async_tx_flags flags,
-	    struct dma_async_tx_descriptor *depend_tx,
-	    dma_async_tx_callback cb_fn, void *cb_param)
+	    int src_cnt, size_t len, enum async_tx_flags flags,
+	    struct dma_async_tx_descriptor *depend_tx,
+	    dma_async_tx_callback cb_fn, void *cb_param)
 {
-	void *_dest;
 	int i;
-
-	pr_debug("%s: len: %zu\n", __func__, len);
+	int xor_src_cnt;
+	int src_off = 0;
+	void *dest_buf;
+	void **srcs = (void **) src_list;
 
 	/* reuse the 'src_list' array to convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
-		src_list[i] = (struct page *)
-			(page_address(src_list[i]) + offset);
+		srcs[i] = page_address(src_list[i]) + offset;
 
 	/* set destination address */
-	_dest = page_address(dest) + offset;
+	dest_buf = page_address(dest) + offset;
 
 	if (flags & ASYNC_TX_XOR_ZERO_DST)
-		memset(_dest, 0, len);
+		memset(dest_buf, 0, len);
+
+	while (src_cnt > 0) {
+		/* process up to 'MAX_XOR_BLOCKS' sources */
+		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
+		xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);
 
-	xor_blocks(src_cnt, len, _dest,
-		   (void **) src_list);
+		/* drop completed sources */
+		src_cnt -= xor_src_cnt;
+		src_off += xor_src_cnt;
+	}
 
 	async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
 }
 
@@ -132,106 +178,42 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
 						      &dest, 1, src_list,
 						      src_cnt, len);
-	struct dma_device *device = chan ? chan->device : NULL;
-	struct dma_async_tx_descriptor *tx = NULL;
-	dma_async_tx_callback _cb_fn;
-	void *_cb_param;
-	unsigned long local_flags;
-	int xor_src_cnt;
-	int i = 0, src_off = 0;
 
 	BUG_ON(src_cnt <= 1);
 
-	while (src_cnt) {
-		local_flags = flags;
-		if (device) { /* run the xor asynchronously */
-			xor_src_cnt = min(src_cnt, device->max_xor);
-			/* if we are submitting additional xors
-			 * only set the callback on the last transaction
-			 */
-			if (src_cnt > xor_src_cnt) {
-				local_flags &= ~ASYNC_TX_ACK;
-				_cb_fn = NULL;
-				_cb_param = NULL;
-			} else {
-				_cb_fn = cb_fn;
-				_cb_param = cb_param;
-			}
-
-			tx = do_async_xor(device, chan, dest,
-					  &src_list[src_off], offset,
-					  xor_src_cnt, len, local_flags,
-					  depend_tx, _cb_fn, _cb_param);
-		} else { /* run the xor synchronously */
-			/* in the sync case the dest is an implied source
-			 * (assumes the dest is at the src_off index)
-			 */
-			if (flags & ASYNC_TX_XOR_DROP_DST) {
-				src_cnt--;
-				src_off++;
-			}
-
-			/* process up to 'MAX_XOR_BLOCKS' sources */
-			xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
+	if (chan) {
+		/* run the xor asynchronously */
+		pr_debug("%s (async): len: %zu\n", __func__, len);
 
-			/* if we are submitting additional xors
-			 * only set the callback on the last transaction
-			 */
-			if (src_cnt > xor_src_cnt) {
-				local_flags &= ~ASYNC_TX_ACK;
-				_cb_fn = NULL;
-				_cb_param = NULL;
-			} else {
-				_cb_fn = cb_fn;
-				_cb_param = cb_param;
-			}
-
-			/* wait for any prerequisite operations */
-			if (depend_tx) {
-				/* if ack is already set then we cannot be sure
-				 * we are referring to the correct operation
-				 */
-				BUG_ON(async_tx_test_ack(depend_tx));
-				if (dma_wait_for_async_tx(depend_tx) ==
-					DMA_ERROR)
-					panic("%s: DMA_ERROR waiting for "
-						"depend_tx\n",
-						__func__);
-			}
-
-			do_sync_xor(dest, &src_list[src_off], offset,
-				    xor_src_cnt, len, local_flags, depend_tx,
-				    _cb_fn, _cb_param);
-		}
+		return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
+				    flags, depend_tx, cb_fn, cb_param);
+	} else {
+		/* run the xor synchronously */
+		pr_debug("%s (sync): len: %zu\n", __func__, len);
 
-		/* the previous tx is hidden from the client,
-		 * so ack it
+		/* in the sync case the dest is an implied source
+		 * (assumes the dest is the first source)
 		 */
-		if (i && depend_tx)
-			async_tx_ack(depend_tx);
-
-		depend_tx = tx;
+		if (flags & ASYNC_TX_XOR_DROP_DST) {
+			src_cnt--;
+			src_list++;
+		}
 
-		if (src_cnt > xor_src_cnt) {
-			/* drop completed sources */
-			src_cnt -= xor_src_cnt;
-			src_off += xor_src_cnt;
+		/* wait for any prerequisite operations */
+		if (depend_tx) {
+			/* if ack is already set then we cannot be sure
+			 * we are referring to the correct operation
+			 */
+			BUG_ON(async_tx_test_ack(depend_tx));
+			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
+				panic("%s: DMA_ERROR waiting for depend_tx\n",
+				      __func__);
+		}
 
-			/* unconditionally preserve the destination */
-			flags &= ~ASYNC_TX_XOR_ZERO_DST;
+		do_sync_xor(dest, src_list, offset, src_cnt, len,
+			    flags, depend_tx, cb_fn, cb_param);
 
-			/* use the intermediate result a source, but remember
-			 * it's dropped, because it's implied, in the sync case
-			 */
-			src_list[--src_off] = dest;
-			src_cnt++;
-			flags |= ASYNC_TX_XOR_DROP_DST;
-		} else
-			src_cnt = 0;
-		i++;
+		return NULL;
 	}
-
-	return tx;
 }
 EXPORT_SYMBOL_GPL(async_xor);
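The source-splitting loop that this patch moves into do_async_xor() can be illustrated without any dmaengine hardware. Below is a minimal, standalone C sketch (simplified, not the kernel code): xor_blocks_sw() stands in for the engine's xor operation, max_xor plays the role of dma->max_xor, and the destination is fed back in as a source on every pass after the first, which is exactly why the real buffer has to stay dma-mapped until the chain completes.

    /* chained_xor.c - userspace illustration of the do_async_xor() loop */
    #include <stdio.h>
    #include <string.h>

    #define LEN   8
    #define NSRCS 5

    /* dest = srcs[0] ^ ... ^ srcs[n-1]; safe even when dest is among srcs,
     * because each output byte is fully computed before it is stored */
    static void xor_blocks_sw(int src_cnt, size_t len, unsigned char *dest,
                              unsigned char **srcs)
    {
        for (size_t j = 0; j < len; j++) {
            unsigned char v = 0;
            for (int i = 0; i < src_cnt; i++)
                v ^= srcs[i][j];
            dest[j] = v;
        }
    }

    int main(void)
    {
        unsigned char bufs[NSRCS][LEN], dest[LEN] = { 0 };
        unsigned char *src[NSRCS];
        int src_cnt = NSRCS, src_off = 0;
        const int max_xor = 2;            /* tiny per-descriptor source limit */

        for (int i = 0; i < NSRCS; i++) {
            memset(bufs[i], i + 1, LEN);  /* sources hold 1, 2, 3, 4, 5 */
            src[i] = bufs[i];
        }

        while (src_cnt) {
            int xor_src_cnt = src_cnt < max_xor ? src_cnt : max_xor;

            xor_blocks_sw(xor_src_cnt, LEN, dest, &src[src_off]);

            if (src_cnt > xor_src_cnt) {
                /* drop completed sources... */
                src_cnt -= xor_src_cnt;
                src_off += xor_src_cnt;
                /* ...and feed the intermediate result back in as a source;
                 * in the dma case this is why dest must stay mapped */
                src[--src_off] = dest;
                src_cnt++;
            } else
                break;
        }

        printf("dest[0] = 0x%02x\n", dest[0]);  /* 1^2^3^4^5 = 0x01 */
        return 0;
    }

Running it prints dest[0] = 0x01, the xor of 1 through 5, matching what a single-pass computation would produce.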
From 669ab0b210f9bd15d94d4d6a49ae13366a85e4da Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Thu, 17 Jul 2008 17:59:55 -0700
Subject: async_tx: fix handling of the "out of descriptor" condition in async_xor

Ensure forward progress is made when a dmaengine driver is unable to
allocate an xor descriptor, by breaking the dependency chain with
async_tx_quiesce() and issuing any pending descriptors.

Tested with iop-adma by setting device->max_xor = 2 to force multiple
calls to device_prep_dma_xor for each call to async_xor and limiting the
descriptor slot pool to 5.  Discovered that the minimum descriptor pool
size for iop-adma is 2 * iop_chan_xor_slot_cnt(device->max_xor) + 1.

Signed-off-by: Dan Williams
---
 crypto/async_tx/async_xor.c | 32 ++++++++++++++++++++++++++------
 1 file changed, 26 insertions(+), 6 deletions(-)

diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 1fcf45ac81e..19d16e452bc 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -30,6 +30,24 @@
 #include <linux/raid/xor.h>
 #include <linux/async_tx.h>
 
+/**
+ * async_tx_quiesce - ensure tx is complete and freeable upon return
+ * @tx - transaction to quiesce
+ */
+static void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
+{
+	if (*tx) {
+		/* if ack is already set then we cannot be sure
+		 * we are referring to the correct operation
+		 */
+		BUG_ON(async_tx_test_ack(*tx));
+		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
+			panic("DMA_ERROR waiting for transaction\n");
+		async_tx_ack(*tx);
+		*tx = NULL;
+	}
+}
+
 /* do_async_xor - dma map the pages and perform the xor with an engine.
  * This routine is marked __always_inline so it can be compiled away
  * when CONFIG_DMA_ENGINE=n
@@ -85,15 +103,17 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 		tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
 					      xor_src_cnt, len, dma_flags);
 
-		if (unlikely(!tx && depend_tx))
-			dma_wait_for_async_tx(depend_tx);
+		if (unlikely(!tx))
+			async_tx_quiesce(&depend_tx);
 
 		/* spin wait for the preceding transactions to complete */
-		while (unlikely(!tx))
+		while (unlikely(!tx)) {
+			dma_async_issue_pending(chan);
 			tx = dma->device_prep_dma_xor(chan, dma_dest,
 						      &dma_src[src_off],
 						      xor_src_cnt, len,
 						      dma_flags);
+		}
 
 		async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
 				_cb_param);
@@ -267,11 +287,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
 						      len, result,
 						      dma_prep_flags);
-		if (!tx) {
-			if (depend_tx)
-				dma_wait_for_async_tx(depend_tx);
+		if (unlikely(!tx)) {
+			async_tx_quiesce(&depend_tx);
 
 			while (!tx)
+				dma_async_issue_pending(chan);
 				tx = device->device_prep_dma_zero_sum(chan,
 					dma_src, src_cnt, len, result,
 					dma_prep_flags);
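The recovery idiom this patch adopts is worth stating generically: when a prep callback returns NULL the descriptor pool is exhausted, so the submitter first quiesces its own dependency (freeing that descriptor for recycling), then flushes the channel's pending work and retries. A kernel-context sketch, with prep() standing in for a driver's hypothetical prep hook and its arguments elided; note the braces around the two-statement retry body, which the do_async_xor() hunk above has and which any copy of this pattern also needs:

	struct dma_async_tx_descriptor *tx;

	tx = prep(chan /* , ... hypothetical operation arguments */);
	if (unlikely(!tx))
		/* release our dependency so its slot can be recycled */
		async_tx_quiesce(&depend_tx);

	while (unlikely(!tx)) {
		/* push submitted-but-idle descriptors through the engine */
		dma_async_issue_pending(chan);
		tx = prep(chan /* , ... */);
	}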
From d2c52b7983b95bb3fc2a784e479f832f142d4523 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Thu, 17 Jul 2008 17:59:55 -0700
Subject: async_tx: export async_tx_quiesce

Replace open coded "wait and acknowledge" instances with async_tx_quiesce.

Signed-off-by: Dan Williams
---
 crypto/async_tx/async_memcpy.c | 10 +---------
 crypto/async_tx/async_memset.c | 10 +---------
 crypto/async_tx/async_tx.c     | 29 ++++++++++++++++++++---------
 crypto/async_tx/async_xor.c    | 37 ++-----------------------------------
 include/linux/async_tx.h       |  2 ++
 5 files changed, 26 insertions(+), 62 deletions(-)

diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index a5eda80e842..06a7f4be973 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -73,15 +73,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 		pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
 		src_buf = kmap_atomic(src, KM_USER1) + src_offset;

diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 27a97dc90a7..d48ed22ed1c 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -72,15 +72,7 @@ async_memset(struct page *dest, int val, unsigned int offset,
 		dest_buf = (void *) (((char *) page_address(dest)) + offset);
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		memset(dest_buf, val, len);

diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 9325c61208a..78a61e7f631 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -607,15 +607,7 @@ async_trigger_callback(enum async_tx_flags flags,
 		pr_debug("%s: (sync)\n", __func__);
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
 	}
@@ -624,6 +616,25 @@ async_trigger_callback(enum async_tx_flags flags,
 }
 EXPORT_SYMBOL_GPL(async_trigger_callback);
 
+/**
+ * async_tx_quiesce - ensure tx is complete and freeable upon return
+ * @tx - transaction to quiesce
+ */
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
+{
+	if (*tx) {
+		/* if ack is already set then we cannot be sure
+		 * we are referring to the correct operation
+		 */
+		BUG_ON(async_tx_test_ack(*tx));
+		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
+			panic("DMA_ERROR waiting for transaction\n");
+		async_tx_ack(*tx);
+		*tx = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(async_tx_quiesce);
+
 module_init(async_tx_init);
 module_exit(async_tx_exit);

diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 19d16e452bc..689ecce73ee 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -30,24 +30,6 @@
 #include <linux/raid/xor.h>
 #include <linux/async_tx.h>
 
-/**
- * async_tx_quiesce - ensure tx is complete and freeable upon return
- * @tx - transaction to quiesce
- */
-static void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
-{
-	if (*tx) {
-		/* if ack is already set then we cannot be sure
-		 * we are referring to the correct operation
-		 */
-		BUG_ON(async_tx_test_ack(*tx));
-		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
-			panic("DMA_ERROR waiting for transaction\n");
-		async_tx_ack(*tx);
-		*tx = NULL;
-	}
-}
-
 /* do_async_xor - dma map the pages and perform the xor with an engine.
  * This routine is marked __always_inline so it can be compiled away
  * when CONFIG_DMA_ENGINE=n
@@ -219,15 +201,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		}
 
 		/* wait for any prerequisite operations */
-		if (depend_tx) {
-			/* if ack is already set then we cannot be sure
-			 * we are referring to the correct operation
-			 */
-			BUG_ON(async_tx_test_ack(depend_tx));
-			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for depend_tx\n",
-					__func__);
-		}
+		async_tx_quiesce(&depend_tx);
 
 		do_sync_xor(dest, src_list, offset, src_cnt, len,
 			    flags, depend_tx, cb_fn, cb_param);
@@ -309,17 +283,10 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
 			       depend_tx, NULL, NULL);
 
-		if (tx) {
-			if (dma_wait_for_async_tx(tx) == DMA_ERROR)
-				panic("%s: DMA_ERROR waiting for tx\n",
-					__func__);
-			async_tx_ack(tx);
-		}
+		async_tx_quiesce(&tx);
 
 		*result = page_is_zero(dest, offset, len) ? 0 : 1;
 
-		tx = NULL;
-
 		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
 	}

diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index eb640f0acfa..9f0e7bd5bdc 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -152,4 +152,6 @@ struct dma_async_tx_descriptor *
 async_trigger_callback(enum async_tx_flags flags,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_fn_param);
+
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
 #endif /* _ASYNC_TX_H_ */
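A note on the signature: async_tx_quiesce() takes a pointer-to-pointer so it can both acknowledge the descriptor and clear the caller's reference in one step, which is why async_xor_zero_sum above loses its explicit "tx = NULL;". An illustrative caller (a sketch, not from the source; it assumes dest and src pages were set up elsewhere, and the descriptor must not already carry ASYNC_TX_ACK or the BUG_ON fires):

	struct page *dest, *src;	/* assume these are allocated/mapped elsewhere */
	struct dma_async_tx_descriptor *tx;

	/* flags == 0: keep the ack bit clear so we may legally wait on tx */
	tx = async_memcpy(dest, src, 0, 0, PAGE_SIZE, 0, NULL, NULL, NULL);

	async_tx_quiesce(&tx);	/* waits, acks, and sets tx = NULL */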
From 3dce01713723bbcc92562bd4488e8b840a4f786c Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Thu, 17 Jul 2008 17:59:55 -0700
Subject: async_tx: remove depend_tx from async_tx_sync_epilog

All callers of async_tx_sync_epilog have called async_tx_quiesce on the
depend_tx, so async_tx_sync_epilog need only call the callback to
complete the operation.

Signed-off-by: Dan Williams
---
 crypto/async_tx/async_memcpy.c | 2 +-
 crypto/async_tx/async_memset.c | 2 +-
 crypto/async_tx/async_tx.c     | 2 +-
 crypto/async_tx/async_xor.c    | 7 +++----
 include/linux/async_tx.h       | 9 +--------
 5 files changed, 7 insertions(+), 15 deletions(-)

diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 06a7f4be973..ddccfb01c41 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -83,7 +83,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 		kunmap_atomic(dest_buf, KM_USER0);
 		kunmap_atomic(src_buf, KM_USER1);
 
-		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+		async_tx_sync_epilog(cb_fn, cb_param);
 	}
 
 	return tx;

diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index d48ed22ed1c..5b5eb99bb24 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -76,7 +76,7 @@ async_memset(struct page *dest, int val, unsigned int offset,
 		memset(dest_buf, val, len);
 
-		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+		async_tx_sync_epilog(cb_fn, cb_param);
 	}
 
 	return tx;

diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 78a61e7f631..35869a37a6f 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -609,7 +609,7 @@ async_trigger_callback(enum async_tx_flags flags,
 		/* wait for any prerequisite operations */
 		async_tx_quiesce(&depend_tx);
 
-		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+		async_tx_sync_epilog(cb_fn, cb_param);
 	}
 
 	return tx;

diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 689ecce73ee..65974c6d3d7 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -121,7 +121,6 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 static void
 do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	    int src_cnt, size_t len, enum async_tx_flags flags,
-	    struct dma_async_tx_descriptor *depend_tx,
 	    dma_async_tx_callback cb_fn, void *cb_param)
 {
 	int i;
@@ -150,7 +149,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		src_off += xor_src_cnt;
 	}
 
-	async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+	async_tx_sync_epilog(cb_fn, cb_param);
 }
 
 /**
@@ -204,7 +203,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		async_tx_quiesce(&depend_tx);
 
 		do_sync_xor(dest, src_list, offset, src_cnt, len,
-			    flags, depend_tx, cb_fn, cb_param);
+			    flags, cb_fn, cb_param);
 
 		return NULL;
 	}
@@ -287,7 +286,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 
 		*result = page_is_zero(dest, offset, len) ? 0 : 1;
-		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+		async_tx_sync_epilog(cb_fn, cb_param);
 	}
 
 	return tx;

diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 9f0e7bd5bdc..0f50d4cc436 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -101,21 +101,14 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 
 /**
  * async_tx_sync_epilog - actions to take if an operation is run synchronously
- * @flags: async_tx flags
- * @depend_tx: transaction depends on depend_tx
  * @cb_fn: function to call when the transaction completes
  * @cb_fn_param: parameter to pass to the callback routine
  */
 static inline void
-async_tx_sync_epilog(unsigned long flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param)
+async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param)
 {
 	if (cb_fn)
 		cb_fn(cb_fn_param);
-
-	if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
-		async_tx_ack(depend_tx);
 }
 
 void
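After this series, the synchronous fallback in every async_tx operation reduces to the same three steps, sketched here in kernel-context form (cpu_fallback() is a hypothetical stand-in for the memcpy/memset/xor_blocks work):

	/* 1) dependencies: wait for, acknowledge, and drop our reference */
	async_tx_quiesce(&depend_tx);

	/* 2) perform the operation on the CPU */
	cpu_fallback();

	/* 3) epilog: nothing left to ack, just run the completion callback */
	async_tx_sync_epilog(cb_fn, cb_param);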