author	Dan Williams <dan.j.williams@intel.com>	2008-07-17 17:59:55 -0700
committer	Dan Williams <dan.j.williams@intel.com>	2008-07-17 17:59:55 -0700
commit	d2c52b7983b95bb3fc2a784e479f832f142d4523 (patch)
tree	7bc37e7438cee523496674adcd97034df764af47 /crypto
parent	669ab0b210f9bd15d94d4d6a49ae13366a85e4da (diff)
async_tx: export async_tx_quiesce
Replace open-coded "wait and acknowledge" instances with async_tx_quiesce.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
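The change is mechanical at each call site; condensed from the hunks below, the open-coded wait-and-acknowledge block and its one-line replacement look like this (depend_tx is the dependency descriptor held by each caller):

	/* before: every sync fallback path repeated this block */
	if (depend_tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(depend_tx));
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n",
			      __func__);
	}

	/* after: one helper waits, acknowledges, and clears the pointer */
	async_tx_quiesce(&depend_tx);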
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/async_tx/async_memcpy.c	10
-rw-r--r--	crypto/async_tx/async_memset.c	10
-rw-r--r--	crypto/async_tx/async_tx.c	29
-rw-r--r--	crypto/async_tx/async_xor.c	37
4 files changed, 24 insertions(+), 62 deletions(-)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index a5eda80e842..06a7f4be973 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -73,15 +73,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
pr_debug("%s: (sync) len: %zu\n", __func__, len);
/* wait for any prerequisite operations */
- if (depend_tx) {
- /* if ack is already set then we cannot be sure
- * we are referring to the correct operation
- */
- BUG_ON(async_tx_test_ack(depend_tx));
- if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
- panic("%s: DMA_ERROR waiting for depend_tx\n",
- __func__);
- }
+ async_tx_quiesce(&depend_tx);
dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
src_buf = kmap_atomic(src, KM_USER1) + src_offset;
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 27a97dc90a7..d48ed22ed1c 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -72,15 +72,7 @@ async_memset(struct page *dest, int val, unsigned int offset,
dest_buf = (void *) (((char *) page_address(dest)) + offset);
/* wait for any prerequisite operations */
- if (depend_tx) {
- /* if ack is already set then we cannot be sure
- * we are referring to the correct operation
- */
- BUG_ON(async_tx_test_ack(depend_tx));
- if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
- panic("%s: DMA_ERROR waiting for depend_tx\n",
- __func__);
- }
+ async_tx_quiesce(&depend_tx);
memset(dest_buf, val, len);
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 9325c61208a..78a61e7f631 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -607,15 +607,7 @@ async_trigger_callback(enum async_tx_flags flags,
pr_debug("%s: (sync)\n", __func__);
/* wait for any prerequisite operations */
- if (depend_tx) {
- /* if ack is already set then we cannot be sure
- * we are referring to the correct operation
- */
- BUG_ON(async_tx_test_ack(depend_tx));
- if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
- panic("%s: DMA_ERROR waiting for depend_tx\n",
- __func__);
- }
+ async_tx_quiesce(&depend_tx);
async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
}
@@ -624,6 +616,25 @@ async_trigger_callback(enum async_tx_flags flags,
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
+/**
+ * async_tx_quiesce - ensure tx is complete and freeable upon return
+ * @tx - transaction to quiesce
+ */
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
+{
+ if (*tx) {
+ /* if ack is already set then we cannot be sure
+ * we are referring to the correct operation
+ */
+ BUG_ON(async_tx_test_ack(*tx));
+ if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
+ panic("DMA_ERROR waiting for transaction\n");
+ async_tx_ack(*tx);
+ *tx = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(async_tx_quiesce);
+
module_init(async_tx_init);
module_exit(async_tx_exit);
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 19d16e452bc..689ecce73ee 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -30,24 +30,6 @@
#include <linux/raid/xor.h>
#include <linux/async_tx.h>
-/**
- * async_tx_quiesce - ensure tx is complete and freeable upon return
- * @tx - transaction to quiesce
- */
-static void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
-{
- if (*tx) {
- /* if ack is already set then we cannot be sure
- * we are referring to the correct operation
- */
- BUG_ON(async_tx_test_ack(*tx));
- if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
- panic("DMA_ERROR waiting for transaction\n");
- async_tx_ack(*tx);
- *tx = NULL;
- }
-}
-
/* do_async_xor - dma map the pages and perform the xor with an engine.
* This routine is marked __always_inline so it can be compiled away
* when CONFIG_DMA_ENGINE=n
@@ -219,15 +201,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
}
/* wait for any prerequisite operations */
- if (depend_tx) {
- /* if ack is already set then we cannot be sure
- * we are referring to the correct operation
- */
- BUG_ON(async_tx_test_ack(depend_tx));
- if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
- panic("%s: DMA_ERROR waiting for depend_tx\n",
- __func__);
- }
+ async_tx_quiesce(&depend_tx);
do_sync_xor(dest, src_list, offset, src_cnt, len,
flags, depend_tx, cb_fn, cb_param);
@@ -309,17 +283,10 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
depend_tx, NULL, NULL);
- if (tx) {
- if (dma_wait_for_async_tx(tx) == DMA_ERROR)
- panic("%s: DMA_ERROR waiting for tx\n",
- __func__);
- async_tx_ack(tx);
- }
+ async_tx_quiesce(&tx);
*result = page_is_zero(dest, offset, len) ? 0 : 1;
- tx = NULL;
-
async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
}
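With the symbol now exported via EXPORT_SYMBOL_GPL, users outside crypto/async_tx can rely on the same helper. A minimal, hypothetical caller sketch (the function name below is illustrative only and not part of this patch):

	#include <linux/async_tx.h>

	/* hypothetical sync fallback path in an async_tx offload user */
	static void my_sync_fallback(struct dma_async_tx_descriptor *depend_tx)
	{
		/* wait for the dependency, acknowledge it, and NULL the
		 * pointer so it cannot be waited on twice
		 */
		async_tx_quiesce(&depend_tx);

		/* ... perform the operation synchronously on the CPU ... */
	}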