From 07f2211e4fbce6990722d78c4f04225da9c0e9cf Mon Sep 17 00:00:00 2001
From: Dan Williams <dan.j.williams@intel.com>
Date: Mon, 5 Jan 2009 17:14:31 -0700
Subject: dmaengine: remove dependency on async_tx

async_tx.ko is a consumer of dma channels.  A circular dependency arises
when modules in drivers/dma rely on common code in async_tx.ko, and the
cycle prevents either module from being unloaded.

Move dma_wait_for_async_tx and async_tx_run_dependencies to dmaengine.o,
where they should have been from the beginning.
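
For reference, after the move the wait helper lives in
drivers/dma/dmaengine.c and looks roughly like the sketch below
(reconstructed from the dmaengine code of this era rather than quoted
from the hunks shown here; the 5-second timeout is an illustrative
detail).  async_tx_run_dependencies moves as well, resurfacing as
dma_run_dependencies() (visible as context in the dmaengine.h hunk of
the next patch).

/* drivers/dma/dmaengine.c -- sketch of the relocated wait helper */
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	/* poll until the dependent descriptor has been submitted ... */
	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, timeout)) {
			pr_err("%s: timeout waiting for submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	/* ... then spin-wait on the channel for its completion */
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);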

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 include/linux/async_tx.h | 15 ---------------
 1 file changed, 15 deletions(-)


diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 0f50d4cc436..1c816775f13 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -60,8 +60,6 @@ enum async_tx_flags {
 
 #ifdef CONFIG_DMA_ENGINE
 void async_tx_issue_pending_all(void);
-enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx);
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
@@ -77,19 +75,6 @@ static inline void async_tx_issue_pending_all(void)
 	do { } while (0);
 }
 
-static inline enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-	return DMA_SUCCESS;
-}
-
-static inline void
-async_tx_run_dependencies(struct dma_async_tx_descriptor *tx,
-	struct dma_chan *host_chan)
-{
-	do { } while (0);
-}
-
 static inline struct dma_chan *
 async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	enum dma_transaction_type tx_type, struct page **dst, int dst_count,
-- 


From 2ba05622b8b143b0c95968ba59bddfbd6d2f2559 Mon Sep 17 00:00:00 2001
From: Dan Williams <dan.j.williams@intel.com>
Date: Tue, 6 Jan 2009 11:38:14 -0700
Subject: dmaengine: provide a common 'issue_pending_all' implementation

async_tx and net_dma each have open-coded versions of issue_pending_all,
so provide a common routine in dmaengine.

The implementation needs to walk the global device list, so convert the
list to be RCU-protected, allowing dma_issue_pending_all to run without
taking dma_list_mutex.  Clients protect themselves from channel-removal
events by holding a dmaengine reference.
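
A client built against this interface would look roughly like the
hypothetical sketch below (my_event, my_client, my_init, and my_flush
are illustrative names; the dma_client registration API is the one this
patch still uses).  Registering the client takes references on the
channels it is assigned, which is what makes the lockless RCU walk in
dma_issue_pending_all safe: a referenced channel cannot be unregistered
underneath the caller.

/* hypothetical dmaengine client -- names are illustrative */
static enum dma_state_client
my_event(struct dma_client *client, struct dma_chan *chan,
	 enum dma_state state)
{
	return DMA_ACK;	/* accept every channel we are offered */
}

static struct dma_client my_client = {
	.event_callback = my_event,
};

static int __init my_init(void)
{
	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
	dma_async_client_register(&my_client);	/* takes channel references */
	return 0;
}

static void my_flush(void)
{
	/* kick all referenced channels; safe without dma_list_mutex */
	dma_issue_pending_all();
}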

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 crypto/async_tx/async_tx.c | 12 ------------
 drivers/dma/dmaengine.c    | 27 ++++++++++++++++++++++++---
 include/linux/async_tx.h   |  2 +-
 include/linux/dmaengine.h  |  1 +
 net/core/dev.c             |  9 +--------
 5 files changed, 27 insertions(+), 24 deletions(-)


diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index b88bb1f608f..2cdf7a0867b 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -45,18 +45,6 @@ static DEFINE_SPINLOCK(async_tx_lock);
 
 static LIST_HEAD(async_tx_master_list);
 
-/* async_tx_issue_pending_all - start all transactions on all channels */
-void async_tx_issue_pending_all(void)
-{
-	struct dma_chan_ref *ref;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-		ref->chan->device->device_issue_pending(ref->chan);
-	rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
-
 static void
 free_dma_chan_ref(struct rcu_head *rcu)
 {
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 87a8cd4791e..418eca28d47 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -70,6 +70,7 @@
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
 #include <linux/jiffies.h>
+#include <linux/rculist.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
@@ -365,6 +366,26 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/**
+ * dma_issue_pending_all - flush all pending operations across all channels
+ */
+void dma_issue_pending_all(void)
+{
+	struct dma_device *device;
+	struct dma_chan *chan;
+
+	WARN_ONCE(dmaengine_ref_count == 0,
+		  "client called %s without a reference", __func__);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(device, &dma_device_list, global_node)
+		list_for_each_entry(chan, &device->channels, device_node)
+			if (chan->client_count)
+				device->device_issue_pending(chan);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(dma_issue_pending_all);
+
 /**
  * nth_chan - returns the nth channel of the given capability
  * @cap: capability to match
@@ -490,7 +511,7 @@ void dma_async_client_register(struct dma_client *client)
 			err = dma_chan_get(chan);
 			if (err == -ENODEV) {
 				/* module removed before we could use it */
-				list_del_init(&device->global_node);
+				list_del_rcu(&device->global_node);
 				break;
 			} else if (err)
 				pr_err("dmaengine: failed to get %s: (%d)\n",
@@ -635,7 +656,7 @@ int dma_async_device_register(struct dma_device *device)
 				goto err_out;
 			}
 		}
-	list_add_tail(&device->global_node, &dma_device_list);
+	list_add_tail_rcu(&device->global_node, &dma_device_list);
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
@@ -677,7 +698,7 @@ void dma_async_device_unregister(struct dma_device *device)
 	struct dma_chan *chan;
 
 	mutex_lock(&dma_list_mutex);
-	list_del(&device->global_node);
+	list_del_rcu(&device->global_node);
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 1c816775f13..45f6297821b 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -59,7 +59,7 @@ enum async_tx_flags {
 };
 
 #ifdef CONFIG_DMA_ENGINE
-void async_tx_issue_pending_all(void);
+#define async_tx_issue_pending_all dma_issue_pending_all
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index b466f02e243..57a43adfc39 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -471,6 +471,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+void dma_issue_pending_all(void);
 
 /* --- Helper iov-locking functions --- */
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 09c66a449da..e40b0d57f8f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2635,14 +2635,7 @@ out:
 	 * There may not be any more sk_buffs coming right now, so push
 	 * any pending DMA copies to hardware
 	 */
-	if (!cpus_empty(net_dma.channel_mask)) {
-		int chan_idx;
-		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-			struct dma_chan *chan = net_dma.channels[chan_idx];
-			if (chan)
-				dma_async_memcpy_issue_pending(chan);
-		}
-	}
+	dma_issue_pending_all();
 #endif
 
 	return;
-- 