author     Linus Torvalds <torvalds@linux-foundation.org>   2009-01-09 11:52:14 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-01-09 11:52:14 -0800
commit     d9e8a3a5b8298a3c814ed37ac5756e6f67b6be41 (patch)
tree       ffa1cf8b367b3f32155f6336d7b86b781a368019 /drivers
parent     2150edc6c5cf00f7adb54538b9ea2a3e9cedca3f (diff)
parent     b9bdcbba010c2e49c8f837ea7a49fe006b636f41 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (22 commits)
  ioat: fix self test for multi-channel case
  dmaengine: bump initcall level to arch_initcall
  dmaengine: advertise all channels on a device to dma_filter_fn
  dmaengine: use idr for registering dma device numbers
  dmaengine: add a release for dma class devices and dependent infrastructure
  ioat: do not perform removal actions at shutdown
  iop-adma: enable module removal
  iop-adma: kill debug BUG_ON
  iop-adma: let devm do its job, don't duplicate free
  dmaengine: kill enum dma_state_client
  dmaengine: remove 'bigref' infrastructure
  dmaengine: kill struct dma_client and supporting infrastructure
  dmaengine: replace dma_async_client_register with dmaengine_get
  atmel-mci: convert to dma_request_channel and down-level dma_slave
  dmatest: convert to dma_request_channel
  dmaengine: introduce dma_request_channel and private channels
  net_dma: convert to dma_find_channel
  dmaengine: provide a common 'issue_pending_all' implementation
  dmaengine: centralize channel allocation, introduce dma_find_channel
  dmaengine: up-level reference counting to the module level
  ...
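For orientation, a minimal sketch of how a consumer might use the reworked dmaengine API after this merge, based on the calls that appear in the diff below (dmaengine_get()/dma_find_channel()/dma_issue_pending_all()/dmaengine_put() for shared memcpy offload; dma_request_channel()/dma_release_channel() with a dma_filter_fn for exclusive channels). The example_* wrappers and the trivial filter here are illustrative only, not part of this series:

    #include <linux/dmaengine.h>

    /* Opportunistic use of a shared memcpy channel (net_dma-style). */
    static void example_mem_to_mem(void)
    {
            struct dma_chan *chan;

            dmaengine_get();                /* pin public channels and their providers */
            chan = dma_find_channel(DMA_MEMCPY);
            if (chan) {
                    /* ... prep and submit async_tx descriptors on chan ... */
                    dma_issue_pending_all();
            }
            dmaengine_put();                /* providers may be unloaded again */
    }

    /* Exclusive allocation via the new filter interface (dmatest-style). */
    static bool example_filter(struct dma_chan *chan, void *param)
    {
            return true;                    /* accept any channel matching the mask */
    }

    static struct dma_chan *example_grab_private_chan(void)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_MEMCPY, mask);
            return dma_request_channel(mask, example_filter, NULL);
            /* the caller later drops it with dma_release_channel() */
    }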
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dca/dca-core.c        |   2
-rw-r--r--  drivers/dma/Kconfig           |   2
-rw-r--r--  drivers/dma/dmaengine.c       | 778
-rw-r--r--  drivers/dma/dmatest.c         | 129
-rw-r--r--  drivers/dma/dw_dmac.c         | 119
-rw-r--r--  drivers/dma/fsldma.c          |   5
-rw-r--r--  drivers/dma/ioat.c            |  92
-rw-r--r--  drivers/dma/ioat_dma.c        |  18
-rw-r--r--  drivers/dma/iop-adma.c        |  30
-rw-r--r--  drivers/dma/mv_xor.c          |  11
-rw-r--r--  drivers/mmc/host/atmel-mci.c  | 103
11 files changed, 757 insertions, 532 deletions
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index d883e1b8bb8..55433849bfa 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -270,6 +270,6 @@ static void __exit dca_exit(void)
dca_sysfs_exit();
}
-subsys_initcall(dca_init);
+arch_initcall(dca_init);
module_exit(dca_exit);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 904e57558bb..e34b0642081 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -33,7 +33,6 @@ config INTEL_IOATDMA
config INTEL_IOP_ADMA
tristate "Intel IOP ADMA support"
depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
- select ASYNC_CORE
select DMA_ENGINE
help
Enable support for the Intel(R) IOP Series RAID engines.
@@ -59,7 +58,6 @@ config FSL_DMA
config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
- select ASYNC_CORE
select DMA_ENGINE
---help---
Enable support for the Marvell XOR engine.
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 65799651737..403dbe78112 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -31,32 +31,18 @@
*
* LOCKING:
*
- * The subsystem keeps two global lists, dma_device_list and dma_client_list.
- * Both of these are protected by a mutex, dma_list_mutex.
+ * The subsystem keeps a global list of dma_device structs; it is protected by
+ * a mutex, dma_list_mutex.
+ *
+ * A subsystem can get access to a channel by calling dmaengine_get() followed
+ * by dma_find_channel(), or if it has need for an exclusive channel it can call
+ * dma_request_channel(). Once a channel is allocated a reference is taken
+ * against its corresponding driver to disable removal.
*
* Each device has a channels list, which runs unlocked but is never modified
* once the device is registered, it's just setup by the driver.
*
- * Each client is responsible for keeping track of the channels it uses. See
- * the definition of dma_event_callback in dmaengine.h.
- *
- * Each device has a kref, which is initialized to 1 when the device is
- * registered. A kref_get is done for each device registered. When the
- * device is released, the corresponding kref_put is done in the release
- * method. Every time one of the device's channels is allocated to a client,
- * a kref_get occurs. When the channel is freed, the corresponding kref_put
- * happens. The device's release function does a completion, so
- * unregister_device does a remove event, device_unregister, a kref_put
- * for the first reference, then waits on the completion for all other
- * references to finish.
- *
- * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
- * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
- * signals that it wants to use a channel, and dma_chan_put is called when
- * a channel is removed or a client using it is unregistered. A client can
- * take extra references per outstanding transaction, as is the case with
- * the NET DMA client. The release function does a kref_put on the device.
- * -ChrisL, DanW
+ * See Documentation/dmaengine.txt for more details
*/
#include <linux/init.h>
@@ -70,54 +56,85 @@
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
+#include <linux/rculist.h>
+#include <linux/idr.h>
static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
-static LIST_HEAD(dma_client_list);
+static long dmaengine_ref_count;
+static struct idr dma_idr;
/* --- sysfs implementation --- */
+/**
+ * dev_to_dma_chan - convert a device pointer to its sysfs container object
+ * @dev - device node
+ *
+ * Must be called under dma_list_mutex
+ */
+static struct dma_chan *dev_to_dma_chan(struct device *dev)
+{
+ struct dma_chan_dev *chan_dev;
+
+ chan_dev = container_of(dev, typeof(*chan_dev), device);
+ return chan_dev->chan;
+}
+
static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dma_chan *chan = to_dma_chan(dev);
+ struct dma_chan *chan;
unsigned long count = 0;
int i;
+ int err;
- for_each_possible_cpu(i)
- count += per_cpu_ptr(chan->local, i)->memcpy_count;
+ mutex_lock(&dma_list_mutex);
+ chan = dev_to_dma_chan(dev);
+ if (chan) {
+ for_each_possible_cpu(i)
+ count += per_cpu_ptr(chan->local, i)->memcpy_count;
+ err = sprintf(buf, "%lu\n", count);
+ } else
+ err = -ENODEV;
+ mutex_unlock(&dma_list_mutex);
- return sprintf(buf, "%lu\n", count);
+ return err;
}
static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dma_chan *chan = to_dma_chan(dev);
+ struct dma_chan *chan;
unsigned long count = 0;
int i;
+ int err;
- for_each_possible_cpu(i)
- count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+ mutex_lock(&dma_list_mutex);
+ chan = dev_to_dma_chan(dev);
+ if (chan) {
+ for_each_possible_cpu(i)
+ count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+ err = sprintf(buf, "%lu\n", count);
+ } else
+ err = -ENODEV;
+ mutex_unlock(&dma_list_mutex);
- return sprintf(buf, "%lu\n", count);
+ return err;
}
static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dma_chan *chan = to_dma_chan(dev);
- int in_use = 0;
-
- if (unlikely(chan->slow_ref) &&
- atomic_read(&chan->refcount.refcount) > 1)
- in_use = 1;
- else {
- if (local_read(&(per_cpu_ptr(chan->local,
- get_cpu())->refcount)) > 0)
- in_use = 1;
- put_cpu();
- }
+ struct dma_chan *chan;
+ int err;
- return sprintf(buf, "%d\n", in_use);
+ mutex_lock(&dma_list_mutex);
+ chan = dev_to_dma_chan(dev);
+ if (chan)
+ err = sprintf(buf, "%d\n", chan->client_count);
+ else
+ err = -ENODEV;
+ mutex_unlock(&dma_list_mutex);
+
+ return err;
}
static struct device_attribute dma_attrs[] = {
@@ -127,76 +144,110 @@ static struct device_attribute dma_attrs[] = {
__ATTR_NULL
};
-static void dma_async_device_cleanup(struct kref *kref);
-
-static void dma_dev_release(struct device *dev)
+static void chan_dev_release(struct device *dev)
{
- struct dma_chan *chan = to_dma_chan(dev);
- kref_put(&chan->device->refcount, dma_async_device_cleanup);
+ struct dma_chan_dev *chan_dev;
+
+ chan_dev = container_of(dev, typeof(*chan_dev), device);
+ if (atomic_dec_and_test(chan_dev->idr_ref)) {
+ mutex_lock(&dma_list_mutex);
+ idr_remove(&dma_idr, chan_dev->dev_id);
+ mutex_unlock(&dma_list_mutex);
+ kfree(chan_dev->idr_ref);
+ }
+ kfree(chan_dev);
}
static struct class dma_devclass = {
.name = "dma",
.dev_attrs = dma_attrs,
- .dev_release = dma_dev_release,
+ .dev_release = chan_dev_release,
};
/* --- client and device registration --- */
-#define dma_chan_satisfies_mask(chan, mask) \
- __dma_chan_satisfies_mask((chan), &(mask))
+#define dma_device_satisfies_mask(device, mask) \
+ __dma_device_satisfies_mask((device), &(mask))
static int
-__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
+__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
dma_cap_mask_t has;
- bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
+ bitmap_and(has.bits, want->bits, device->cap_mask.bits,
DMA_TX_TYPE_END);
return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
+static struct module *dma_chan_to_owner(struct dma_chan *chan)
+{
+ return chan->device->dev->driver->owner;
+}
+
/**
- * dma_client_chan_alloc - try to allocate channels to a client
- * @client: &dma_client
+ * balance_ref_count - catch up the channel reference count
+ * @chan - channel to balance ->client_count versus dmaengine_ref_count
*
- * Called with dma_list_mutex held.
+ * balance_ref_count must be called under dma_list_mutex
*/
-static void dma_client_chan_alloc(struct dma_client *client)
+static void balance_ref_count(struct dma_chan *chan)
{
- struct dma_device *device;
- struct dma_chan *chan;
- int desc; /* allocated descriptor count */
- enum dma_state_client ack;
+ struct module *owner = dma_chan_to_owner(chan);
- /* Find a channel */
- list_for_each_entry(device, &dma_device_list, global_node) {
- /* Does the client require a specific DMA controller? */
- if (client->slave && client->slave->dma_dev
- && client->slave->dma_dev != device->dev)
- continue;
+ while (chan->client_count < dmaengine_ref_count) {
+ __module_get(owner);
+ chan->client_count++;
+ }
+}
- list_for_each_entry(chan, &device->channels, device_node) {
- if (!dma_chan_satisfies_mask(chan, client->cap_mask))
- continue;
+/**
+ * dma_chan_get - try to grab a dma channel's parent driver module
+ * @chan - channel to grab
+ *
+ * Must be called under dma_list_mutex
+ */
+static int dma_chan_get(struct dma_chan *chan)
+{
+ int err = -ENODEV;
+ struct module *owner = dma_chan_to_owner(chan);
+
+ if (chan->client_count) {
+ __module_get(owner);
+ err = 0;
+ } else if (try_module_get(owner))
+ err = 0;
+
+ if (err == 0)
+ chan->client_count++;
+
+ /* allocate upon first client reference */
+ if (chan->client_count == 1 && err == 0) {
+ int desc_cnt = chan->device->device_alloc_chan_resources(chan);
+
+ if (desc_cnt < 0) {
+ err = desc_cnt;
+ chan->client_count = 0;
+ module_put(owner);
+ } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
+ balance_ref_count(chan);
+ }
- desc = chan->device->device_alloc_chan_resources(
- chan, client);
- if (desc >= 0) {
- ack = client->event_callback(client,
- chan,
- DMA_RESOURCE_AVAILABLE);
+ return err;
+}
- /* we are done once this client rejects
- * an available resource
- */
- if (ack == DMA_ACK) {
- dma_chan_get(chan);
- chan->client_count++;
- } else if (ack == DMA_NAK)
- return;
- }
- }
- }
+/**
+ * dma_chan_put - drop a reference to a dma channel's parent driver module
+ * @chan - channel to release
+ *
+ * Must be called under dma_list_mutex
+ */
+static void dma_chan_put(struct dma_chan *chan)
+{
+ if (!chan->client_count)
+ return; /* this channel failed alloc_chan_resources */
+ chan->client_count--;
+ module_put(dma_chan_to_owner(chan));
+ if (chan->client_count == 0)
+ chan->device->device_free_chan_resources(chan);
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -218,138 +269,342 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
EXPORT_SYMBOL(dma_sync_wait);
/**
- * dma_chan_cleanup - release a DMA channel's resources
- * @kref: kernel reference structure that contains the DMA channel device
+ * dma_cap_mask_all - enable iteration over all operation types
+ */
+static dma_cap_mask_t dma_cap_mask_all;
+
+/**
+ * dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan - associated channel for this entry
+ */
+struct dma_chan_tbl_ent {
+ struct dma_chan *chan;
+};
+
+/**
+ * channel_table - percpu lookup table for memory-to-memory offload providers
*/
-void dma_chan_cleanup(struct kref *kref)
+static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
+
+static int __init dma_channel_table_init(void)
{
- struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
- chan->device->device_free_chan_resources(chan);
- kref_put(&chan->device->refcount, dma_async_device_cleanup);
+ enum dma_transaction_type cap;
+ int err = 0;
+
+ bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
+
+ /* 'interrupt', 'private', and 'slave' are channel capabilities,
+ * but are not associated with an operation so they do not need
+ * an entry in the channel_table
+ */
+ clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
+ clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
+ clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
+
+ for_each_dma_cap_mask(cap, dma_cap_mask_all) {
+ channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
+ if (!channel_table[cap]) {
+ err = -ENOMEM;
+ break;
+ }
+ }
+
+ if (err) {
+ pr_err("dmaengine: initialization failure\n");
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ if (channel_table[cap])
+ free_percpu(channel_table[cap]);
+ }
+
+ return err;
}
-EXPORT_SYMBOL(dma_chan_cleanup);
+arch_initcall(dma_channel_table_init);
-static void dma_chan_free_rcu(struct rcu_head *rcu)
+/**
+ * dma_find_channel - find a channel to carry out the operation
+ * @tx_type: transaction type
+ */
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
- struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
- int bias = 0x7FFFFFFF;
- int i;
- for_each_possible_cpu(i)
- bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
- atomic_sub(bias, &chan->refcount.refcount);
- kref_put(&chan->refcount, dma_chan_cleanup);
+ struct dma_chan *chan;
+ int cpu;
+
+ WARN_ONCE(dmaengine_ref_count == 0,
+ "client called %s without a reference", __func__);
+
+ cpu = get_cpu();
+ chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
+ put_cpu();
+
+ return chan;
}
+EXPORT_SYMBOL(dma_find_channel);
-static void dma_chan_release(struct dma_chan *chan)
+/**
+ * dma_issue_pending_all - flush all pending operations across all channels
+ */
+void dma_issue_pending_all(void)
{
- atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
- chan->slow_ref = 1;
- call_rcu(&chan->rcu, dma_chan_free_rcu);
+ struct dma_device *device;
+ struct dma_chan *chan;
+
+ WARN_ONCE(dmaengine_ref_count == 0,
+ "client called %s without a reference", __func__);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(device, &dma_device_list, global_node) {
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node)
+ if (chan->client_count)
+ device->device_issue_pending(chan);
+ }
+ rcu_read_unlock();
}
+EXPORT_SYMBOL(dma_issue_pending_all);
/**
- * dma_chans_notify_available - broadcast available channels to the clients
+ * nth_chan - returns the nth channel of the given capability
+ * @cap: capability to match
+ * @n: nth channel desired
+ *
+ * Defaults to returning the channel with the desired capability and the
+ * lowest reference count when 'n' cannot be satisfied. Must be called
+ * under dma_list_mutex.
*/
-static void dma_clients_notify_available(void)
+static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
- struct dma_client *client;
+ struct dma_device *device;
+ struct dma_chan *chan;
+ struct dma_chan *ret = NULL;
+ struct dma_chan *min = NULL;
- mutex_lock(&dma_list_mutex);
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (!dma_has_cap(cap, device->cap_mask) ||
+ dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node) {
+ if (!chan->client_count)
+ continue;
+ if (!min)
+ min = chan;
+ else if (chan->table_count < min->table_count)
+ min = chan;
+
+ if (n-- == 0) {
+ ret = chan;
+ break; /* done */
+ }
+ }
+ if (ret)
+ break; /* done */
+ }
- list_for_each_entry(client, &dma_client_list, global_node)
- dma_client_chan_alloc(client);
+ if (!ret)
+ ret = min;
- mutex_unlock(&dma_list_mutex);
+ if (ret)
+ ret->table_count++;
+
+ return ret;
}
/**
- * dma_chans_notify_available - tell the clients that a channel is going away
- * @chan: channel on its way out
+ * dma_channel_rebalance - redistribute the available channels
+ *
+ * Optimize for cpu isolation (each cpu gets a dedicated channel for an
+ * operation type) in the SMP case, and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case. Must be called under
+ * dma_list_mutex.
*/
-static void dma_clients_notify_removed(struct dma_chan *chan)
+static void dma_channel_rebalance(void)
{
- struct dma_client *client;
- enum dma_state_client ack;
+ struct dma_chan *chan;
+ struct dma_device *device;
+ int cpu;
+ int cap;
+ int n;
- mutex_lock(&dma_list_mutex);
+ /* undo the last distribution */
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
+
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node)
+ chan->table_count = 0;
+ }
- list_for_each_entry(client, &dma_client_list, global_node) {
- ack = client->event_callback(client, chan,
- DMA_RESOURCE_REMOVED);
+ /* don't populate the channel_table if no clients are available */
+ if (!dmaengine_ref_count)
+ return;
- /* client was holding resources for this channel so
- * free it
- */
- if (ack == DMA_ACK) {
- dma_chan_put(chan);
- chan->client_count--;
+ /* redistribute available channels */
+ n = 0;
+ for_each_dma_cap_mask(cap, dma_cap_mask_all)
+ for_each_online_cpu(cpu) {
+ if (num_possible_cpus() > 1)
+ chan = nth_chan(cap, n++);
+ else
+ chan = nth_chan(cap, -1);
+
+ per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
+ }
+}
+
+static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
+ dma_filter_fn fn, void *fn_param)
+{
+ struct dma_chan *chan;
+
+ if (!__dma_device_satisfies_mask(dev, mask)) {
+ pr_debug("%s: wrong capabilities\n", __func__);
+ return NULL;
+ }
+ /* devices with multiple channels need special handling as we need to
+ * ensure that all channels are either private or public.
+ */
+ if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
+ list_for_each_entry(chan, &dev->channels, device_node) {
+ /* some channels are already publicly allocated */
+ if (chan->client_count)
+ return NULL;
}
+
+ list_for_each_entry(chan, &dev->channels, device_node) {
+ if (chan->client_count) {
+ pr_debug("%s: %s busy\n",
+ __func__, dma_chan_name(chan));
+ continue;
+ }
+ if (fn && !fn(chan, fn_param)) {
+ pr_debug("%s: %s filter said false\n",
+ __func__, dma_chan_name(chan));
+ continue;
+ }
+ return chan;
}
- mutex_unlock(&dma_list_mutex);
+ return NULL;
}
/**
- * dma_async_client_register - register a &dma_client
- * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
+ * dma_request_channel - try to allocate an exclusive channel
+ * @mask: capabilities that the channel must satisfy
+ * @fn: optional callback to disposition available channels
+ * @fn_param: opaque parameter to pass to dma_filter_fn
*/
-void dma_async_client_register(struct dma_client *client)
+struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
- /* validate client data */
- BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
- !client->slave);
+ struct dma_device *device, *_d;
+ struct dma_chan *chan = NULL;
+ int err;
+ /* Find a channel */
+ mutex_lock(&dma_list_mutex);
+ list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+ chan = private_candidate(mask, device, fn, fn_param);
+ if (chan) {
+ /* Found a suitable channel, try to grab, prep, and
+ * return it. We first set DMA_PRIVATE to disable
+ * balance_ref_count as this channel will not be
+ * published in the general-purpose allocator
+ */
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
+ err = dma_chan_get(chan);
+
+ if (err == -ENODEV) {
+ pr_debug("%s: %s module removed\n", __func__,
+ dma_chan_name(chan));
+ list_del_rcu(&device->global_node);
+ } else if (err)
+ pr_err("dmaengine: failed to get %s: (%d)\n",
+ dma_chan_name(chan), err);
+ else
+ break;
+ chan = NULL;
+ }
+ }
+ mutex_unlock(&dma_list_mutex);
+
+ pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
+ chan ? dma_chan_name(chan) : NULL);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(__dma_request_channel);
+
+void dma_release_channel(struct dma_chan *chan)
+{
mutex_lock(&dma_list_mutex);
- list_add_tail(&client->global_node, &dma_client_list);
+ WARN_ONCE(chan->client_count != 1,
+ "chan reference count %d != 1\n", chan->client_count);
+ dma_chan_put(chan);
mutex_unlock(&dma_list_mutex);
}
-EXPORT_SYMBOL(dma_async_client_register);
+EXPORT_SYMBOL_GPL(dma_release_channel);
/**
- * dma_async_client_unregister - unregister a client and free the &dma_client
- * @client: &dma_client to free
- *
- * Force frees any allocated DMA channels, frees the &dma_client memory
+ * dmaengine_get - register interest in dma_channels
*/
-void dma_async_client_unregister(struct dma_client *client)
+void dmaengine_get(void)
{
- struct dma_device *device;
+ struct dma_device *device, *_d;
struct dma_chan *chan;
- enum dma_state_client ack;
-
- if (!client)
- return;
+ int err;
mutex_lock(&dma_list_mutex);
- /* free all channels the client is holding */
- list_for_each_entry(device, &dma_device_list, global_node)
- list_for_each_entry(chan, &device->channels, device_node) {
- ack = client->event_callback(client, chan,
- DMA_RESOURCE_REMOVED);
+ dmaengine_ref_count++;
- if (ack == DMA_ACK) {
- dma_chan_put(chan);
- chan->client_count--;
- }
+ /* try to grab channels */
+ list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node) {
+ err = dma_chan_get(chan);
+ if (err == -ENODEV) {
+ /* module removed before we could use it */
+ list_del_rcu(&device->global_node);
+ break;
+ } else if (err)
+ pr_err("dmaengine: failed to get %s: (%d)\n",
+ dma_chan_name(chan), err);
}
+ }
- list_del(&client->global_node);
+ /* if this is the first reference and there were channels
+ * waiting we need to rebalance to get those channels
+ * incorporated into the channel table
+ */
+ if (dmaengine_ref_count == 1)
+ dma_channel_rebalance();
mutex_unlock(&dma_list_mutex);
}
-EXPORT_SYMBOL(dma_async_client_unregister);
+EXPORT_SYMBOL(dmaengine_get);
/**
- * dma_async_client_chan_request - send all available channels to the
- * client that satisfy the capability mask
- * @client - requester
+ * dmaengine_put - let dma drivers be removed when ref_count == 0
*/
-void dma_async_client_chan_request(struct dma_client *client)
+void dmaengine_put(void)
{
+ struct dma_device *device;
+ struct dma_chan *chan;
+
mutex_lock(&dma_list_mutex);
- dma_client_chan_alloc(client);
+ dmaengine_ref_count--;
+ BUG_ON(dmaengine_ref_count < 0);
+ /* drop channel references */
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ continue;
+ list_for_each_entry(chan, &device->channels, device_node)
+ dma_chan_put(chan);
+ }
mutex_unlock(&dma_list_mutex);
}
-EXPORT_SYMBOL(dma_async_client_chan_request);
+EXPORT_SYMBOL(dmaengine_put);
/**
* dma_async_device_register - registers DMA devices found
@@ -357,9 +612,9 @@ EXPORT_SYMBOL(dma_async_client_chan_request);
*/
int dma_async_device_register(struct dma_device *device)
{
- static int id;
int chancnt = 0, rc;
struct dma_chan* chan;
+ atomic_t *idr_ref;
if (!device)
return -ENODEV;
@@ -386,57 +641,83 @@ int dma_async_device_register(struct dma_device *device)
BUG_ON(!device->device_issue_pending);
BUG_ON(!device->dev);
- init_completion(&device->done);
- kref_init(&device->refcount);
-
+ idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
+ if (!idr_ref)
+ return -ENOMEM;
+ atomic_set(idr_ref, 0);
+ idr_retry:
+ if (!idr_pre_get(&dma_idr, GFP_KERNEL))
+ return -ENOMEM;
mutex_lock(&dma_list_mutex);
- device->dev_id = id++;
+ rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
mutex_unlock(&dma_list_mutex);
+ if (rc == -EAGAIN)
+ goto idr_retry;
+ else if (rc != 0)
+ return rc;
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
chan->local = alloc_percpu(typeof(*chan->local));
if (chan->local == NULL)
continue;
+ chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+ if (chan->dev == NULL) {
+ free_percpu(chan->local);
+ continue;
+ }
chan->chan_id = chancnt++;
- chan->dev.class = &dma_devclass;
- chan->dev.parent = device->dev;
- dev_set_name(&chan->dev, "dma%dchan%d",
+ chan->dev->device.class = &dma_devclass;
+ chan->dev->device.parent = device->dev;
+ chan->dev->chan = chan;
+ chan->dev->idr_ref = idr_ref;
+ chan->dev->dev_id = device->dev_id;
+ atomic_inc(idr_ref);
+ dev_set_name(&chan->dev->device, "dma%dchan%d",
device->dev_id, chan->chan_id);
- rc = device_register(&chan->dev);
+ rc = device_register(&chan->dev->device);
if (rc) {
- chancnt--;
free_percpu(chan->local);
chan->local = NULL;
goto err_out;
}
-
- /* One for the channel, one of the class device */
- kref_get(&device->refcount);
- kref_get(&device->refcount);
- kref_init(&chan->refcount);
chan->client_count = 0;
- chan->slow_ref = 0;
- INIT_RCU_HEAD(&chan->rcu);
}
+ device->chancnt = chancnt;
mutex_lock(&dma_list_mutex);
- list_add_tail(&device->global_node, &dma_device_list);
+ /* take references on public channels */
+ if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ list_for_each_entry(chan, &device->channels, device_node) {
+ /* if clients are already waiting for channels we need
+ * to take references on their behalf
+ */
+ if (dma_chan_get(chan) == -ENODEV) {
+ /* note we can only get here for the first
+ * channel as the remaining channels are
+ * guaranteed to get a reference
+ */
+ rc = -ENODEV;
+ mutex_unlock(&dma_list_mutex);
+ goto err_out;
+ }
+ }
+ list_add_tail_rcu(&device->global_node, &dma_device_list);
+ dma_channel_rebalance();
mutex_unlock(&dma_list_mutex);
- dma_clients_notify_available();
-
return 0;
err_out:
list_for_each_entry(chan, &device->channels, device_node) {
if (chan->local == NULL)
continue;
- kref_put(&device->refcount, dma_async_device_cleanup);
- device_unregister(&chan->dev);
- chancnt--;
+ mutex_lock(&dma_list_mutex);
+ chan->dev->chan = NULL;
+ mutex_unlock(&dma_list_mutex);
+ device_unregister(&chan->dev->device);
free_percpu(chan->local);
}
return rc;
@@ -444,37 +725,30 @@ err_out:
EXPORT_SYMBOL(dma_async_device_register);
/**
- * dma_async_device_cleanup - function called when all references are released
- * @kref: kernel reference object
- */
-static void dma_async_device_cleanup(struct kref *kref)
-{
- struct dma_device *device;
-
- device = container_of(kref, struct dma_device, refcount);
- complete(&device->done);
-}
-
-/**
- * dma_async_device_unregister - unregisters DMA devices
+ * dma_async_device_unregister - unregister a DMA device
* @device: &dma_device
+ *
+ * This routine is called by dma driver exit routines, dmaengine holds module
+ * references to prevent it being called while channels are in use.
*/
void dma_async_device_unregister(struct dma_device *device)
{
struct dma_chan *chan;
mutex_lock(&dma_list_mutex);
- list_del(&device->global_node);
+ list_del_rcu(&device->global_node);
+ dma_channel_rebalance();
mutex_unlock(&dma_list_mutex);
list_for_each_entry(chan, &device->channels, device_node) {
- dma_clients_notify_removed(chan);
- device_unregister(&chan->dev);
- dma_chan_release(chan);
+ WARN_ONCE(chan->client_count,
+ "%s called while %d clients hold a reference\n",
+ __func__, chan->client_count);
+ mutex_lock(&dma_list_mutex);
+ chan->dev->chan = NULL;
+ mutex_unlock(&dma_list_mutex);
+ device_unregister(&chan->dev->device);
}
-
- kref_put(&device->refcount, dma_async_device_cleanup);
- wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);
@@ -626,10 +900,96 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
+/* dma_wait_for_async_tx - spin wait for a transaction to complete
+ * @tx: in-flight transaction to wait on
+ *
+ * This routine assumes that tx was obtained from a call to async_memcpy,
+ * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
+ * and submitted). Walking the parent chain is only meant to cover for DMA
+ * drivers that do not implement the DMA_INTERRUPT capability and may race with
+ * the driver's descriptor cleanup routine.
+ */
+enum dma_status
+dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+ enum dma_status status;
+ struct dma_async_tx_descriptor *iter;
+ struct dma_async_tx_descriptor *parent;
+
+ if (!tx)
+ return DMA_SUCCESS;
+
+ WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
+ " %s\n", __func__, dma_chan_name(tx->chan));
+
+ /* poll through the dependency chain, return when tx is complete */
+ do {
+ iter = tx;
+
+ /* find the root of the unsubmitted dependency chain */
+ do {
+ parent = iter->parent;
+ if (!parent)
+ break;
+ else
+ iter = parent;
+ } while (parent);
+
+ /* there is a small window for ->parent == NULL and
+ * ->cookie == -EBUSY
+ */
+ while (iter->cookie == -EBUSY)
+ cpu_relax();
+
+ status = dma_sync_wait(iter->chan, iter->cookie);
+ } while (status == DMA_IN_PROGRESS || (iter != tx));
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
+
+/* dma_run_dependencies - helper routine for dma drivers to process
+ * (start) dependent operations on their target channel
+ * @tx: transaction with dependencies
+ */
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_async_tx_descriptor *dep = tx->next;
+ struct dma_async_tx_descriptor *dep_next;
+ struct dma_chan *chan;
+
+ if (!dep)
+ return;
+
+ chan = dep->chan;
+
+ /* keep submitting up until a channel switch is detected
+ * in that case we will be called again as a result of
+ * processing the interrupt from async_tx_channel_switch
+ */
+ for (; dep; dep = dep_next) {
+ spin_lock_bh(&dep->lock);
+ dep->parent = NULL;
+ dep_next = dep->next;
+ if (dep_next && dep_next->chan == chan)
+ dep->next = NULL; /* ->next will be submitted */
+ else
+ dep_next = NULL; /* submit current dep and terminate */
+ spin_unlock_bh(&dep->lock);
+
+ dep->tx_submit(dep);
+ }
+
+ chan->device->device_issue_pending(chan);
+}
+EXPORT_SYMBOL_GPL(dma_run_dependencies);
+
static int __init dma_bus_init(void)
{
+ idr_init(&dma_idr);
mutex_init(&dma_list_mutex);
return class_register(&dma_devclass);
}
-subsys_initcall(dma_bus_init);
+arch_initcall(dma_bus_init);
+
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index ed9636bfb54..3603f1ea5b2 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -35,7 +35,7 @@ MODULE_PARM_DESC(threads_per_chan,
static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO);
-MODULE_PARM_DESC(nr_channels,
+MODULE_PARM_DESC(max_channels,
"Maximum number of channels to use (default: all)");
/*
@@ -71,7 +71,7 @@ struct dmatest_chan {
/*
* These are protected by dma_list_mutex since they're only used by
- * the DMA client event callback
+ * the DMA filter function callback
*/
static LIST_HEAD(dmatest_channels);
static unsigned int nr_channels;
@@ -80,7 +80,7 @@ static bool dmatest_match_channel(struct dma_chan *chan)
{
if (test_channel[0] == '\0')
return true;
- return strcmp(dev_name(&chan->dev), test_channel) == 0;
+ return strcmp(dma_chan_name(chan), test_channel) == 0;
}
static bool dmatest_match_device(struct dma_device *device)
@@ -215,7 +215,6 @@ static int dmatest_func(void *data)
smp_rmb();
chan = thread->chan;
- dma_chan_get(chan);
while (!kthread_should_stop()) {
total_tests++;
@@ -293,7 +292,6 @@ static int dmatest_func(void *data)
}
ret = 0;
- dma_chan_put(chan);
kfree(thread->dstbuf);
err_dstbuf:
kfree(thread->srcbuf);
@@ -319,21 +317,16 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
kfree(dtc);
}
-static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
+static int dmatest_add_channel(struct dma_chan *chan)
{
struct dmatest_chan *dtc;
struct dmatest_thread *thread;
unsigned int i;
- /* Have we already been told about this channel? */
- list_for_each_entry(dtc, &dmatest_channels, node)
- if (dtc->chan == chan)
- return DMA_DUP;
-
dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
if (!dtc) {
- pr_warning("dmatest: No memory for %s\n", dev_name(&chan->dev));
- return DMA_NAK;
+ pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
+ return -ENOMEM;
}
dtc->chan = chan;
@@ -343,16 +336,16 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
if (!thread) {
pr_warning("dmatest: No memory for %s-test%u\n",
- dev_name(&chan->dev), i);
+ dma_chan_name(chan), i);
break;
}
thread->chan = dtc->chan;
smp_wmb();
thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
- dev_name(&chan->dev), i);
+ dma_chan_name(chan), i);
if (IS_ERR(thread->task)) {
pr_warning("dmatest: Failed to run thread %s-test%u\n",
- dev_name(&chan->dev), i);
+ dma_chan_name(chan), i);
kfree(thread);
break;
}
@@ -362,86 +355,62 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
list_add_tail(&thread->node, &dtc->threads);
}
- pr_info("dmatest: Started %u threads using %s\n", i, dev_name(&chan->dev));
+ pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan));
list_add_tail(&dtc->node, &dmatest_channels);
nr_channels++;
- return DMA_ACK;
-}
-
-static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
-{
- struct dmatest_chan *dtc, *_dtc;
-
- list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
- if (dtc->chan == chan) {
- list_del(&dtc->node);
- dmatest_cleanup_channel(dtc);
- pr_debug("dmatest: lost channel %s\n",
- dev_name(&chan->dev));
- return DMA_ACK;
- }
- }
-
- return DMA_DUP;
+ return 0;
}
-/*
- * Start testing threads as new channels are assigned to us, and kill
- * them when the channels go away.
- *
- * When we unregister the client, all channels are removed so this
- * will also take care of cleaning things up when the module is
- * unloaded.
- */
-static enum dma_state_client
-dmatest_event(struct dma_client *client, struct dma_chan *chan,
- enum dma_state state)
+static bool filter(struct dma_chan *chan, void *param)
{
- enum dma_state_client ack = DMA_NAK;
-
- switch (state) {
- case DMA_RESOURCE_AVAILABLE:
- if (!dmatest_match_channel(chan)
- || !dmatest_match_device(chan->device))
- ack = DMA_DUP;
- else if (max_channels && nr_channels >= max_channels)
- ack = DMA_NAK;
- else
- ack = dmatest_add_channel(chan);
- break;
-
- case DMA_RESOURCE_REMOVED:
- ack = dmatest_remove_channel(chan);
- break;
-
- default:
- pr_info("dmatest: Unhandled event %u (%s)\n",
- state, dev_name(&chan->dev));
- break;
- }
-
- return ack;
+ if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
+ return false;
+ else
+ return true;
}
-static struct dma_client dmatest_client = {
- .event_callback = dmatest_event,
-};
-
static int __init dmatest_init(void)
{
- dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask);
- dma_async_client_register(&dmatest_client);
- dma_async_client_chan_request(&dmatest_client);
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ int err = 0;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+ for (;;) {
+ chan = dma_request_channel(mask, filter, NULL);
+ if (chan) {
+ err = dmatest_add_channel(chan);
+ if (err == 0)
+ continue;
+ else {
+ dma_release_channel(chan);
+ break; /* add_channel failed, punt */
+ }
+ } else
+ break; /* no more channels available */
+ if (max_channels && nr_channels >= max_channels)
+ break; /* we have all we need */
+ }
- return 0;
+ return err;
}
-module_init(dmatest_init);
+/* when compiled-in wait for drivers to load first */
+late_initcall(dmatest_init);
static void __exit dmatest_exit(void)
{
- dma_async_client_unregister(&dmatest_client);
+ struct dmatest_chan *dtc, *_dtc;
+
+ list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
+ list_del(&dtc->node);
+ dmatest_cleanup_channel(dtc);
+ pr_debug("dmatest: dropped channel %s\n",
+ dma_chan_name(dtc->chan));
+ dma_release_channel(dtc->chan);
+ }
}
module_exit(dmatest_exit);
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 0778d99aea7..6b702cc46b3 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -70,6 +70,15 @@
* the controller, though.
*/
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+static struct device *chan2parent(struct dma_chan *chan)
+{
+ return chan->dev->device.parent;
+}
+
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
@@ -93,12 +102,12 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
ret = desc;
break;
}
- dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
+ dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
i++;
}
spin_unlock_bh(&dwc->lock);
- dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);
+ dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
return ret;
}
@@ -108,10 +117,10 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
struct dw_desc *child;
list_for_each_entry(child, &desc->txd.tx_list, desc_node)
- dma_sync_single_for_cpu(dwc->chan.dev.parent,
+ dma_sync_single_for_cpu(chan2parent(&dwc->chan),
child->txd.phys, sizeof(child->lli),
DMA_TO_DEVICE);
- dma_sync_single_for_cpu(dwc->chan.dev.parent,
+ dma_sync_single_for_cpu(chan2parent(&dwc->chan),
desc->txd.phys, sizeof(desc->lli),
DMA_TO_DEVICE);
}
@@ -129,11 +138,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
spin_lock_bh(&dwc->lock);
list_for_each_entry(child, &desc->txd.tx_list, desc_node)
- dev_vdbg(&dwc->chan.dev,
+ dev_vdbg(chan2dev(&dwc->chan),
"moving child desc %p to freelist\n",
child);
list_splice_init(&desc->txd.tx_list, &dwc->free_list);
- dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
+ dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &dwc->free_list);
spin_unlock_bh(&dwc->lock);
}
@@ -163,9 +172,9 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
/* ASSERT: channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
- dev_err(&dwc->chan.dev,
+ dev_err(chan2dev(&dwc->chan),
"BUG: Attempted to start non-idle channel\n");
- dev_err(&dwc->chan.dev,
+ dev_err(chan2dev(&dwc->chan),
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
channel_readl(dwc, SAR),
channel_readl(dwc, DAR),
@@ -193,7 +202,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
void *param;
struct dma_async_tx_descriptor *txd = &desc->txd;
- dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);
+ dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
dwc->completed = txd->cookie;
callback = txd->callback;
@@ -208,11 +217,11 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
* mapped before they were submitted...
*/
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
- dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
- DMA_FROM_DEVICE);
+ dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar,
+ desc->len, DMA_FROM_DEVICE);
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
- dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
- DMA_TO_DEVICE);
+ dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar,
+ desc->len, DMA_TO_DEVICE);
/*
* The API requires that no submissions are done from a
@@ -228,7 +237,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
LIST_HEAD(list);
if (dma_readl(dw, CH_EN) & dwc->mask) {
- dev_err(&dwc->chan.dev,
+ dev_err(chan2dev(&dwc->chan),
"BUG: XFER bit set, but channel not idle!\n");
/* Try to continue after resetting the channel... */
@@ -273,7 +282,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
return;
}
- dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);
+ dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
if (desc->lli.llp == llp)
@@ -292,7 +301,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_descriptor_complete(dwc, desc);
}
- dev_err(&dwc->chan.dev,
+ dev_err(chan2dev(&dwc->chan),
"BUG: All descriptors done, but channel not idle!\n");
/* Try to continue after resetting the channel... */
@@ -308,7 +317,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
- dev_printk(KERN_CRIT, &dwc->chan.dev,
+ dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
" desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
lli->sar, lli->dar, lli->llp,
lli->ctlhi, lli->ctllo);
@@ -342,9 +351,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
* controller flagged an error instead of scribbling over
* random memory locations.
*/
- dev_printk(KERN_CRIT, &dwc->chan.dev,
+ dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
"Bad descriptor submitted for DMA!\n");
- dev_printk(KERN_CRIT, &dwc->chan.dev,
+ dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
" cookie: %d\n", bad_desc->txd.cookie);
dwc_dump_lli(dwc, &bad_desc->lli);
list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
@@ -442,12 +451,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
* for DMA. But this is hard to do in a race-free manner.
*/
if (list_empty(&dwc->active_list)) {
- dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
+ dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
desc->txd.cookie);
dwc_dostart(dwc, desc);
list_add_tail(&desc->desc_node, &dwc->active_list);
} else {
- dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
+ dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
desc->txd.cookie);
list_add_tail(&desc->desc_node, &dwc->queue);
@@ -472,11 +481,11 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
unsigned int dst_width;
u32 ctllo;
- dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
+ dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
dest, src, len, flags);
if (unlikely(!len)) {
- dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
+ dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
return NULL;
}
@@ -516,7 +525,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan->dev.parent,
+ dma_sync_single_for_device(chan2parent(chan),
prev->txd.phys, sizeof(prev->lli),
DMA_TO_DEVICE);
list_add_tail(&desc->desc_node,
@@ -531,7 +540,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
prev->lli.ctllo |= DWC_CTLL_INT_EN;
prev->lli.llp = 0;
- dma_sync_single_for_device(chan->dev.parent,
+ dma_sync_single_for_device(chan2parent(chan),
prev->txd.phys, sizeof(prev->lli),
DMA_TO_DEVICE);
@@ -562,15 +571,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct scatterlist *sg;
size_t total_len = 0;
- dev_vdbg(&chan->dev, "prep_dma_slave\n");
+ dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
if (unlikely(!dws || !sg_len))
return NULL;
- reg_width = dws->slave.reg_width;
+ reg_width = dws->reg_width;
prev = first = NULL;
- sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
+ sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
switch (direction) {
case DMA_TO_DEVICE:
@@ -579,7 +588,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
| DWC_CTLL_DST_FIX
| DWC_CTLL_SRC_INC
| DWC_CTLL_FC_M2P);
- reg = dws->slave.tx_reg;
+ reg = dws->tx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
u32 len;
@@ -587,7 +596,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
desc = dwc_desc_get(dwc);
if (!desc) {
- dev_err(&chan->dev,
+ dev_err(chan2dev(chan),
"not enough descriptors available\n");
goto err_desc_get;
}
@@ -607,7 +616,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan->dev.parent,
+ dma_sync_single_for_device(chan2parent(chan),
prev->txd.phys,
sizeof(prev->lli),
DMA_TO_DEVICE);
@@ -625,7 +634,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
| DWC_CTLL_SRC_FIX
| DWC_CTLL_FC_P2M);
- reg = dws->slave.rx_reg;
+ reg = dws->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
u32 len;
@@ -633,7 +642,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
desc = dwc_desc_get(dwc);
if (!desc) {
- dev_err(&chan->dev,
+ dev_err(chan2dev(chan),
"not enough descriptors available\n");
goto err_desc_get;
}
@@ -653,7 +662,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
first = desc;
} else {
prev->lli.llp = desc->txd.phys;
- dma_sync_single_for_device(chan->dev.parent,
+ dma_sync_single_for_device(chan2parent(chan),
prev->txd.phys,
sizeof(prev->lli),
DMA_TO_DEVICE);
@@ -673,7 +682,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
prev->lli.ctllo |= DWC_CTLL_INT_EN;
prev->lli.llp = 0;
- dma_sync_single_for_device(chan->dev.parent,
+ dma_sync_single_for_device(chan2parent(chan),
prev->txd.phys, sizeof(prev->lli),
DMA_TO_DEVICE);
@@ -758,29 +767,21 @@ static void dwc_issue_pending(struct dma_chan *chan)
spin_unlock_bh(&dwc->lock);
}
-static int dwc_alloc_chan_resources(struct dma_chan *chan,
- struct dma_client *client)
+static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc;
- struct dma_slave *slave;
struct dw_dma_slave *dws;
int i;
u32 cfghi;
u32 cfglo;
- dev_vdbg(&chan->dev, "alloc_chan_resources\n");
-
- /* Channels doing slave DMA can only handle one client. */
- if (dwc->dws || client->slave) {
- if (chan->client_count)
- return -EBUSY;
- }
+ dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
/* ASSERT: channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
- dev_dbg(&chan->dev, "DMA channel not idle?\n");
+ dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
return -EIO;
}
@@ -789,23 +790,17 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
cfghi = DWC_CFGH_FIFO_MODE;
cfglo = 0;
- slave = client->slave;
- if (slave) {
+ dws = dwc->dws;
+ if (dws) {
/*
* We need controller-specific data to set up slave
* transfers.
*/
- BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev);
-
- dws = container_of(slave, struct dw_dma_slave, slave);
+ BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
- dwc->dws = dws;
cfghi = dws->cfg_hi;
cfglo = dws->cfg_lo;
- } else {
- dwc->dws = NULL;
}
-
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
@@ -822,7 +817,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
if (!desc) {
- dev_info(&chan->dev,
+ dev_info(chan2dev(chan),
"only allocated %d descriptors\n", i);
spin_lock_bh(&dwc->lock);
break;
@@ -832,7 +827,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
desc->txd.tx_submit = dwc_tx_submit;
desc->txd.flags = DMA_CTRL_ACK;
INIT_LIST_HEAD(&desc->txd.tx_list);
- desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
+ desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
sizeof(desc->lli), DMA_TO_DEVICE);
dwc_desc_put(dwc, desc);
@@ -847,7 +842,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
spin_unlock_bh(&dwc->lock);
- dev_dbg(&chan->dev,
+ dev_dbg(chan2dev(chan),
"alloc_chan_resources allocated %d descriptors\n", i);
return i;
@@ -860,7 +855,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
struct dw_desc *desc, *_desc;
LIST_HEAD(list);
- dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
+ dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
dwc->descs_allocated);
/* ASSERT: channel is idle */
@@ -881,13 +876,13 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_unlock_bh(&dwc->lock);
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
- dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc);
- dma_unmap_single(chan->dev.parent, desc->txd.phys,
+ dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
+ dma_unmap_single(chan2parent(chan), desc->txd.phys,
sizeof(desc->lli), DMA_TO_DEVICE);
kfree(desc);
}
- dev_vdbg(&chan->dev, "free_chan_resources done\n");
+ dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}
/*----------------------------------------------------------------------*/
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0b95dcce447..ca70a21afc6 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -366,8 +366,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
*
* Return - The number of descriptors allocated.
*/
-static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
- struct dma_client *client)
+static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
@@ -823,7 +822,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
*/
WARN_ON(fdev->feature != new_fsl_chan->feature);
- new_fsl_chan->dev = &new_fsl_chan->common.dev;
+ new_fsl_chan->dev = &new_fsl_chan->common.dev->device;
new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 9b16a3af9a0..4105d6575b6 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -75,60 +75,10 @@ static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
-static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
-{
- struct ioat_device *device = pci_get_drvdata(pdev);
- u8 version;
- int err = 0;
-
- version = readb(iobase + IOAT_VER_OFFSET);
- switch (version) {
- case IOAT_VER_1_2:
- device->dma = ioat_dma_probe(pdev, iobase);
- if (device->dma && ioat_dca_enabled)
- device->dca = ioat_dca_init(pdev, iobase);
- break;
- case IOAT_VER_2_0:
- device->dma = ioat_dma_probe(pdev, iobase);
- if (device->dma && ioat_dca_enabled)
- device->dca = ioat2_dca_init(pdev, iobase);
- break;
- case IOAT_VER_3_0:
- device->dma = ioat_dma_probe(pdev, iobase);
- if (device->dma && ioat_dca_enabled)
- device->dca = ioat3_dca_init(pdev, iobase);
- break;
- default:
- err = -ENODEV;
- break;
- }
- if (!device->dma)
- err = -ENODEV;
- return err;
-}
-
-static void ioat_shutdown_functionality(struct pci_dev *pdev)
-{
- struct ioat_device *device = pci_get_drvdata(pdev);
-
- dev_err(&pdev->dev, "Removing dma and dca services\n");
- if (device->dca) {
- unregister_dca_provider(device->dca);
- free_dca_provider(device->dca);
- device->dca = NULL;
- }
-
- if (device->dma) {
- ioat_dma_remove(device->dma);
- device->dma = NULL;
- }
-}
-
static struct pci_driver ioat_pci_driver = {
.name = "ioatdma",
.id_table = ioat_pci_tbl,
.probe = ioat_probe,
- .shutdown = ioat_shutdown_functionality,
.remove = __devexit_p(ioat_remove),
};
@@ -179,7 +129,29 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- err = ioat_setup_functionality(pdev, iobase);
+ switch (readb(iobase + IOAT_VER_OFFSET)) {
+ case IOAT_VER_1_2:
+ device->dma = ioat_dma_probe(pdev, iobase);
+ if (device->dma && ioat_dca_enabled)
+ device->dca = ioat_dca_init(pdev, iobase);
+ break;
+ case IOAT_VER_2_0:
+ device->dma = ioat_dma_probe(pdev, iobase);
+ if (device->dma && ioat_dca_enabled)
+ device->dca = ioat2_dca_init(pdev, iobase);
+ break;
+ case IOAT_VER_3_0:
+ device->dma = ioat_dma_probe(pdev, iobase);
+ if (device->dma && ioat_dca_enabled)
+ device->dca = ioat3_dca_init(pdev, iobase);
+ break;
+ default:
+ err = -ENODEV;
+ break;
+ }
+ if (!device->dma)
+ err = -ENODEV;
+
if (err)
goto err_version;
@@ -198,17 +170,21 @@ err_enable_device:
return err;
}
-/*
- * It is unsafe to remove this module: if removed while a requested
- * dma is outstanding, esp. from tcp, it is possible to hang while
- * waiting for something that will never finish. However, if you're
- * feeling lucky, this usually works just fine.
- */
static void __devexit ioat_remove(struct pci_dev *pdev)
{
struct ioat_device *device = pci_get_drvdata(pdev);
- ioat_shutdown_functionality(pdev);
+ dev_err(&pdev->dev, "Removing dma and dca services\n");
+ if (device->dca) {
+ unregister_dca_provider(device->dca);
+ free_dca_provider(device->dca);
+ device->dca = NULL;
+ }
+
+ if (device->dma) {
+ ioat_dma_remove(device->dma);
+ device->dma = NULL;
+ }
kfree(device);
}
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 6607fdd00b1..b3759c4b653 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -734,8 +734,7 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
* ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
* @chan: the channel to be filled out
*/
-static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
- struct dma_client *client)
+static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
struct ioat_desc_sw *desc;
@@ -1341,12 +1340,11 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
*/
#define IOAT_TEST_SIZE 2000
-DECLARE_COMPLETION(test_completion);
static void ioat_dma_test_callback(void *dma_async_param)
{
- printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
- dma_async_param);
- complete(&test_completion);
+ struct completion *cmp = dma_async_param;
+
+ complete(cmp);
}
/**
@@ -1363,6 +1361,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int err = 0;
+ struct completion cmp;
src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
if (!src)
@@ -1381,7 +1380,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
dma_chan = container_of(device->common.channels.next,
struct dma_chan,
device_node);
- if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
+ if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
dev_err(&device->pdev->dev,
"selftest cannot allocate chan resource\n");
err = -ENODEV;
@@ -1402,8 +1401,9 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
}
async_tx_ack(tx);
+ init_completion(&cmp);
tx->callback = ioat_dma_test_callback;
- tx->callback_param = (void *)0x8086;
+ tx->callback_param = &cmp;
cookie = tx->tx_submit(tx);
if (cookie < 0) {
dev_err(&device->pdev->dev,
@@ -1413,7 +1413,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
}
device->common.device_issue_pending(dma_chan);
- wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000));
+ wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
!= DMA_SUCCESS) {
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 6be31726220..ea5440dd10d 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -24,7 +24,6 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
@@ -116,7 +115,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
}
/* run dependent operations */
- async_tx_run_dependencies(&desc->async_tx);
+ dma_run_dependencies(&desc->async_tx);
return cookie;
}
@@ -270,8 +269,6 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
break;
}
- BUG_ON(!seen_current);
-
if (cookie > 0) {
iop_chan->completed_cookie = cookie;
pr_debug("\tcompleted cookie %d\n", cookie);
@@ -471,8 +468,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
* greater than 2x the number slots needed to satisfy a device->max_xor
* request.
* */
-static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
- struct dma_client *client)
+static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
{
char *hw_desc;
int idx;
@@ -866,7 +862,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
dma_chan = container_of(device->common.channels.next,
struct dma_chan,
device_node);
- if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
+ if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
}
@@ -964,7 +960,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
dma_chan = container_of(device->common.channels.next,
struct dma_chan,
device_node);
- if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
+ if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
}
@@ -1115,26 +1111,13 @@ static int __devexit iop_adma_remove(struct platform_device *dev)
struct iop_adma_device *device = platform_get_drvdata(dev);
struct dma_chan *chan, *_chan;
struct iop_adma_chan *iop_chan;
- int i;
struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
dma_async_device_unregister(&device->common);
- for (i = 0; i < 3; i++) {
- unsigned int irq;
- irq = platform_get_irq(dev, i);
- free_irq(irq, device);
- }
-
dma_free_coherent(&dev->dev, plat_data->pool_size,
device->dma_desc_pool_virt, device->dma_desc_pool);
- do {
- struct resource *res;
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, res->end - res->start);
- } while (0);
-
list_for_each_entry_safe(chan, _chan, &device->common.channels,
device_node) {
iop_chan = to_iop_adma_chan(chan);
@@ -1255,7 +1238,6 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
spin_lock_init(&iop_chan->lock);
INIT_LIST_HEAD(&iop_chan->chain);
INIT_LIST_HEAD(&iop_chan->all_slots);
- INIT_RCU_HEAD(&iop_chan->common.rcu);
iop_chan->common.device = dma_dev;
list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
@@ -1431,16 +1413,12 @@ static int __init iop_adma_init (void)
return platform_driver_register(&iop_adma_driver);
}
-/* it's currently unsafe to unload this module */
-#if 0
static void __exit iop_adma_exit (void)
{
platform_driver_unregister(&iop_adma_driver);
return;
}
module_exit(iop_adma_exit);
-#endif
-
module_init(iop_adma_init);
MODULE_AUTHOR("Intel Corporation");
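
The same two interface changes seen in iop-adma.c recur in the mv_xor.c hunks below: device_alloc_chan_resources() loses its struct dma_client argument (the client concept is removed from the core), and dependent transactions are kicked off with dma_run_dependencies() instead of async_tx_run_dependencies(). A hedged sketch of how a driver wires these up against the reworked struct dma_device; the example_* names and descriptor layout are illustrative only.

#include <linux/dmaengine.h>

struct example_desc {
	struct dma_async_tx_descriptor async_tx;
	/* driver-private descriptor state ... */
};

/* New prototype: no struct dma_client argument any more */
static int example_alloc_chan_resources(struct dma_chan *chan)
{
	/* a real driver allocates its descriptor pool here */
	return 16;	/* number of descriptors ready for use */
}

static dma_cookie_t example_desc_complete(struct example_desc *desc)
{
	/* ... record the completed cookie, unmap buffers, run callbacks ... */

	/* was async_tx_run_dependencies(&desc->async_tx) */
	dma_run_dependencies(&desc->async_tx);
	return desc->async_tx.cookie;
}

static int example_register(struct dma_device *dma_dev)
{
	dma_dev->device_alloc_chan_resources = example_alloc_chan_resources;
	/* ... remaining hooks filled in as before ... */
	return dma_async_device_register(dma_dev);
}
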
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index bcda1742641..d35cbd1ff0b 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
@@ -340,7 +339,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
}
/* run dependent operations */
- async_tx_run_dependencies(&desc->async_tx);
+ dma_run_dependencies(&desc->async_tx);
return cookie;
}
@@ -607,8 +606,7 @@ submit_done:
}
/* returns the number of allocated descriptors */
-static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
- struct dma_client *client)
+static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
char *hw_desc;
int idx;
@@ -958,7 +956,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
dma_chan = container_of(device->common.channels.next,
struct dma_chan,
device_node);
- if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+ if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
}
@@ -1053,7 +1051,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
dma_chan = container_of(device->common.channels.next,
struct dma_chan,
device_node);
- if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+ if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
}
@@ -1221,7 +1219,6 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&mv_chan->chain);
INIT_LIST_HEAD(&mv_chan->completed_slots);
INIT_LIST_HEAD(&mv_chan->all_slots);
- INIT_RCU_HEAD(&mv_chan->common.rcu);
mv_chan->common.device = dma_dev;
list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 1e97916914a..76bfe16c09b 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -55,7 +55,6 @@ enum atmel_mci_state {
struct atmel_mci_dma {
#ifdef CONFIG_MMC_ATMELMCI_DMA
- struct dma_client client;
struct dma_chan *chan;
struct dma_async_tx_descriptor *data_desc;
#endif
@@ -593,10 +592,8 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
/* If we don't have a channel, we can't do DMA */
chan = host->dma.chan;
- if (chan) {
- dma_chan_get(chan);
+ if (chan)
host->data_chan = chan;
- }
if (!chan)
return -ENODEV;
@@ -1443,60 +1440,6 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_MMC_ATMELMCI_DMA
-
-static inline struct atmel_mci *
-dma_client_to_atmel_mci(struct dma_client *client)
-{
- return container_of(client, struct atmel_mci, dma.client);
-}
-
-static enum dma_state_client atmci_dma_event(struct dma_client *client,
- struct dma_chan *chan, enum dma_state state)
-{
- struct atmel_mci *host;
- enum dma_state_client ret = DMA_NAK;
-
- host = dma_client_to_atmel_mci(client);
-
- switch (state) {
- case DMA_RESOURCE_AVAILABLE:
- spin_lock_bh(&host->lock);
- if (!host->dma.chan) {
- host->dma.chan = chan;
- ret = DMA_ACK;
- }
- spin_unlock_bh(&host->lock);
-
- if (ret == DMA_ACK)
- dev_info(&host->pdev->dev,
- "Using %s for DMA transfers\n",
- chan->dev.bus_id);
- break;
-
- case DMA_RESOURCE_REMOVED:
- spin_lock_bh(&host->lock);
- if (host->dma.chan == chan) {
- host->dma.chan = NULL;
- ret = DMA_ACK;
- }
- spin_unlock_bh(&host->lock);
-
- if (ret == DMA_ACK)
- dev_info(&host->pdev->dev,
- "Lost %s, falling back to PIO\n",
- chan->dev.bus_id);
- break;
-
- default:
- break;
- }
-
-
- return ret;
-}
-#endif /* CONFIG_MMC_ATMELMCI_DMA */
-
static int __init atmci_init_slot(struct atmel_mci *host,
struct mci_slot_pdata *slot_data, unsigned int id,
u32 sdc_reg)
@@ -1600,6 +1543,18 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
mmc_free_host(slot->mmc);
}
+#ifdef CONFIG_MMC_ATMELMCI_DMA
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ struct dw_dma_slave *dws = slave;
+
+ if (dws->dma_dev == chan->device->dev)
+ return true;
+ else
+ return false;
+}
+#endif
+
static int __init atmci_probe(struct platform_device *pdev)
{
struct mci_platform_data *pdata;
@@ -1652,22 +1607,20 @@ static int __init atmci_probe(struct platform_device *pdev)
goto err_request_irq;
#ifdef CONFIG_MMC_ATMELMCI_DMA
- if (pdata->dma_slave) {
- struct dma_slave *slave = pdata->dma_slave;
+ if (pdata->dma_slave.dma_dev) {
+ struct dw_dma_slave *dws = &pdata->dma_slave;
+ dma_cap_mask_t mask;
- slave->tx_reg = regs->start + MCI_TDR;
- slave->rx_reg = regs->start + MCI_RDR;
+ dws->tx_reg = regs->start + MCI_TDR;
+ dws->rx_reg = regs->start + MCI_RDR;
/* Try to grab a DMA channel */
- host->dma.client.event_callback = atmci_dma_event;
- dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask);
- host->dma.client.slave = slave;
-
- dma_async_client_register(&host->dma.client);
- dma_async_client_chan_request(&host->dma.client);
- } else {
- dev_notice(&pdev->dev, "DMA not available, using PIO\n");
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma.chan = dma_request_channel(mask, filter, dws);
}
+ if (!host->dma.chan)
+ dev_notice(&pdev->dev, "DMA not available, using PIO\n");
#endif /* CONFIG_MMC_ATMELMCI_DMA */
platform_set_drvdata(pdev, host);
@@ -1699,8 +1652,8 @@ static int __init atmci_probe(struct platform_device *pdev)
err_init_slot:
#ifdef CONFIG_MMC_ATMELMCI_DMA
- if (pdata->dma_slave)
- dma_async_client_unregister(&host->dma.client);
+ if (host->dma.chan)
+ dma_release_channel(host->dma.chan);
#endif
free_irq(irq, host);
err_request_irq:
@@ -1731,8 +1684,8 @@ static int __exit atmci_remove(struct platform_device *pdev)
clk_disable(host->mck);
#ifdef CONFIG_MMC_ATMELMCI_DMA
- if (host->dma.client.slave)
- dma_async_client_unregister(&host->dma.client);
+ if (host->dma.chan)
+ dma_release_channel(host->dma.chan);
#endif
free_irq(platform_get_irq(pdev, 0), host);
@@ -1761,7 +1714,7 @@ static void __exit atmci_exit(void)
platform_driver_unregister(&atmci_driver);
}
-module_init(atmci_init);
+late_initcall(atmci_init); /* try to load after dma driver when built-in */
module_exit(atmci_exit);
MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
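
The atmel-mci conversion above is the template for clients of the new private-channel API: build a capability mask, pass a filter callback that recognizes the right DMA device, keep the returned channel until dma_release_channel(). A minimal sketch of that flow follows; the example_* names are hypothetical, and the filter parameter here is simply the wanted struct device rather than a dw_dma_slave.

#include <linux/dmaengine.h>

/* Called by the core for each candidate channel; return true to claim it. */
static bool example_filter(struct dma_chan *chan, void *param)
{
	struct device *wanted = param;	/* e.g. the platform's DMA controller */

	return chan->device->dev == wanted;
}

static struct dma_chan *example_grab_channel(struct device *dma_controller)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* May return NULL; callers fall back to PIO in that case. */
	return dma_request_channel(mask, example_filter, dma_controller);
}

/* On teardown, the channel is returned with dma_release_channel(chan). */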