author    | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-23 12:03:18 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-23 12:03:18 -0700
commit    | 5554b35933245e95710d709175e14c02cbc956a4 (patch)
tree      | 2eeb2f05a7061da3c9a3bc9ea69a344b990c6b49 /drivers/dma/dmaengine.c
parent    | 0f6e38a6381446eff5175b77d1094834a633a90f (diff)
parent    | 7f1b358a236ee9c19657a619ac6f2dcabcaa0924 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (24 commits)
I/OAT: I/OAT version 3.0 support
I/OAT: tcp_dma_copybreak default value dependent on I/OAT version
I/OAT: Add watchdog/reset functionality to ioatdma
iop_adma: cleanup iop_chan_xor_slot_count
iop_adma: document how to calculate the minimum descriptor pool size
iop_adma: directly reclaim descriptors on allocation failure
async_tx: make async_tx_test_ack a boolean routine
async_tx: remove depend_tx from async_tx_sync_epilog
async_tx: export async_tx_quiesce
async_tx: fix handling of the "out of descriptor" condition in async_xor
async_tx: ensure the xor destination buffer remains dma-mapped
async_tx: list_for_each_entry_rcu() cleanup
dmaengine: Driver for the Synopsys DesignWare DMA controller
dmaengine: Add slave DMA interface
dmaengine: add DMA_COMPL_SKIP_{SRC,DEST}_UNMAP flags to control dma unmap
dmaengine: Add dma_client parameter to device_alloc_chan_resources
dmatest: Simple DMA memcpy test client
dmaengine: DMA engine driver for Marvell XOR engine
iop-adma: fix platform driver hotplug/coldplug
dmaengine: track the number of clients using a channel
...
Fixed up conflict in drivers/dca/dca-sysfs.c manually
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r-- | drivers/dma/dmaengine.c | 35
1 file changed, 28 insertions, 7 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 97b329e7679..dc003a3a787 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -169,12 +169,18 @@ static void dma_client_chan_alloc(struct dma_client *client)
         enum dma_state_client ack;
 
         /* Find a channel */
-        list_for_each_entry(device, &dma_device_list, global_node)
+        list_for_each_entry(device, &dma_device_list, global_node) {
+                /* Does the client require a specific DMA controller? */
+                if (client->slave && client->slave->dma_dev
+                                && client->slave->dma_dev != device->dev)
+                        continue;
+
                 list_for_each_entry(chan, &device->channels, device_node) {
                         if (!dma_chan_satisfies_mask(chan, client->cap_mask))
                                 continue;
 
-                        desc = chan->device->device_alloc_chan_resources(chan);
+                        desc = chan->device->device_alloc_chan_resources(
+                                        chan, client);
                         if (desc >= 0) {
                                 ack = client->event_callback(client,
                                                 chan,
@@ -183,12 +189,14 @@ static void dma_client_chan_alloc(struct dma_client *client)
                                 /* we are done once this client rejects
                                  * an available resource
                                  */
-                                if (ack == DMA_ACK)
+                                if (ack == DMA_ACK) {
                                         dma_chan_get(chan);
-                                else if (ack == DMA_NAK)
+                                        chan->client_count++;
+                                } else if (ack == DMA_NAK)
                                         return;
                         }
                 }
+        }
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -272,8 +280,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
                         /* client was holding resources for this channel so
                          * free it
                          */
-                        if (ack == DMA_ACK)
+                        if (ack == DMA_ACK) {
                                 dma_chan_put(chan);
+                                chan->client_count--;
+                        }
         }
 
         mutex_unlock(&dma_list_mutex);
@@ -285,6 +295,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
  */
 void dma_async_client_register(struct dma_client *client)
 {
+        /* validate client data */
+        BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
+                !client->slave);
+
         mutex_lock(&dma_list_mutex);
         list_add_tail(&client->global_node, &dma_client_list);
         mutex_unlock(&dma_list_mutex);
@@ -313,8 +327,10 @@ void dma_async_client_unregister(struct dma_client *client)
                         ack = client->event_callback(client, chan,
                                 DMA_RESOURCE_REMOVED);
 
-                        if (ack == DMA_ACK)
+                        if (ack == DMA_ACK) {
                                 dma_chan_put(chan);
+                                chan->client_count--;
+                        }
                 }
 
         list_del(&client->global_node);
@@ -359,6 +375,10 @@ int dma_async_device_register(struct dma_device *device)
                 !device->device_prep_dma_memset);
         BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
                 !device->device_prep_dma_interrupt);
+        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
+                !device->device_prep_slave_sg);
+        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
+                !device->device_terminate_all);
 
         BUG_ON(!device->device_alloc_chan_resources);
         BUG_ON(!device->device_free_chan_resources);
@@ -378,7 +398,7 @@ int dma_async_device_register(struct dma_device *device)
 
                 chan->chan_id = chancnt++;
                 chan->dev.class = &dma_devclass;
-                chan->dev.parent = NULL;
+                chan->dev.parent = device->dev;
                 snprintf(chan->dev.bus_id, BUS_ID_SIZE,
                          "dma%dchan%d", device->dev_id, chan->chan_id);
 
@@ -394,6 +414,7 @@ int dma_async_device_register(struct dma_device *device)
                 kref_get(&device->refcount);
                 kref_get(&device->refcount);
                 kref_init(&chan->refcount);
+                chan->client_count = 0;
                 chan->slow_ref = 0;
                 INIT_RCU_HEAD(&chan->rcu);
         }
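For orientation, below is a minimal client-side sketch (not part of this commit) of how a peripheral driver might use the DMA_SLAVE interface that this merge introduces. The my_* names are invented for illustration, and the struct dma_client / struct dma_slave field names, enum values, and callback signature are assumed from the 2.6.27-era <linux/dmaengine.h>; treat it as a sketch under those assumptions, not an authoritative example.

/*
 * Hypothetical slave-DMA client against the 2.6.27-era dmaengine API.
 * All my_* identifiers are invented for this sketch.
 */
#include <linux/dmaengine.h>

/* Called by the dmaengine core when channels appear or disappear. */
static enum dma_state_client my_dma_event(struct dma_client *client,
                                          struct dma_chan *chan,
                                          enum dma_state state)
{
        switch (state) {
        case DMA_RESOURCE_AVAILABLE:
                return DMA_ACK;         /* keep the channel; the core bumps chan->client_count */
        case DMA_RESOURCE_REMOVED:
                return DMA_ACK;         /* we held it; the core drops the ref and the count */
        default:
                return DMA_DUP;         /* not interested in other events */
        }
}

static struct dma_slave my_slave = {
        /* .dma_dev selects one specific controller; with it set,
         * dma_client_chan_alloc() now skips every other dma_device. */
        /* .dma_dev = &my_dma_controller->dev,   (hypothetical device) */
        /* .tx_reg / .rx_reg / .reg_width would describe the peripheral FIFO */
};

static struct dma_client my_client = {
        .event_callback = my_dma_event,
        .slave          = &my_slave,    /* mandatory: DMA_SLAVE without ->slave
                                         * trips the new BUG_ON at registration */
};

static void my_request_dma(void)
{
        dma_cap_zero(my_client.cap_mask);
        dma_cap_set(DMA_SLAVE, my_client.cap_mask);

        /* Channels are then offered asynchronously via my_dma_event(). */
        dma_async_client_register(&my_client);
}

The same merge tightens the provider side symmetrically: a dma_device that advertises DMA_SLAVE must now supply device_prep_slave_sg and device_terminate_all, and device_alloc_chan_resources receives the requesting client so the driver can apply the slave configuration.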