Diffstat (limited to 'drivers/dma/dmaengine.c')
 drivers/dma/dmaengine.c | 21
 1 file changed, 9 insertions, 12 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8db0e7f9d3f..97b329e7679 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -42,9 +42,9 @@
  *
  * Each device has a kref, which is initialized to 1 when the device is
  * registered. A kref_get is done for each device registered. When the
- * device is released, the coresponding kref_put is done in the release
+ * device is released, the corresponding kref_put is done in the release
  * method. Every time one of the device's channels is allocated to a client,
- * a kref_get occurs. When the channel is freed, the coresponding kref_put
+ * a kref_get occurs. When the channel is freed, the corresponding kref_put
  * happens. The device's release function does a completion, so
  * unregister_device does a remove event, device_unregister, a kref_put
  * for the first reference, then waits on the completion for all other
@@ -53,7 +53,7 @@
  * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
  * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
  * signals that it wants to use a channel, and dma_chan_put is called when
- * a channel is removed or a client using it is unregesitered. A client can
+ * a channel is removed or a client using it is unregistered. A client can
  * take extra references per outstanding transaction, as is the case with
  * the NET DMA client. The release function does a kref_put on the device.
  * -ChrisL, DanW
@@ -362,7 +362,6 @@ int dma_async_device_register(struct dma_device *device)
 
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
-	BUG_ON(!device->device_dependency_added);
 	BUG_ON(!device->device_is_tx_complete);
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
@@ -479,7 +478,8 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 
 	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -487,7 +487,6 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 		return -ENOMEM;
 	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
@@ -525,7 +524,8 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 
 	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -533,7 +533,6 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 		return -ENOMEM;
 	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
@@ -574,7 +573,8 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 
 	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
+					 DMA_CTRL_ACK);
 
 	if (!tx) {
 		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -582,7 +582,6 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 		return -ENOMEM;
 	}
 
-	tx->ack = 1;
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
@@ -600,8 +599,6 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 {
 	tx->chan = chan;
 	spin_lock_init(&tx->lock);
-	INIT_LIST_HEAD(&tx->depend_node);
-	INIT_LIST_HEAD(&tx->depend_list);
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
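
For context, a minimal caller-side sketch of the convention this patch establishes (assuming the 2.6.25-era dmaengine API shown above; my_issue_copy is an illustrative name, not part of the patch). A client now asks for a pre-acked descriptor up front by passing DMA_CTRL_ACK to the prep routine, instead of setting tx->ack = 1 after the descriptor comes back:

#include <linux/dmaengine.h>

/* Illustrative helper, not from this patch: issue one pre-acked copy
 * on an already-mapped source/destination pair. */
static dma_cookie_t my_issue_copy(struct dma_chan *chan, dma_addr_t dma_dest,
				  dma_addr_t dma_src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;

	/* DMA_CTRL_ACK at prep time: the client promises not to attach
	 * a dependent transaction, so the driver may recycle this
	 * descriptor as soon as it completes. Previously this was the
	 * post-prep assignment "tx->ack = 1". */
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	tx->callback = NULL;		/* no completion callback needed */
	return tx->tx_submit(tx);
}

Folding the ack bit into the prep-time flags means a driver knows at allocation time whether a descriptor can be reused immediately on completion, rather than waiting for a client to flip a field on a live descriptor.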
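On the provider side, the flags argument is simply recorded on the descriptor. A hypothetical driver sketch (struct my_desc, my_desc_get, and my_hw_fill are illustrative names, not from this patch); the removed INIT_LIST_HEAD() calls in dma_async_tx_descriptor_init pair with this, since per-descriptor depend_node/depend_list bookkeeping is dropped and the generic descriptor carries flags instead of a writable ack bit:

#include <linux/dmaengine.h>

struct my_desc {				/* hypothetical descriptor */
	struct dma_async_tx_descriptor txd;	/* embedded generic part */
	/* ... hardware descriptor state ... */
};

static struct my_desc *my_desc_get(struct dma_chan *chan);	/* hypothetical */
static void my_hw_fill(struct my_desc *desc, dma_addr_t dma_dest,
		       dma_addr_t dma_src, size_t len);		/* hypothetical */

static struct dma_async_tx_descriptor *
my_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
		   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct my_desc *desc = my_desc_get(chan);

	if (!desc)
		return NULL;
	my_hw_fill(desc, dma_dest, dma_src, len);
	desc->txd.flags = flags;	/* DMA_CTRL_ACK is recorded here */
	return &desc->txd;
}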