author		Dan Williams <dan.j.williams@intel.com>	2009-09-08 12:02:15 -0700
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-08 17:38:54 -0700
commit		4b652f0db3be891c7b76b109c3b55003b920fc96 (patch)
tree		a7747543a2076a2f58f423297e0da78b2963a04d /drivers/dma/iovlock.c
parent		a309218acee8606f7e235da20cc826eb06d9b0f6 (diff)
net_dma: poll for a descriptor after allocation failure
Handle descriptor allocation failures by polling for a descriptor. The
driver will force forward progress when polled. In the best case this
polling interval will be the time it takes for one dma memcpy
transaction to complete. In the worst case, channel hang, we will need
to wait 100ms for the cleanup watchdog to fire (ioatdma driver).

Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
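The pattern the patch introduces condenses to the loop below. This is a
minimal kernel-context sketch, not code from the patch: it assumes the
2009-era dmaengine API (dma_async_memcpy_buf_to_buf returning a negative
dma_cookie_t when no descriptor can be allocated, dma_async_issue_pending
to poll the channel), and the helper name copy_with_retry is invented for
illustration.

/*
 * Sketch of the retry-on-allocation-failure pattern (assumes the
 * 2009-era dmaengine API; copy_with_retry is a hypothetical helper).
 */
#include <linux/dmaengine.h>

static dma_cookie_t copy_with_retry(struct dma_chan *chan,
				    void *dest, void *src, size_t len)
{
	dma_cookie_t cookie;

	for (;;) {
		/* Try to allocate a descriptor and queue the copy. */
		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
		if (likely(cookie >= 0))
			return cookie;
		/*
		 * No descriptor available: poll the channel so the
		 * driver forces forward progress (completing in-flight
		 * descriptors frees slots), then retry the allocation.
		 */
		dma_async_issue_pending(chan);
	}
}

The actual patch inlines this check directly after each dma memcpy call
site, as the hunks below show.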
Diffstat (limited to 'drivers/dma/iovlock.c')
-rw-r--r--	drivers/dma/iovlock.c	10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
index 9f6fe46a9b8..c0a272c7368 100644
--- a/drivers/dma/iovlock.c
+++ b/drivers/dma/iovlock.c
@@ -183,6 +183,11 @@ dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
 				iov_byte_offset,
 				kdata,
 				copy);
+		/* poll for a descriptor slot */
+		if (unlikely(dma_cookie < 0)) {
+			dma_async_issue_pending(chan);
+			continue;
+		}
 
 		len -= copy;
 		iov[iovec_idx].iov_len -= copy;
@@ -248,6 +253,11 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
 				page,
 				offset,
 				copy);
+		/* poll for a descriptor slot */
+		if (unlikely(dma_cookie < 0)) {
+			dma_async_issue_pending(chan);
+			continue;
+		}
 
 		len -= copy;
 		iov[iovec_idx].iov_len -= copy;