author     Viresh Kumar <viresh.kumar@st.com>    2011-04-15 16:03:35 +0530
committer  Vinod Koul <vinod.koul@intel.com>     2011-05-13 19:39:46 +0530
commit     5fedefb87bd0a64281d28edd295f29e3b989d78c
tree       685ad5799aee56907febe1cd22aa240a90ff35a5
parent     de7a2f9f7b6f5b48d8531ff4c9c9b95cab8a8ce8
dmaengine/dw_dmac: don't call callback routine in case dmaengine_terminate_all() is called
If dmaengine_terminate_all() is called for a DMA channel, it doesn't make
much sense to call the registered callback routine for the flushed
descriptors, whereas in case of success or failure it must still be
called.
Signed-off-by: Viresh Kumar <viresh.kumar@st.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
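
For context, the sketch below (not part of the patch) shows a hypothetical
dmaengine client and where the callback now does and does not run; the
names my_xfer_done() and start_and_abort() are illustrative assumptions,
not code from the tree.

#include <linux/dmaengine.h>
#include <linux/completion.h>

/* Illustrative callback: runs on success or failure, but after this
 * patch not for descriptors flushed by dmaengine_terminate_all(). */
static void my_xfer_done(void *param)
{
	complete(param);
}

/* Hypothetical helper: submit a previously prepared descriptor with a
 * callback attached, then abort the channel. */
static void start_and_abort(struct dma_chan *chan,
			    struct dma_async_tx_descriptor *desc,
			    struct completion *done)
{
	desc->callback = my_xfer_done;
	desc->callback_param = done;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* Flushes all pending and queued descriptors; dw_dmac now
	 * completes them with callback_required == false, so
	 * my_xfer_done() is not invoked for them. */
	dmaengine_terminate_all(chan);
}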
-rw-r--r--   drivers/dma/dw_dmac.c   27
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index b15c32ca0ef..265d43c8b35 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -195,18 +195,21 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 /*----------------------------------------------------------------------*/
 
 static void
-dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
+dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
+		bool callback_required)
 {
-	dma_async_tx_callback callback;
-	void *param;
+	dma_async_tx_callback callback = NULL;
+	void *param = NULL;
 	struct dma_async_tx_descriptor *txd = &desc->txd;
 	struct dw_desc *child;
 
 	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 
 	dwc->completed = txd->cookie;
-	callback = txd->callback;
-	param = txd->callback_param;
+	if (callback_required) {
+		callback = txd->callback;
+		param = txd->callback_param;
+	}
 
 	dwc_sync_desc_for_cpu(dwc, desc);
 
@@ -238,11 +241,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
 		}
 	}
 
-	/*
-	 * The API requires that no submissions are done from a
-	 * callback, so we don't need to drop the lock here
-	 */
-	if (callback)
+	if (callback_required && callback)
 		callback(param);
 }
 
@@ -272,7 +271,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	}
 
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		dwc_descriptor_complete(dwc, desc);
+		dwc_descriptor_complete(dwc, desc, true);
 }
 
 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -322,7 +321,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 			 * No descriptors so far seem to be in progress, i.e.
 			 * this one must be done.
 			 */
-			dwc_descriptor_complete(dwc, desc);
+			dwc_descriptor_complete(dwc, desc, true);
 		}
 
 		dev_err(chan2dev(&dwc->chan),
@@ -384,7 +383,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		dwc_dump_lli(dwc, &child->lli);
 
 	/* Pretend the descriptor completed successfully */
-	dwc_descriptor_complete(dwc, bad_desc);
+	dwc_descriptor_complete(dwc, bad_desc, true);
 }
 
 /* --------------------- Cyclic DMA API extensions -------------------- */
@@ -831,7 +830,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		dwc_descriptor_complete(dwc, desc);
+		dwc_descriptor_complete(dwc, desc, false);
 
 	return 0;
 }
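
For reference, dmaengine_terminate_all() reaches the driver through its
device_control() hook with cmd == DMA_TERMINATE_ALL, which is how the
dwc_control() flush path above is entered. Roughly, the
include/linux/dmaengine.h helpers of this era look like the following
(quoted from memory, so treat as a sketch rather than the exact header):

/* Generic wrapper: dispatch a control command to the channel's driver. */
static inline int dmaengine_device_control(struct dma_chan *chan,
					   enum dma_ctrl_cmd cmd,
					   unsigned long arg)
{
	return chan->device->device_control(chan, cmd, arg);
}

/* Abort all transfers on the channel and flush its descriptors. */
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
}

Passing callback_required into dwc_descriptor_complete() keeps a single
completion path for the normal, error, and terminate cases instead of
duplicating the descriptor-recycling logic for the flush.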