@@ -684,17 +684,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 	 * possible, perhaps even appending to those already submitted
 	 * for DMA. But this is hard to do in a race-free manner.
 	 */
-	if (list_empty(&dwc->active_list)) {
-		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
-				desc->txd.cookie);
-		list_add_tail(&desc->desc_node, &dwc->active_list);
-		dwc_dostart(dwc, dwc_first_active(dwc));
-	} else {
-		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
-				desc->txd.cookie);
 
-		list_add_tail(&desc->desc_node, &dwc->queue);
-	}
+	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
+	list_add_tail(&desc->desc_node, &dwc->queue);
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -1099,9 +1091,12 @@ dwc_tx_status(struct dma_chan *chan,
 static void dwc_issue_pending(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	unsigned long flags;
 
-	if (!list_empty(&dwc->queue))
-		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+	spin_lock_irqsave(&dwc->lock, flags);
+	if (list_empty(&dwc->active_list))
+		dwc_dostart_first_queued(dwc);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 }
 
 static int dwc_alloc_chan_resources(struct dma_chan *chan)
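The new dwc_issue_pending() path calls dwc_dostart_first_queued(), which is not defined in these hunks. As a rough sketch of what that helper is assumed to do, based on the driver's existing dwc_dostart()/dwc_first_active() helpers (the exact upstream definition may differ):

/*
 * Assumed shape of the helper used above; not part of these hunks.
 * Called with dwc->lock held: promote the oldest queued descriptor to
 * the active list and start the hardware on it.
 */
static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	/* Move the first queued descriptor onto the active list. */
	list_move(dwc->queue.next, &dwc->active_list);

	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__,
		 desc->txd.cookie);

	/* Program the channel and kick off the transfer. */
	dwc_dostart(dwc, desc);
}

With this split, dwc_tx_submit() only queues work under the channel lock, and the hardware is started from dwc_issue_pending() (or, on completion, from the tasklet) when nothing is already active.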