
dma: pl330: Differentiate between submitted and issued descriptors

The pl330 dmaengine driver currently does not differentiate between submitted
and issued descriptors. It will not start transferring a newly submitted
descriptor until issue_pending() is called, but only while the channel is idle.
If the channel is active and a new descriptor is submitted before it goes idle,
it will happily start the newly submitted descriptor once all earlier submitted
descriptors have completed. This is not 100% correct with regard to the
dmaengine interface semantics: a descriptor is not supposed to be started until
the next issue_pending() call after it has been submitted. This patch adds a
second per-channel list that keeps track of the submitted descriptors. Once
issue_pending() is called, the submitted descriptors are moved to the working
list, and only descriptors on the working list are started.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
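
For context, the dmaengine semantics being enforced here are the client-visible ones: dmaengine_submit() only queues a descriptor and assigns it a cookie, and the driver may not start it before the client calls dma_async_issue_pending(). Below is a minimal client-side sketch of that contract using the generic dmaengine API; it is not part of this patch, and the function name and parameters (chan, buf, len) are illustrative only.

#include <linux/dmaengine.h>

/* Illustrative dmaengine client: queue one slave transfer, then issue it. */
static int example_start_tx(struct dma_chan *chan, dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(chan, buf, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* Submitting only places the descriptor on the pending queue. */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Only now may the driver start the queued descriptors. */
	dma_async_issue_pending(chan);

	return 0;
}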
Lars-Peter Clausen, 11 years ago
commit 04abf5daf7

1 changed file with 20 additions and 4 deletions:
    drivers/dma/pl330.c

drivers/dma/pl330.c: +20 -4

@@ -543,7 +543,9 @@ struct dma_pl330_chan {
 	/* DMA-Engine Channel */
 	struct dma_chan chan;
 
-	/* List of to be xfered descriptors */
+	/* List of submitted descriptors */
+	struct list_head submitted_list;
+	/* List of issued descriptors */
 	struct list_head work_list;
 	/* List of completed descriptors */
 	struct list_head completed_list;
@@ -2388,6 +2390,11 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
 		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
 
 		/* Mark all desc done */
+		list_for_each_entry(desc, &pch->submitted_list, node) {
+			desc->status = FREE;
+			dma_cookie_complete(&desc->txd);
+		}
+
 		list_for_each_entry(desc, &pch->work_list , node) {
 			desc->status = FREE;
 			dma_cookie_complete(&desc->txd);
@@ -2398,6 +2405,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
 			dma_cookie_complete(&desc->txd);
 		}
 
+		list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
 		list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
 		list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
 		spin_unlock_irqrestore(&pch->lock, flags);
@@ -2456,7 +2464,14 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 
 static void pl330_issue_pending(struct dma_chan *chan)
 {
-	pl330_tasklet((unsigned long) to_pchan(chan));
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pch->lock, flags);
+	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
+	spin_unlock_irqrestore(&pch->lock, flags);
+
+	pl330_tasklet((unsigned long)pch);
 }
 
 /*
@@ -2483,11 +2498,11 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 
 		dma_cookie_assign(&desc->txd);
 
-		list_move_tail(&desc->node, &pch->work_list);
+		list_move_tail(&desc->node, &pch->submitted_list);
 	}
 
 	cookie = dma_cookie_assign(&last->txd);
-	list_add_tail(&last->node, &pch->work_list);
+	list_add_tail(&last->node, &pch->submitted_list);
 	spin_unlock_irqrestore(&pch->lock, flags);
 
 	return cookie;
@@ -2979,6 +2994,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		else
 			pch->chan.private = adev->dev.of_node;
 
+		INIT_LIST_HEAD(&pch->submitted_list);
 		INIT_LIST_HEAD(&pch->work_list);
 		INIT_LIST_HEAD(&pch->completed_list);
 		spin_lock_init(&pch->lock);
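
The pl330_issue_pending() hunk above relies on list_splice_tail_init(), which appends every entry of the first list to the tail of the second and re-initializes the first list to empty, all while pch->lock is held. A minimal sketch of that pattern, not taken from the driver; the function and parameter names here are made up for illustration.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Sketch: move everything queued by submit over to the work list. */
static void example_issue(spinlock_t *lock, struct list_head *submitted,
			  struct list_head *work)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* After this call, 'submitted' is empty and cannot be spliced twice. */
	list_splice_tail_init(submitted, work);
	spin_unlock_irqrestore(lock, flags);
}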