@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&vc->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
-	list_add_tail(&vd->node, &vc->desc_submitted);
+	list_move_tail(&vd->node, &vc->desc_submitted);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -39,6 +39,33 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 }
 EXPORT_SYMBOL_GPL(vchan_tx_submit);
 
+/**
+ * vchan_tx_desc_free - free a reusable descriptor
+ * @tx: the transfer
+ *
+ * This function frees a previously allocated reusable descriptor. The only
+ * other way to release it is to clear the DMA_CTRL_REUSE flag and submit the
+ * transfer one last time.
+ *
+ * Returns 0 upon success.
+ */
+int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
+{
+	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+	struct virt_dma_desc *vd = to_virt_desc(tx);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vc->lock, flags);
+	list_del(&vd->node);
+	spin_unlock_irqrestore(&vc->lock, flags);
+
+	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
+		vc, vd, vd->tx.cookie);
+	vc->desc_free(vd);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
+
 struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
 	dma_cookie_t cookie)
 {
@@ -83,8 +110,10 @@ static void vchan_complete(unsigned long arg)
 		cb_data = vd->tx.callback_param;
 
 		list_del(&vd->node);
-
-		vc->desc_free(vd);
+		if (dmaengine_desc_test_reuse(&vd->tx))
+			list_add(&vd->node, &vc->desc_allocated);
+		else
+			vc->desc_free(vd);
 
 		if (cb)
 			cb(cb_data);
@@ -96,9 +125,13 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 	while (!list_empty(head)) {
 		struct virt_dma_desc *vd = list_first_entry(head,
 			struct virt_dma_desc, node);
-		list_del(&vd->node);
-		dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-		vc->desc_free(vd);
+		if (dmaengine_desc_test_reuse(&vd->tx)) {
+			list_move_tail(&vd->node, &vc->desc_allocated);
+		} else {
+			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+			list_del(&vd->node);
+			vc->desc_free(vd);
+		}
 	}
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -108,6 +141,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
 	dma_cookie_init(&vc->chan);
 
 	spin_lock_init(&vc->lock);
+	INIT_LIST_HEAD(&vc->desc_allocated);
 	INIT_LIST_HEAD(&vc->desc_submitted);
 	INIT_LIST_HEAD(&vc->desc_issued);
 	INIT_LIST_HEAD(&vc->desc_completed);
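
For context, a minimal client-side sketch of the descriptor reuse flow that the changes above enable. It is illustrative only and not part of this patch: it assumes the companion dmaengine core helpers from the same series (dmaengine_desc_set_reuse() and dmaengine_desc_free()) are available, and that the virt-dma based driver routes the descriptor's desc_free hook to the new vchan_tx_desc_free(). The function name foo_run_reusable_transfer() and the three-iteration loop are placeholders.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Illustrative sketch only, not part of this patch. */
static int foo_run_reusable_transfer(struct dma_chan *chan,
				     struct scatterlist *sgl,
				     unsigned int sg_len)
{
	struct dma_async_tx_descriptor *tx;
	int i;

	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	/* Ask for DMA_CTRL_REUSE; fails if the channel cannot reuse descriptors. */
	if (dmaengine_desc_set_reuse(tx))
		return -EPERM;

	/*
	 * The same descriptor can now be submitted several times: after each
	 * completion, vchan_complete() parks it on desc_allocated instead of
	 * freeing it, and vchan_tx_submit() moves it back to desc_submitted.
	 */
	for (i = 0; i < 3; i++) {
		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		/* ... wait for the completion callback before resubmitting ... */
	}

	/* Release the descriptor explicitly once it is no longer needed. */
	return dmaengine_desc_free(tx);
}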