/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);
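
/*
 * Illustrative sketch (kept out of the build): a driver typically embeds
 * struct virt_dma_desc at the start of its own descriptor and returns it
 * through vchan_tx_prep() from virt-dma.h, which installs vchan_tx_submit()
 * above as the descriptor's ->tx_submit hook and queues it on
 * desc_allocated. The foo_* names are hypothetical.
 */
#if 0
struct foo_desc {
	struct virt_dma_desc vd;	/* embedded core descriptor */
	dma_addr_t src, dst;
	size_t len;
};

static struct dma_async_tx_descriptor *foo_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

	if (!d)
		return NULL;

	d->dst = dst;
	d->src = src;
	d->len = len;

	/* queued on desc_allocated; moved to desc_submitted on submit */
	return vchan_tx_prep(vc, &d->vd, flags);
}
#endif
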
/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated reusable descriptor. The only
 * other way is to clear the DMA_CTRL_REUSE flag and submit the transfer one
 * last time.
 *
 * Returns 0 upon success
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
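
/*
 * Illustrative client-side sketch (kept out of the build): with
 * DMA_CTRL_REUSE set, a completed descriptor returns to desc_allocated
 * instead of being freed, so the same transfer can be resubmitted.
 * vchan_tx_prep() wires vchan_tx_desc_free() up as ->desc_free, which is
 * what dmaengine_desc_free() ends up calling. foo_reuse_example() is
 * hypothetical.
 */
#if 0
static void foo_reuse_example(struct dma_chan *chan, dma_addr_t dst,
			      dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
	if (!tx || dmaengine_desc_set_reuse(tx))
		return;		/* channel lacks DMA_CTRL_REUSE support */

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* ... wait for completion; the descriptor survives ... */

	dmaengine_submit(tx);	/* resubmit without re-prepping */
	dma_async_issue_pending(chan);
	/* ... wait again, then release it for good ... */

	dmaengine_desc_free(tx);	/* lands in vchan_tx_desc_free() */
}
#endif
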
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);
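
/*
 * Illustrative sketch (kept out of the build): a typical ->device_tx_status
 * implementation calls vchan_find_desc() under the channel lock to check
 * whether the cookie is still on desc_issued and, if so, reports the whole
 * transfer length as residue. foo_tx_status() and to_foo_desc() are
 * hypothetical.
 */
#if 0
static enum dma_status foo_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&vc->lock, flags);
	vd = vchan_find_desc(vc, cookie);
	if (vd)
		/* not started yet: the whole transfer remains */
		dma_set_residue(state, to_foo_desc(vd)->len);
	spin_unlock_irqrestore(&vc->lock, flags);

	return ret;
}
#endif
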
/*
 * This tasklet handles the completion of a DMA descriptor by calling its
 * callback and then either freeing it or, if it is marked for reuse,
 * returning it to the desc_allocated list.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd;
	dma_async_tx_callback cb = NULL;
	void *cb_data = NULL;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;
	}
	spin_unlock_irq(&vc->lock);

	if (cb)
		cb(cb_data);

	while (!list_empty(&head)) {
		vd = list_first_entry(&head, struct virt_dma_desc, node);
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;

		list_del(&vd->node);
		if (dmaengine_desc_test_reuse(&vd->tx))
			list_add(&vd->node, &vc->desc_allocated);
		else
			vc->desc_free(vd);

		if (cb)
			cb(cb_data);
	}
}
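
/*
 * Illustrative sketch (kept out of the build): drivers feed this tasklet
 * from their interrupt handler via vchan_cookie_complete() for one-shot
 * descriptors or vchan_cyclic_callback() for cyclic ones, both from
 * virt-dma.h and both called under vc->lock. foo_chan is hypothetical and
 * reuses foo_desc from the sketch after vchan_tx_submit().
 */
#if 0
struct foo_chan {
	struct virt_dma_chan vc;
	struct foo_desc *cur;	/* descriptor currently on the hardware */
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_chan *fc = dev_id;

	spin_lock(&fc->vc.lock);
	if (fc->cur) {
		/* moves the descriptor to desc_completed and schedules us */
		vchan_cookie_complete(&fc->cur->vd);
		fc->cur = NULL;
		/* ... start the next descriptor from desc_issued ... */
	}
	spin_unlock(&fc->vc.lock);

	return IRQ_HANDLED;
}
#endif
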
void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct virt_dma_desc *vd = list_first_entry(head,
			struct virt_dma_desc, node);

		if (dmaengine_desc_test_reuse(&vd->tx)) {
			list_move_tail(&vd->node, &vc->desc_allocated);
		} else {
			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
			list_del(&vd->node);
			vc->desc_free(vd);
		}
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
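
/*
 * Illustrative sketch (kept out of the build): the canonical
 * ->device_terminate_all pairs vchan_get_all_descriptors() from virt-dma.h
 * with vchan_dma_desc_free_list(), so descriptors are collected under the
 * lock but freed outside it. foo_terminate_all() is hypothetical.
 */
#if 0
static int foo_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	/* ... stop the hardware and drop any in-flight descriptor ... */
	vchan_get_all_descriptors(vc, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	/* reusable descriptors go back to desc_allocated, others are freed */
	vchan_dma_desc_free_list(vc, &head);

	return 0;
}
#endif
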
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);

	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);
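
/*
 * Illustrative sketch (kept out of the build): at probe time a driver
 * initialises its dma_device's channel list, registers each virtual channel
 * with vchan_init(), and supplies the desc_free callback that
 * vchan_complete() and vchan_dma_desc_free_list() rely on. The foo_* names
 * are hypothetical.
 */
#if 0
static int foo_register_channels(struct foo_device *fd)
{
	int i;

	INIT_LIST_HEAD(&fd->ddev.channels);
	for (i = 0; i < FOO_NR_CHANNELS; i++) {
		struct foo_chan *fc = &fd->chans[i];

		/* called whenever the core is done with a descriptor */
		fc->vc.desc_free = foo_desc_free;
		vchan_init(&fc->vc, &fd->ddev);	/* links onto ddev.channels */
	}

	return dma_async_device_register(&fd->ddev);
}
#endif
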
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");