seq_fifo.c

/*
 * ALSA sequencer FIFO
 * Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <sound/core.h>
#include <linux/slab.h>
#include "seq_fifo.h"
#include "seq_lock.h"

/* FIFO */
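
/*
 * The FIFO is a singly linked list of event cells (head/tail pointers)
 * allocated from the fifo's own pool.  All list manipulation is protected
 * by f->lock; readers sleep on f->input_sleep until a cell arrives, and
 * events dropped for lack of cells are counted in the atomic overflow flag.
 */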

/* create new fifo */
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
	struct snd_seq_fifo *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->pool = snd_seq_pool_new(poolsize);
	if (f->pool == NULL) {
		kfree(f);
		return NULL;
	}
	if (snd_seq_pool_init(f->pool) < 0) {
		snd_seq_pool_delete(&f->pool);
		kfree(f);
		return NULL;
	}

	spin_lock_init(&f->lock);
	snd_use_lock_init(&f->use_lock);
	init_waitqueue_head(&f->input_sleep);
	atomic_set(&f->overflow, 0);

	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;

	return f;
}
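
/*
 * release the fifo: clear any remaining cells, wake up sleeping readers
 * and free the underlying pool.  The caller's pointer is cleared first so
 * no new references can be taken through it while the fifo is torn down.
 */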
void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
	struct snd_seq_fifo *f;

	if (snd_BUG_ON(!fifo))
		return;
	f = *fifo;
	if (snd_BUG_ON(!f))
		return;
	*fifo = NULL;

	snd_seq_fifo_clear(f);

	/* wake up clients if any */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	/* release resources...*/
	/*....................*/

	if (f->pool) {
		snd_seq_pool_done(f->pool);
		snd_seq_pool_delete(&f->pool);
	}

	kfree(f);
}

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

/* clear queue */
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;

	/* clear overflow flag */
	atomic_set(&f->overflow, 0);

	snd_use_lock_sync(&f->use_lock);
	spin_lock_irqsave(&f->lock, flags);
	/* drain the fifo */
	while ((cell = fifo_cell_out(f)) != NULL) {
		snd_seq_cell_free(cell);
	}
	spin_unlock_irqrestore(&f->lock, flags);
}
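
/*
 * Note: enqueueing never sleeps.  The event is duplicated into a cell from
 * the fifo's pool in non-blocking mode, and on -ENOMEM or -EAGAIN the
 * overflow counter is bumped instead, so this path should be safe to call
 * from contexts that must not block.
 */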
/* enqueue event to fifo */
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
			  struct snd_seq_event *event)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	snd_use_lock_use(&f->use_lock);
	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
	if (err < 0) {
		if ((err == -ENOMEM) || (err == -EAGAIN))
			atomic_inc(&f->overflow);
		snd_use_lock_free(&f->use_lock);
		return err;
	}

	/* append new cells to fifo */
	spin_lock_irqsave(&f->lock, flags);
	if (f->tail != NULL)
		f->tail->next = cell;
	f->tail = cell;
	if (f->head == NULL)
		f->head = cell;
	f->cells++;
	spin_unlock_irqrestore(&f->lock, flags);

	/* wakeup client */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	snd_use_lock_free(&f->use_lock);

	return 0; /* success */
}
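
/* unlink and return the head cell; must be called with f->lock held */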
/* dequeue cell from fifo */
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	if ((cell = f->head) != NULL) {
		f->head = cell->next;

		/* reset tail if this was the last element */
		if (f->tail == cell)
			f->tail = NULL;

		cell->next = NULL;
		f->cells--;
	}

	return cell;
}
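
/*
 * Blocking read of one cell: if the fifo is empty and nonblock is not set,
 * the caller sleeps on input_sleep until a cell arrives or a signal is
 * pending (-ERESTARTSYS); with nonblock set, -EAGAIN is returned instead.
 */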
/* dequeue cell from fifo and copy to user space */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
			  struct snd_seq_event_cell **cellp, int nonblock)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	wait_queue_t wait;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	*cellp = NULL;
	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&f->lock, flags);
	while ((cell = fifo_cell_out(f)) == NULL) {
		if (nonblock) {
			/* non-blocking - return immediately */
			spin_unlock_irqrestore(&f->lock, flags);
			return -EAGAIN;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&f->input_sleep, &wait);
		spin_unlock_irq(&f->lock);
		schedule();
		spin_lock_irq(&f->lock);
		remove_wait_queue(&f->input_sleep, &wait);
		if (signal_pending(current)) {
			spin_unlock_irqrestore(&f->lock, flags);
			return -ERESTARTSYS;
		}
	}
	spin_unlock_irqrestore(&f->lock, flags);
	*cellp = cell;

	return 0;
}
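
/* return a cell to the head of the fifo, e.g. when the caller could not consume it */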
void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
			       struct snd_seq_event_cell *cell)
{
	unsigned long flags;

	if (cell) {
		spin_lock_irqsave(&f->lock, flags);
		cell->next = f->head;
		f->head = cell;
		if (!f->tail)
			f->tail = cell; /* keep tail valid when the fifo was empty */
		f->cells++;
		spin_unlock_irqrestore(&f->lock, flags);
	}
}

/* polling; return non-zero if queue is available */
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &f->input_sleep, wait);
	return (f->cells > 0);
}
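
/*
 * The pool swap is done under f->lock, so concurrent writers immediately
 * see the new, empty fifo; the old cells and the old pool are released
 * only after the lock has been dropped.
 */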
/* change the size of pool; all old events are removed */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
	unsigned long flags;
	struct snd_seq_pool *newpool, *oldpool;
	struct snd_seq_event_cell *cell, *next, *oldhead;

	if (snd_BUG_ON(!f || !f->pool))
		return -EINVAL;

	/* allocate new pool */
	newpool = snd_seq_pool_new(poolsize);
	if (newpool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(newpool) < 0) {
		snd_seq_pool_delete(&newpool);
		return -ENOMEM;
	}

	spin_lock_irqsave(&f->lock, flags);
	/* remember old pool */
	oldpool = f->pool;
	oldhead = f->head;
	/* exchange pools */
	f->pool = newpool;
	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;
	/* NOTE: overflow flag is not cleared */
	spin_unlock_irqrestore(&f->lock, flags);

	/* release cells in old pool */
	for (cell = oldhead; cell; cell = next) {
		next = cell->next;
		snd_seq_cell_free(cell);
	}
	snd_seq_pool_delete(&oldpool);

	return 0;
}