/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@perex.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"
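
/* number of cells in the pool that are currently free for allocation */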
static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
	return pool->total_elements - atomic_read(&pool->counter);
}
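
/* a writer has room if at least pool->room cells are still free */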
static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
	return snd_seq_pool_available(pool) >= pool->room;
}

/*
 * Variable length event:
 * An event such as sysex uses the variable length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed to several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */

/*
 * exported:
 *  call dump function to expand external data.
 */
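
/* get the data length of a variable-length event; flag bits are masked off */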
static int get_var_len(const struct snd_seq_event *event)
{
	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return -EINVAL;

	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	if ((len = get_var_len(event)) <= 0)
		return len;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		char buf[32];
		char __user *curptr = (char __force __user *)event->data.ext.ptr;
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
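
	/* non-chained: the data sits in a single linear kernel buffer */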
	if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
		return func(private_data, event->data.ext.ptr, len);
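
	/* chained: walk the cell list, dumping one event-sized chunk at a time */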
	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		if (len < size)
			size = len;
		err = func(private_data, &cell->event, size);
		if (err < 0)
			return err;
		len -= size;
	}
	return 0;
}
EXPORT_SYMBOL(snd_seq_dump_var_event);

/*
 * exported:
 *  expand the variable length event to linear buffer space.
 */
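
/* dump callbacks for snd_seq_expand_var_event(): copy one chunk into the
 * kernel or user-space destination buffer and advance the write pointer */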
static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
	if (copy_to_user(*bufptr, src, size))
		return -EFAULT;
	*bufptr += size;
	return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen;
	int err;

	if ((len = get_var_len(event)) < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = roundup(len, size_aligned);
	if (count < newlen)
		return -EAGAIN;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		if (!in_kernel)
			return -EINVAL;
		if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
			return -EFAULT;
		return newlen;
	}
	err = snd_seq_dump_var_event(event,
				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
				     (snd_seq_dump_func_t)seq_copy_in_user,
				     &buf);
	return err < 0 ? err : newlen;
}
EXPORT_SYMBOL(snd_seq_expand_var_event);

/*
 * release this cell, free extended data if available
 */
static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}
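
/* return a cell and any chained cells to the pool, and wake up writers
 * sleeping for free space */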
void snd_seq_cell_free(struct snd_seq_event_cell *cell)
{
	unsigned long flags;
	struct snd_seq_pool *pool;

	if (snd_BUG_ON(!cell))
		return;
	pool = cell->pool;
	if (snd_BUG_ON(!pool))
		return;

	spin_lock_irqsave(&pool->lock, flags);
	free_cell(pool, cell);
	if (snd_seq_ev_is_variable(&cell->event)) {
		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
			struct snd_seq_event_cell *curp, *nextptr;
			curp = cell->event.data.ext.ptr;
			for (; curp; curp = nextptr) {
				nextptr = curp->next;
				curp->next = pool->free;
				free_cell(pool, curp);
			}
		}
	}
	if (waitqueue_active(&pool->output_sleep)) {
		/* has enough space now? */
		if (snd_seq_output_ok(pool))
			wake_up(&pool->output_sleep);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file,
			      struct mutex *mutexp)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_entry_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		pr_debug("ALSA: seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
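
	/* nothing free and blocking allowed: sleep until a cell is released,
	 * dropping the pool lock (and the caller's mutex, if any) meanwhile */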
	while (pool->free == NULL && !nonblock && !pool->closing) {
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		spin_unlock_irq(&pool->lock);
		if (mutexp)
			mutex_unlock(mutexp);
		schedule();
		if (mutexp)
			mutex_lock(mutexp);
		spin_lock_irq(&pool->lock);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* closing.. */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

__error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}

/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed to additional
 * cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file, struct mutex *mutexp)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;

	*cellp = NULL;

	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
	}
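	/* the event itself needs one cell on top of ncells, so the whole
	 * chain must stay strictly below the pool size */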
	if (ncells >= pool->total_elements)
		return -ENOMEM;

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
	if (err < 0)
		return err;

	/* copy the event */
	cell->event = *event;

	/* decompose */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			int size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
						 mutexp);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy chunk */
			if (is_chained && src) {
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

__error:
	snd_seq_cell_free(cell);
	return err;
}

/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}

/* allocate the specified number of event cells and build the free list */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
	int cell;
	struct snd_seq_event_cell *cellptr;
	unsigned long flags;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	cellptr = vmalloc(array_size(sizeof(struct snd_seq_event_cell),
				     pool->size));
	if (!cellptr)
		return -ENOMEM;

	/* add new cells to the free cell list */
	spin_lock_irqsave(&pool->lock, flags);
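	/* someone else initialized the pool in the meantime;
	 * drop our freshly allocated cell array */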
	if (pool->ptr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		vfree(cellptr);
		return 0;
	}

	pool->ptr = cellptr;
	pool->free = NULL;

	for (cell = 0; cell < pool->size; cell++) {
		cellptr = pool->ptr + cell;
		cellptr->pool = pool;
		cellptr->next = pool->free;
		pool->free = cellptr;
	}
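
	/* wake sleeping writers only once at least half of the pool is free */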
	pool->room = (pool->size + 1) / 2;

	/* init statistics */
	pool->max_used = 0;
	pool->total_elements = pool->size;
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
}

/* refuse any further insertion to the pool */
void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
{
	unsigned long flags;

	if (snd_BUG_ON(!pool))
		return;
	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);
}

/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	unsigned long flags;
	struct snd_seq_event_cell *ptr;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	/* wake up sleepers and wait until all in-use cells are returned */
	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);

	while (atomic_read(&pool->counter) > 0)
		schedule_timeout_uninterruptible(1);

	/* release all resources */
	spin_lock_irqsave(&pool->lock, flags);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	vfree(ptr);

	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	return 0;
}

/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
	struct snd_seq_pool *pool;

	/* create pool block */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	atomic_set(&pool->counter, 0);
	pool->closing = 0;
	init_waitqueue_head(&pool->output_sleep);
	pool->size = poolsize;

	/* init statistics */
	pool->max_used = 0;
	return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
	struct snd_seq_pool *pool = *ppool;

	*ppool = NULL;
	if (pool == NULL)
		return 0;
	snd_seq_pool_mark_closing(pool);
	snd_seq_pool_done(pool);
	kfree(pool);
	return 0;
}

/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space)
{
	if (pool == NULL)
		return;
	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
}