/*
 * vsp1_dl.c -- R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)
struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __attribute__((__packed__));

struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __attribute__((__packed__));

struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __attribute__((__packed__));
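
/*
 * Each display list entry is thus an 8-byte (register address, value) pair
 * consumed directly by the hardware. As an illustrative sketch (the register
 * names below are hypothetical, not part of this driver), a body filled with
 *
 *	vsp1_dl_fragment_write(dlb, VI6_EXAMPLE_REG0, 0x00000001);
 *	vsp1_dl_fragment_write(dlb, VI6_EXAMPLE_REG1, 0xdeadbeef);
 *
 * is laid out in DMA memory as
 *
 *	offset 0:  addr = VI6_EXAMPLE_REG0, data = 0x00000001
 *	offset 8:  addr = VI6_EXAMPLE_REG1, data = 0xdeadbeef
 */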
/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list list of bodies
 * @vsp1: the VSP1 device
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 */
struct vsp1_dl_body {
	struct list_head list;
	struct vsp1_device *vsp1;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
};
/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @fragments: list of extra display list bodies
 * @has_chain: true if the display list heads a partition chain
 * @chain: entry in the display list partition chain
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body body0;
	struct list_head fragments;

	bool has_chain;
	struct list_head chain;
};

enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};
/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, pending and gc_fragments lists
 * @free: array of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @gc_work: fragments garbage collector work struct
 * @gc_fragments: array of display list fragments waiting to be freed
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct work_struct gc_work;
	struct list_head gc_fragments;
};

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */
/*
 * Initialize a display list body object and allocate DMA memory for the body
 * data. The display list body object is expected to have been initialized to
 * 0 when allocated.
 */
static int vsp1_dl_body_init(struct vsp1_device *vsp1,
			     struct vsp1_dl_body *dlb, unsigned int num_entries,
			     size_t extra_size)
{
	size_t size = num_entries * sizeof(*dlb->entries) + extra_size;

	dlb->vsp1 = vsp1;
	dlb->size = size;

	dlb->entries = dma_alloc_wc(vsp1->dev, dlb->size, &dlb->dma,
				    GFP_KERNEL);
	if (!dlb->entries)
		return -ENOMEM;

	return 0;
}
/*
 * Clean up a display list body and free the allocated DMA memory.
 */
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
	dma_free_wc(dlb->vsp1->dev, dlb->size, dlb->entries, dlb->dma);
}
/**
 * vsp1_dl_fragment_alloc - Allocate a display list fragment
 * @vsp1: The VSP1 device
 * @num_entries: The maximum number of entries that the fragment can contain
 *
 * Allocate a display list fragment with enough memory to contain the requested
 * number of entries.
 *
 * Return a pointer to a fragment on success or NULL if memory can't be
 * allocated.
 */
struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
					    unsigned int num_entries)
{
	struct vsp1_dl_body *dlb;
	int ret;

	dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
	if (!dlb)
		return NULL;

	ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
	if (ret < 0) {
		kfree(dlb);
		return NULL;
	}

	return dlb;
}
/**
 * vsp1_dl_fragment_free - Free a display list fragment
 * @dlb: The fragment
 *
 * Free the given display list fragment and the associated DMA memory.
 *
 * Fragments must only be freed explicitly if they have not been added to a
 * display list, as the display list otherwise takes ownership of them and
 * frees them itself. Manual freeing typically happens at cleanup time for
 * fragments that have been allocated but not used.
 *
 * Passing a NULL pointer to this function is safe, in which case no operation
 * will be performed.
 */
void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
{
	if (!dlb)
		return;

	vsp1_dl_body_cleanup(dlb);
	kfree(dlb);
}
/**
 * vsp1_dl_fragment_write - Write a register to a display list fragment
 * @dlb: The fragment
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list fragment. The maximum
 * number of entries that can be written in a fragment is specified when the
 * fragment is allocated by vsp1_dl_fragment_alloc().
 */
void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}
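
/*
 * A minimal usage sketch for the fragment API (the register names and values
 * are hypothetical): pre-compute a set of constant registers once at setup
 * time, so they can be attached cheaply to display lists later.
 *
 *	struct vsp1_dl_body *dlb;
 *
 *	dlb = vsp1_dl_fragment_alloc(vsp1, 2);
 *	if (!dlb)
 *		return -ENOMEM;
 *
 *	vsp1_dl_fragment_write(dlb, VI6_EXAMPLE_CTRL, ctrl);
 *	vsp1_dl_fragment_write(dlb, VI6_EXAMPLE_SIZE, size);
 *
 * Note that vsp1_dl_fragment_write() performs no bounds checking: writing
 * more entries than were requested from vsp1_dl_fragment_alloc() overruns
 * the DMA buffer.
 */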
/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_size;
	int ret;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->fragments);
	dl->dlm = dlm;

	/*
	 * Initialize the display list body and allocate DMA memory for the
	 * body and the optional header. Both are allocated together to avoid
	 * memory fragmentation, with the header located right after the body
	 * in memory.
	 */
	header_size = dlm->mode == VSP1_DL_MODE_HEADER
		    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
		    : 0;

	ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
				header_size);
	if (ret < 0) {
		kfree(dl);
		return NULL;
	}

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		size_t header_offset = VSP1_DL_NUM_ENTRIES
				     * sizeof(*dl->body0.entries);

		dl->header = ((void *)dl->body0.entries) + header_offset;
		dl->dma = dl->body0.dma + header_offset;

		memset(dl->header, 0, sizeof(*dl->header));
		dl->header->lists[0].addr = dl->body0.dma;
	}

	return dl;
}
static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_cleanup(&dl->body0);
	list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
	kfree(dl);
}
/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a
		 * chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}
/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_child;

	if (!dl)
		return;

	/*
	 * Release any linked display lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_child, &dl->chain, chain)
			__vsp1_dl_list_put(dl_child);
	}

	dl->has_chain = false;

	/*
	 * We can't free fragments here as DMA memory can only be freed in
	 * interruptible context. Move all fragments to the display list
	 * manager's list of fragments to be freed; they will be
	 * garbage-collected by the work queue.
	 */
	if (!list_empty(&dl->fragments)) {
		list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
		schedule_work(&dl->dlm->gc_work);
	}

	dl->body0.num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}
/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in which case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}
/**
 * vsp1_dl_list_write - Write a register to the display list
 * @dl: The display list
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list. Up to 256 registers
 * can be written per display list.
 */
void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
{
	vsp1_dl_fragment_write(&dl->body0, reg, data);
}
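
/*
 * Typical per-frame flow, as a sketch (the register name is hypothetical):
 * get a free list from the manager, fill it and commit it. After the commit
 * the manager owns the list and recycles it from its interrupt handlers, so
 * the caller must not release it manually in this flow.
 *
 *	struct vsp1_dl_list *dl;
 *
 *	dl = vsp1_dl_list_get(dlm);
 *	if (!dl)
 *		return -EBUSY;
 *
 *	vsp1_dl_list_write(dl, VI6_EXAMPLE_REG, value);
 *	vsp1_dl_list_commit(dl);
 */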
/**
 * vsp1_dl_list_add_fragment - Add a fragment to the display list
 * @dl: The display list
 * @dlb: The fragment
 *
 * Add a display list body as a fragment to a display list. Registers contained
 * in fragments are processed after registers contained in the main display
 * list, in the order in which fragments are added.
 *
 * Adding a fragment to a display list passes ownership of the fragment to the
 * list. The caller must not touch the fragment after this call, and must not
 * free it explicitly with vsp1_dl_fragment_free().
 *
 * Fragments are only usable for display lists in header mode. Attempting to
 * add a fragment to a headerless display list will return an error.
 */
int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
			      struct vsp1_dl_body *dlb)
{
	/* Multi-body lists are only available in header mode. */
	if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	list_add_tail(&dlb->list, &dl->fragments);
	return 0;
}
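
/*
 * Sketch of attaching a pre-computed fragment to a header mode display list.
 * As ownership of the fragment moves to the list on success, the error path
 * below only frees the fragment when it was not added:
 *
 *	ret = vsp1_dl_list_add_fragment(dl, dlb);
 *	if (ret < 0) {
 *		vsp1_dl_fragment_free(dlb);
 *		return ret;
 *	}
 */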
/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head display
 * list is put back with __vsp1_dl_list_put().
 *
 * Chained display lists are only usable in header mode. Attempting to add a
 * display list to a chain in headerless mode will return an error.
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
			   struct vsp1_dl_list *dl)
{
	/* Chained lists are only available in header mode. */
	if (head->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}
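
/*
 * Sketch of building a chain, for instance to process an image in horizontal
 * partitions (the partition count is hypothetical and error handling is
 * elided):
 *
 *	struct vsp1_dl_list *head, *dl;
 *	unsigned int i;
 *
 *	head = vsp1_dl_list_get(dlm);
 *	for (i = 1; i < num_partitions; ++i) {
 *		dl = vsp1_dl_list_get(dlm);
 *		vsp1_dl_list_add_chain(head, dl);
 *	}
 *
 * Committing the head then programs a single header address; the hardware
 * walks the rest of the chain through the VSP1_DLH_AUTO_START flag set by
 * vsp1_dl_list_fill_header().
 */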
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies addresses and sizes.
	 * The address of the first body has already been filled when the
	 * display list was allocated.
	 */
	hdr->num_bytes = dl->body0.num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->fragments, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;

	/*
	 * If this display list belongs to a chain and is not the last entry,
	 * point the header at the next display list in the chain so that the
	 * hardware queues it automatically.
	 */
	if (!list_empty(&dl->chain) && !is_last) {
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
		dl->header->flags = VSP1_DLH_AUTO_START;
	} else {
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}
void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;
	unsigned long flags;
	bool update;

	spin_lock_irqsave(&dlm->lock, flags);

	if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
		struct vsp1_dl_list *dl_child;

		/*
		 * In header mode the caller guarantees that the hardware is
		 * idle at this point.
		 */

		/* Fill the header for the head and chained display lists. */
		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

		list_for_each_entry(dl_child, &dl->chain, chain) {
			bool last = list_is_last(&dl_child->chain, &dl->chain);

			vsp1_dl_list_fill_header(dl_child, last);
		}

		/*
		 * Commit the head display list to hardware. Chained headers
		 * will auto-start.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);

		dlm->active = dl;
		goto done;
	}

	/*
	 * Once the UPD bit has been set the hardware can start processing the
	 * display list at any time and we can't touch the address and size
	 * registers. In that case mark the update as pending; it will be
	 * queued to the hardware by the frame end interrupt handler.
	 */
	update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD);
	if (update) {
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		goto done;
	}

	/*
	 * Program the hardware with the display list body address and size.
	 * The UPD bit will be cleared by the device when the display list is
	 * processed.
	 */
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
	vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
		   (dl->body0.num_entries * sizeof(*dl->header->lists)));

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;

done:
	spin_unlock_irqrestore(&dlm->lock, flags);
}
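
/*
 * To summarise, in headerless mode a display list moves through up to three
 * manager slots before being recycled:
 *
 *	vsp1_dl_list_commit():        dl -> queued (UPD clear)
 *	                              dl -> pending (UPD set, racing hardware)
 *	vsp1_dlm_irq_frame_end():     queued -> active, then pending -> queued
 *	vsp1_dlm_irq_display_start()
 *	or the next frame end:        active -> free list
 */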
/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/* Interrupt Handling */
void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
{
	spin_lock(&dlm->lock);

	/*
	 * The display start interrupt signals the end of the display list
	 * processing by the device. The active display list, if any, won't be
	 * accessed anymore and can be reused.
	 */
	__vsp1_dl_list_put(dlm->active);
	dlm->active = NULL;

	spin_unlock(&dlm->lock);
}
void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	spin_lock(&dlm->lock);

	__vsp1_dl_list_put(dlm->active);
	dlm->active = NULL;

	/*
	 * Header mode is used for mem-to-mem pipelines only. We don't need to
	 * perform any operation as there can't be any new display list queued
	 * in that case.
	 */
	if (dlm->mode == VSP1_DL_MODE_HEADER)
		goto done;

	/*
	 * The UPD bit set indicates that the commit operation raced with the
	 * interrupt and occurred after the frame end event and UPD clear but
	 * before interrupt processing. The hardware hasn't taken the update
	 * into account yet; we'll thus skip one frame and retry.
	 */
	if (vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD)
		goto done;

	/*
	 * The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		dlm->active = dlm->queued;
		dlm->queued = NULL;
	}

	/*
	 * Now that the UPD bit has been cleared we can queue the next display
	 * list to the hardware if one has been prepared.
	 */
	if (dlm->pending) {
		struct vsp1_dl_list *dl = dlm->pending;

		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
			   (dl->body0.num_entries *
			    sizeof(*dl->header->lists)));

		dlm->queued = dl;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);
}
/* Hardware Setup */

void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;

	/*
	 * The DRM pipeline operates with display lists in Continuous Frame
	 * Mode; all other pipelines use manual start.
	 */
	if (vsp1->drm)
		ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}
void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}
/*
 * Free all fragments awaiting garbage collection.
 *
 * This function must be called without the display list manager lock held.
 */
static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	while (!list_empty(&dlm->gc_fragments)) {
		struct vsp1_dl_body *dlb;

		dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
				       list);
		list_del(&dlb->list);

		spin_unlock_irqrestore(&dlm->lock, flags);
		vsp1_dl_fragment_free(dlb);
		spin_lock_irqsave(&dlm->lock, flags);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);
}
static void vsp1_dlm_garbage_collect(struct work_struct *work)
{
	struct vsp1_dl_manager *dlm =
		container_of(work, struct vsp1_dl_manager, gc_work);

	vsp1_dlm_fragments_free(dlm);
}

struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->mode = index == 0 && !vsp1->info->uapi
		  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);
	INIT_LIST_HEAD(&dlm->gc_fragments);
	INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);

	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl)
			return NULL;

		list_add_tail(&dl->list, &dlm->free);
	}

	return dlm;
}
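
/*
 * A probe-time sketch (the surrounding structures and the prealloc value are
 * hypothetical): one manager is typically created per WPF instance, and the
 * mode selection above switches WPF.0 to headerless operation when the
 * device provides no userspace API, i.e. when it drives a display.
 *
 *	for (i = 0; i < wpf_count; ++i) {
 *		wpf[i]->dlm = vsp1_dlm_create(vsp1, i, 4);
 *		if (!wpf[i]->dlm)
 *			return -ENOMEM;
 *	}
 */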
void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	cancel_work_sync(&dlm->gc_work);

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dlm_fragments_free(dlm);
}