vsp1_dl.c

/*
 * vsp1_dl.c -- R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __attribute__((__packed__));

struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __attribute__((__packed__));

struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __attribute__((__packed__));

/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in a display list's list of bodies
 * @vsp1: the VSP1 device
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 */
struct vsp1_dl_body {
	struct list_head list;
	struct vsp1_device *vsp1;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
};

/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @fragments: list of extra display list bodies
 * @has_chain: if true, indicates that there's a partition chain
 * @chain: entry in the display list partition chain
 * @internal: whether the display list is used for internal purposes
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body body0;
	struct list_head fragments;

	bool has_chain;
	struct list_head chain;

	bool internal;
};

enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};

/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, pending and gc_fragments lists
 * @free: list of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @gc_work: fragments garbage collector work struct
 * @gc_fragments: list of display list fragments waiting to be freed
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	bool singleshot;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct work_struct gc_work;
	struct list_head gc_fragments;
};

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/*
 * Initialize a display list body object and allocate DMA memory for the body
 * data. The display list body object is expected to have been initialized to
 * 0 when allocated.
 */
static int vsp1_dl_body_init(struct vsp1_device *vsp1,
			     struct vsp1_dl_body *dlb, unsigned int num_entries,
			     size_t extra_size)
{
	size_t size = num_entries * sizeof(*dlb->entries) + extra_size;

	dlb->vsp1 = vsp1;
	dlb->size = size;

	dlb->entries = dma_alloc_wc(vsp1->bus_master, dlb->size, &dlb->dma,
				    GFP_KERNEL);
	if (!dlb->entries)
		return -ENOMEM;

	return 0;
}

/*
 * Clean up a display list body and free the allocated DMA memory.
 */
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
	dma_free_wc(dlb->vsp1->bus_master, dlb->size, dlb->entries, dlb->dma);
}

/**
 * vsp1_dl_fragment_alloc - Allocate a display list fragment
 * @vsp1: The VSP1 device
 * @num_entries: The maximum number of entries that the fragment can contain
 *
 * Allocate a display list fragment with enough memory to contain the requested
 * number of entries.
 *
 * Return a pointer to a fragment on success or NULL if memory can't be
 * allocated.
 */
struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
					    unsigned int num_entries)
{
	struct vsp1_dl_body *dlb;
	int ret;

	dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
	if (!dlb)
		return NULL;

	ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
	if (ret < 0) {
		kfree(dlb);
		return NULL;
	}

	return dlb;
}
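
/*
 * Example (illustrative sketch, not part of the driver): a typical caller
 * allocates a fragment sized for a known number of register writes, and
 * frees it explicitly only when it was never added to a display list. The
 * entry count of 16 is an arbitrary illustration.
 *
 *	struct vsp1_dl_body *dlb;
 *
 *	dlb = vsp1_dl_fragment_alloc(vsp1, 16);
 *	if (!dlb)
 *		return -ENOMEM;
 */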

/**
 * vsp1_dl_fragment_free - Free a display list fragment
 * @dlb: The fragment
 *
 * Free the given display list fragment and the associated DMA memory.
 *
 * Fragments must only be freed explicitly if they are not added to a display
 * list, as the display list will take ownership of them and free them
 * otherwise. Manual free typically happens at cleanup time for fragments that
 * have been allocated but not used.
 *
 * Passing a NULL pointer to this function is safe; in that case no operation
 * will be performed.
 */
void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
{
	if (!dlb)
		return;

	vsp1_dl_body_cleanup(dlb);
	kfree(dlb);
}

/**
 * vsp1_dl_fragment_write - Write a register to a display list fragment
 * @dlb: The fragment
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list fragment. The maximum
 * number of entries that can be written in a fragment is specified when the
 * fragment is allocated by vsp1_dl_fragment_alloc().
 */
void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}
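
/*
 * Example (illustrative sketch): filling a fragment with register writes.
 * EXAMPLE_REG is a hypothetical register offset, not a real VSP1 register.
 * Note that no bounds check is performed; the caller must not write more
 * entries than were requested from vsp1_dl_fragment_alloc().
 *
 *	vsp1_dl_fragment_write(dlb, EXAMPLE_REG, 0x00000001);
 *	vsp1_dl_fragment_write(dlb, EXAMPLE_REG + 4, 0x00000000);
 */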

/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_size;
	int ret;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->fragments);
	dl->dlm = dlm;

	/*
	 * Initialize the display list body and allocate DMA memory for the
	 * body and the optional header. Both are allocated together to avoid
	 * memory fragmentation, with the header located right after the body
	 * in memory.
	 */
	header_size = dlm->mode == VSP1_DL_MODE_HEADER
		    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
		    : 0;

	ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
				header_size);
	if (ret < 0) {
		kfree(dl);
		return NULL;
	}

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		size_t header_offset = VSP1_DL_NUM_ENTRIES
				     * sizeof(*dl->body0.entries);

		dl->header = ((void *)dl->body0.entries) + header_offset;
		dl->dma = dl->body0.dma + header_offset;

		memset(dl->header, 0, sizeof(*dl->header));
		dl->header->lists[0].addr = dl->body0.dma;
	}

	return dl;
}
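
/*
 * Resulting memory layout in header mode, derived from the packed structure
 * sizes above: body0 holds VSP1_DL_NUM_ENTRIES (256) entries of 8 bytes
 * each, so the header starts at byte offset 256 * 8 = 2048, which is
 * already 8-byte aligned.
 *
 *	+----------------------+  dl->body0.dma
 *	|  256 vsp1_dl_entry   |
 *	|  slots (2048 bytes)  |
 *	+----------------------+  dl->dma = dl->body0.dma + 2048
 *	|  vsp1_dl_header      |
 *	+----------------------+
 */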

static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_cleanup(&dl->body0);
	list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
	kfree(dl);
}

/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a
		 * chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_child;

	if (!dl)
		return;

	/*
	 * Release any linked display lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_child, &dl->chain, chain)
			__vsp1_dl_list_put(dl_child);
	}

	dl->has_chain = false;

	/*
	 * We can't free fragments here as DMA memory can only be freed in
	 * non-atomic context. Move all fragments to the display list
	 * manager's list of fragments to be freed; they will be
	 * garbage-collected by the work queue.
	 */
	if (!list_empty(&dl->fragments)) {
		list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
		schedule_work(&dl->dlm->gc_work);
	}

	dl->body0.num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe; in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}
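
/*
 * Example (illustrative sketch): the get/put pair brackets the lifetime of
 * a display list taken from the free pool. Lists are recycled rather than
 * freed, so vsp1_dl_list_get() never allocates memory and may return NULL
 * when the pool is exhausted. A caller only puts a list back explicitly on
 * error paths; a committed list is put back by the manager at frame end.
 *
 *	struct vsp1_dl_list *dl;
 *
 *	dl = vsp1_dl_list_get(dlm);
 *	if (!dl)
 *		return -ENOMEM;
 */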

/**
 * vsp1_dl_list_write - Write a register to the display list
 * @dl: The display list
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list. Up to 256 registers
 * can be written per display list.
 */
void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
{
	vsp1_dl_fragment_write(&dl->body0, reg, data);
}

/**
 * vsp1_dl_list_add_fragment - Add a fragment to the display list
 * @dl: The display list
 * @dlb: The fragment
 *
 * Add a display list body as a fragment to a display list. Registers contained
 * in fragments are processed after registers contained in the main display
 * list, in the order in which fragments are added.
 *
 * Adding a fragment to a display list passes ownership of the fragment to the
 * list. The caller must not touch the fragment after this call, and must not
 * free it explicitly with vsp1_dl_fragment_free().
 *
 * Fragments are only usable for display lists in header mode. Attempting to
 * add a fragment to a headerless display list will return an error.
 */
int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
			      struct vsp1_dl_body *dlb)
{
	/* Multi-body lists are only available in header mode. */
	if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	list_add_tail(&dlb->list, &dl->fragments);
	return 0;
}
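
/*
 * Example (illustrative sketch): attaching a pre-built fragment to a
 * display list in header mode. On success the list owns the fragment; on
 * failure ownership stays with the caller, who must free it.
 *
 *	ret = vsp1_dl_list_add_fragment(dl, dlb);
 *	if (ret < 0)
 *		vsp1_dl_fragment_free(dlb);
 */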

/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head dl item is
 * put back with __vsp1_dl_list_put().
 *
 * Chained display lists are only usable in header mode. Attempts to add a
 * display list to a chain in headerless mode will return an error.
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
			   struct vsp1_dl_list *dl)
{
	/* Chained lists are only available in header mode. */
	if (head->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}
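
/*
 * Example (illustrative sketch): chaining one display list per image
 * partition so the hardware processes them back to back and raises a
 * single display list end interrupt for the whole chain.
 *
 *	struct vsp1_dl_list *head = vsp1_dl_list_get(dlm);
 *	struct vsp1_dl_list *part = vsp1_dl_list_get(dlm);
 *	int ret;
 *
 *	ret = vsp1_dl_list_add_chain(head, part);
 *	if (ret < 0)
 *		vsp1_dl_list_put(part);
 */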

static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies addresses and sizes.
	 * The address of the first body has already been filled when the
	 * display list was allocated.
	 */
	hdr->num_bytes = dl->body0.num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->fragments, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;

	if (!list_empty(&dl->chain) && !is_last) {
		/*
		 * If this display list's chain is not empty, we are on a
		 * list, and the next item is the display list that we must
		 * queue for automatic processing by the hardware.
		 */
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
		dl->header->flags = VSP1_DLH_AUTO_START;
	} else if (!dlm->singleshot) {
		/*
		 * If the display list manager works in continuous mode, the
		 * VSP should loop over the display list continuously until
		 * instructed to do otherwise.
		 */
		dl->header->next_header = dl->dma;
		dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START;
	} else {
		/*
		 * Otherwise, in mem-to-mem mode, we work in single-shot mode
		 * and the next display list must not be started automatically.
		 */
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}

static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (!dlm->queued)
		return false;

	/*
	 * Check whether the VSP1 has taken the update. In headerless mode the
	 * hardware indicates this by clearing the UPD bit in the DL_BODY_SIZE
	 * register, and in header mode by clearing the UPDHDR bit in the CMD
	 * register.
	 */
	if (dlm->mode == VSP1_DL_MODE_HEADERLESS)
		return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
			  & VI6_DL_BODY_SIZE_UPD);
	else
		return !!(vsp1_read(vsp1, VI6_CMD(dlm->index))
			  & VI6_CMD_UPDHDR);
}

static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (dlm->mode == VSP1_DL_MODE_HEADERLESS) {
		/*
		 * In headerless mode, program the hardware directly with the
		 * display list body address and size and set the UPD bit. The
		 * bit will be cleared by the hardware when the display list
		 * processing starts.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
			   (dl->body0.num_entries * sizeof(*dl->header->lists)));
	} else {
		/*
		 * In header mode, program the display list header address. If
		 * the hardware is idle (single-shot mode or first frame in
		 * continuous mode) it will then be started independently. If
		 * the hardware is operating, the VI6_DL_HDR_REF_ADDR register
		 * will be updated with the display list address.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
	}
}

static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * If a previous display list has been queued to the hardware but not
	 * processed yet, the VSP can start processing it at any time. In that
	 * case we can't replace the queued list by the new one, as we could
	 * race with the hardware. We thus mark the update as pending; it will
	 * be queued up to the hardware by the frame end interrupt handler.
	 *
	 * If a display list is already pending we simply drop it, as the new
	 * display list is assumed to contain a more recent configuration. It
	 * is an error if the already pending list has the internal flag set,
	 * as there is then a process waiting for that list to complete. This
	 * shouldn't happen as the waiting process should perform proper
	 * locking, but warn just in case.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm)) {
		WARN_ON(dlm->pending && dlm->pending->internal);
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		return;
	}

	/*
	 * Pass the new display list to the hardware and mark it as queued. It
	 * will become active when the hardware starts processing it.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;
}

static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * When working in single-shot mode, the caller guarantees that the
	 * hardware is idle at this point. Just commit the head display list
	 * to hardware. Chained lists will be started automatically.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	dlm->active = dl;
}
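
/*
 * Commit a display list to the hardware. In header mode the headers of the
 * head and all chained display lists are filled first, then the list is
 * handed to the single-shot or continuous commit path under the manager
 * lock. The internal flag marks the list for internal-use completion
 * notification through vsp1_dlm_irq_frame_end().
 */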
void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_list *dl_child;
	unsigned long flags;

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		/* Fill the header for the head and chained display lists. */
		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

		list_for_each_entry(dl_child, &dl->chain, chain) {
			bool last = list_is_last(&dl_child->chain, &dl->chain);

			vsp1_dl_list_fill_header(dl_child, last);
		}
	}

	dl->internal = internal;

	spin_lock_irqsave(&dlm->lock, flags);

	if (dlm->singleshot)
		vsp1_dl_list_commit_singleshot(dl);
	else
		vsp1_dl_list_commit_continuous(dl);

	spin_unlock_irqrestore(&dlm->lock, flags);
}
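
/*
 * Example (illustrative sketch): the complete transaction pattern seen
 * from a caller. EXAMPLE_REG is a hypothetical register offset. The frame
 * end handler returns the list to the free pool once it has completed.
 *
 *	struct vsp1_dl_list *dl;
 *
 *	dl = vsp1_dl_list_get(dlm);
 *	vsp1_dl_list_write(dl, EXAMPLE_REG, 0x00000001);
 *	vsp1_dl_list_commit(dl, false);
 */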

/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 * @dlm: the display list manager
 *
 * Return a set of flags that indicates display list completion status.
 *
 * The VSP1_DL_FRAME_END_COMPLETED flag indicates that the previous display
 * list has completed at frame end. If the flag is not returned, display list
 * completion has been delayed by one frame because the display list commit
 * raced with the frame end interrupt. The function always returns with the
 * flag set in header mode as display list processing is then not continuous
 * and races never occur.
 *
 * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the previous display
 * list has completed and had been queued with the internal notification flag.
 * Internal notification is only supported for continuous mode.
 */
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	unsigned int flags = 0;

	spin_lock(&dlm->lock);

	/*
	 * The mem-to-mem pipelines work in single-shot mode. No new display
	 * list can be queued, so we don't have to do anything.
	 */
	if (dlm->singleshot) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
		goto done;
	}

	/*
	 * If the commit operation raced with the interrupt and occurred after
	 * the frame end event but before interrupt processing, the hardware
	 * hasn't taken the update into account yet. We have to skip one frame
	 * and retry.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm))
		goto done;

	/*
	 * The device starts processing the queued display list right after
	 * the frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		if (dlm->queued->internal)
			flags |= VSP1_DL_FRAME_END_INTERNAL;
		dlm->queued->internal = false;

		__vsp1_dl_list_put(dlm->active);
		dlm->active = dlm->queued;
		dlm->queued = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
	}

	/*
	 * Now that the VSP has started processing the queued display list, we
	 * can queue the pending display list to the hardware if one has been
	 * prepared.
	 */
	if (dlm->pending) {
		vsp1_dl_list_hw_enqueue(dlm->pending);
		dlm->queued = dlm->pending;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);

	return flags;
}
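
/*
 * Example (illustrative sketch): a frame end interrupt handler consuming
 * the returned flags. The completion helpers named here are hypothetical.
 *
 *	unsigned int flags = vsp1_dlm_irq_frame_end(dlm);
 *
 *	if (flags & VSP1_DL_FRAME_END_COMPLETED)
 *		example_pipeline_frame_end(pipe);
 *	if (flags & VSP1_DL_FRAME_END_INTERNAL)
 *		wake_up(&example_wait_queue);
 */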

/* -----------------------------------------------------------------------------
 * Hardware Setup
 */

void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;

	/*
	 * The DRM pipeline operates with display lists in Continuous Frame
	 * Mode; all other pipelines use manual start.
	 */
	if (vsp1->drm)
		ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}

/*
 * Free all fragments waiting to be garbage-collected.
 *
 * This function must be called without the display list manager lock held.
 */
static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	while (!list_empty(&dlm->gc_fragments)) {
		struct vsp1_dl_body *dlb;

		dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
				       list);
		list_del(&dlb->list);

		spin_unlock_irqrestore(&dlm->lock, flags);
		vsp1_dl_fragment_free(dlb);
		spin_lock_irqsave(&dlm->lock, flags);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);
}

static void vsp1_dlm_garbage_collect(struct work_struct *work)
{
	struct vsp1_dl_manager *dlm =
		container_of(work, struct vsp1_dl_manager, gc_work);

	vsp1_dlm_fragments_free(dlm);
}

struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->mode = index == 0 && !vsp1->info->uapi
		  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
	dlm->singleshot = vsp1->info->uapi;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);
	INIT_LIST_HEAD(&dlm->gc_fragments);
	INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);

	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl)
			return NULL;

		list_add_tail(&dl->list, &dlm->free);
	}

	return dlm;
}

void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	cancel_work_sync(&dlm->gc_work);

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dlm_fragments_free(dlm);
}
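
/*
 * Example (illustrative sketch): manager lifecycle at probe/remove time,
 * preallocating a small pool of display lists for one WPF instance. The
 * index and pool size shown are arbitrary illustrations.
 *
 *	struct vsp1_dl_manager *dlm;
 *
 *	dlm = vsp1_dlm_create(vsp1, 0, 4);
 *	if (!dlm)
 *		return -ENOMEM;
 *	...
 *	vsp1_dlm_destroy(dlm);
 */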