vc4_gem.c

/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"
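
/* (Re)arms the hangcheck timer roughly 100ms in the future.  While
 * jobs are still making progress, vc4_hangcheck_elapsed() keeps
 * rescheduling itself through this helper.
 */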
static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}

struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_put_unlocked(state->bo[i]);

	kfree(state);
}
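
/* Copies the saved hang state out to userspace.  GEM handles are
 * created for the dumped BOs and returned together with their sizes
 * and physical addresses; the kernel-side copy of the state is
 * consumed (freed) by this call.
 */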
int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);
		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user(u64_to_user_ptr(get_state->bo),
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}
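
/* Called from the reset path to snapshot the state of a hung GPU:
 * references to the BOs of the current bin and render jobs, the
 * addresses they were executing from, and a dump of the V3D debug
 * registers.  The result is stashed in vc4->hang_state for later
 * retrieval through vc4_get_hang_state_ioctl().
 */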
static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, k, unref_list_count;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	/* Get the bos from both binner and renderer into hang state. */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	k = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			bo = to_vc4_bo(&exec[i]->bo[j]->base);

			/* Retain BOs just in case they were marked purgeable.
			 * This prevents the BO from being purged before
			 * someone had a chance to dump the hang state.
			 */
			WARN_ON(!refcount_read(&bo->usecnt));
			refcount_inc(&bo->usecnt);
			drm_gem_object_get(&exec[i]->bo[j]->base);
			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			/* No need to retain BOs coming from the ->unref_list
			 * because they are naturally unpurgeable.
			 */
			drm_gem_object_get(&bo->base.base);
			kernel_state->bo[k++] = &bo->base.base;
		}
	}

	WARN_ON_ONCE(k != state->bo_count);

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	/* We need to turn purgeable BOs into unpurgeable ones so that
	 * userspace has a chance to dump the hang state before the kernel
	 * decides to purge those BOs.
	 * Note that BO consistency at dump time cannot be guaranteed. For
	 * example, if the owner of these BOs decides to re-use them or mark
	 * them purgeable again there's nothing we can do to prevent it.
	 */
	for (i = 0; i < kernel_state->user_state.bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);

		if (bo->madv == __VC4_MADV_NOTSUPP)
			continue;

		mutex_lock(&bo->madv_lock);
		if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
			bo->madv = VC4_MADV_WILLNEED;
		refcount_dec(&bo->usecnt);
		mutex_unlock(&bo->madv_lock);
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}
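
/* Resets the GPU after a hang: power-cycles V3D through runtime PM
 * (when a power reference is currently held) and re-initializes the
 * interrupt/job state via vc4_irq_reset().
 */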
static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(vc4->dev);

	vc4_reset(vc4->dev);
}
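
/* Timer callback for the hangcheck timer.  Compares the current
 * control-list addresses (CT0CA/CT1CA) against the values seen on the
 * previous check; if neither thread has advanced while jobs are still
 * queued, the GPU is assumed hung and the reset work is scheduled.
 */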
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
	struct drm_device *dev = vc4->dev;
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset.  This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}
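
/* Blocks until vc4->finished_seqno reaches the requested seqno, the
 * timeout expires (-ETIME), or a signal arrives (-ERESTARTSYS when
 * interruptible).  A timeout_ns of ~0ull means wait forever.
 */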
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);

	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}

static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches.  These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

static void
vc4_flush_texture_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
}

/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		vc4_move_job_to_render(dev, exec);
		goto again;
	}
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	/* A previous RCL may have written to one of our textures, and
	 * our full cache flush at bin time may have occurred before
	 * that RCL completed.  Flush the texture cache now, but not
	 * the instructions or uniforms (since we don't write those
	 * from an RCL).
	 */
	vc4_flush_texture_caches(dev);

	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}
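
/* Tags all of the job's BOs with its seqno and attaches the job's
 * fence to their reservation objects: a shared fence on every
 * referenced BO, plus an exclusive fence on the BOs the render job
 * writes to.
 */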
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;

		reservation_object_add_shared_fence(bo->resv, exec->fence);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		reservation_object_add_excl_fence(bo->resv, exec->fence);
	}
}

static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

		ww_mutex_unlock(&bo->resv->lock);
	}

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct vc4_bo *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = to_vc4_bo(&exec->bo[contended_lock]->base);
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = to_vc4_bo(&exec->bo[j]->base);
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = to_vc4_bo(&exec->bo[contended_lock]->base);
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = reservation_object_reserve_shared(bo->resv);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	fence->dev = dev;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no job was executing, kick ours off.  Otherwise, it'll
	 * get started when the previous job's flush done interrupt
	 * occurs.
	 */
	if (vc4_first_bin_job(vc4) == exec) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}

/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			break;
		}

		drm_gem_object_get(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

	if (ret)
		goto fail_put_bo;

	for (i = 0; i < exec->bo_count; i++) {
		ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
		if (ret)
			goto fail_dec_usecnt;
	}

	kvfree(handles);
	return 0;

fail_dec_usecnt:
	/* Decrease usecnt on acquired objects.
	 * We cannot rely on vc4_complete_exec() to release resources here,
	 * because vc4_complete_exec() has no information about which BO has
	 * had its ->usecnt incremented.
	 * To make things easier we just free everything explicitly and set
	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
	 * step.
	 */
	for (i-- ; i >= 0; i--)
		vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));

fail_put_bo:
	/* Release any reference to acquired objects. */
	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
		drm_gem_object_put_unlocked(&exec->bo[i]->base);

fail:
	kvfree(handles);
	kvfree(exec->bo);
	exec->bo = NULL;
	return ret;
}
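
/* Copies the binner command list, shader records and uniforms in from
 * userspace, validates them, and sets up exec->exec_bo with the
 * validated copies that the hardware will actually execute from.
 */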
static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
					  sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_DEBUG("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr is uncached access.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}
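
/* Releases everything a job was holding once it has finished (or been
 * force-completed by a reset): signals and drops the fence, drops the
 * BO references and usecnts, returns the job's bin slots, and drops
 * the power reference taken at submit time.
 */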
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++) {
			struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

			vc4_bo_dec_usecnt(bo);
			drm_gem_object_put_unlocked(&exec->bo[i]->base);
		}
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put_unlocked(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0) {
		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	kfree(exec);
}
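
/* Walks vc4->job_done_list, completing each finished job, and then
 * fires any seqno callbacks whose seqno has been reached.  Called from
 * vc4_job_done_work() below.
 */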
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(vc4->dev, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}
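
/* Registers a callback to be run (from a workqueue) once the given
 * seqno has been reached.  If it has already been reached, the work is
 * scheduled immediately.
 */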
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return ret;
}

/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}

static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}
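
/* ioctls for waiting on a specific seqno, or on the last job that used
 * a given BO.  On an interrupted wait the remaining timeout is written
 * back so that userspace can restart the call.
 */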
int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_put_unlocked(gem_obj);
	return ret;
}

/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_submit_cl *args = data;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount++ == 0) {
		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
		if (ret < 0) {
			mutex_unlock(&vc4->power_lock);
			vc4->power_refcount--;
			kfree(exec);
			return ret;
		}
	}
	mutex_unlock(&vc4->power_lock);

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(vc4->dev, exec);

	return ret;
}
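
/* One-time setup of the GEM/job-submission state: the job lists and
 * their lock, the hangcheck timer and reset work, the job-done work
 * item, and the power and purgeable-BO bookkeeping.
 */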
void
vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	vc4->dma_fence_context = dma_fence_context_alloc(1);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);

	INIT_LIST_HEAD(&vc4->purgeable.list);
	mutex_init(&vc4->purgeable.lock);
}

void
vc4_gem_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers.  Now free the object.
	 */
	if (vc4->bin_bo) {
		drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
		vc4->bin_bo = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);
}
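
/* Handles the VC4 GEM madvise ioctl: lets userspace mark a BO as
 * purgeable (VC4_MADV_DONTNEED) or needed again (VC4_MADV_WILLNEED),
 * and reports back whether the BO's contents were retained.
 */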
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_vc4_gem_madvise *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	switch (args->madv) {
	case VC4_MADV_DONTNEED:
	case VC4_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_vc4_bo(gem_obj);

	/* Only BOs exposed to userspace can be purged. */
	if (bo->madv == __VC4_MADV_NOTSUPP) {
		DRM_DEBUG("madvise not supported on this BO\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	/* Not sure it's safe to purge imported BOs. Let's just assume it's
	 * not until proven otherwise.
	 */
	if (gem_obj->import_attach) {
		DRM_DEBUG("madvise not supported on imported BOs\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	mutex_lock(&bo->madv_lock);

	if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
	    !refcount_read(&bo->usecnt)) {
		/* If the BO is about to be marked as purgeable, is not used
		 * and is not already purgeable or purged, add it to the
		 * purgeable list.
		 */
		vc4_bo_add_to_purgeable_pool(bo);
	} else if (args->madv == VC4_MADV_WILLNEED &&
		   bo->madv == VC4_MADV_DONTNEED &&
		   !refcount_read(&bo->usecnt)) {
		/* The BO has not been purged yet, just remove it from
		 * the purgeable list.
		 */
		vc4_bo_remove_from_purgeable_pool(bo);
	}

	/* Save the purged state. */
	args->retained = bo->madv != __VC4_MADV_PURGED;

	/* Update internal madv state only if the bo was not purged. */
	if (bo->madv != __VC4_MADV_PURGED)
		bo->madv = args->madv;

	mutex_unlock(&bo->madv_lock);

	ret = 0;

out_put_gem:
	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}