// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2018 Broadcom */

/**
 * DOC: Broadcom V3D scheduling
 *
 * The shared DRM GPU scheduler is used to coordinate submitting jobs
 * to the hardware. Each DRM fd (roughly a client process) gets its
 * own scheduler entity, which will process jobs in order. The GPU
 * scheduler will round-robin between clients to submit the next job.
 *
 * For simplicity, and in order to keep latency low for interactive
 * jobs when bulk background jobs are queued up, we submit a new job
 * to the HW only when it has completed the last one, instead of
 * filling up the CT[01]Q FIFOs with jobs. Similarly, we use
 * v3d_job_dependency() to manage the dependency between bin and
 * render, instead of having the clients submit jobs using the HW's
 * semaphores to interlock between them.
 */

#include <linux/kthread.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

static struct v3d_job *
to_v3d_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_job, base);
}
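
/* Scheduler .free_job hook, called once the job is done with; drops
 * this job's reference on the exec.
 */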
static void
v3d_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	v3d_exec_put(job->exec);
}

/**
 * Returns the fences that the bin or render job depends on, one by one.
 * v3d_job_run() won't be called until all of them have been signaled.
 */
static struct dma_fence *
v3d_job_dependency(struct drm_sched_job *sched_job,
		   struct drm_sched_entity *s_entity)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_exec_info *exec = job->exec;
	enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
	struct dma_fence *fence;
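
	/* An explicit in-fence attached to the job at submit time is
	 * handed back first, if there is one.
	 */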
	fence = job->in_fence;
	if (fence) {
		job->in_fence = NULL;
		return fence;
	}

	if (q == V3D_RENDER) {
		/* If we had a bin job, the render job definitely depends on
		 * it. We first have to wait for bin to be scheduled, so that
		 * its done_fence is created.
		 */
		fence = exec->bin_done_fence;
		if (fence) {
			exec->bin_done_fence = NULL;
			return fence;
		}
	}

	/* XXX: Wait on a fence for switching the GMP if necessary,
	 * and then do so.
	 */

	return fence;
}
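
/* Scheduler .run_job hook: records the job as the queue's current one,
 * invalidates the caches, creates the job's completion fence, and
 * starts the control list on the hardware. Returns the fence that will
 * signal when the HW finishes.
 */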
static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_exec_info *exec = job->exec;
	enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
	struct v3d_dev *v3d = exec->v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;
	unsigned long irqflags;
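
	/* Bail out if the scheduler already signaled this job's fence
	 * with an error (e.g. because it was flagged guilty during a
	 * reset).
	 */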
	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	/* Lock required around bin_job update vs
	 * v3d_overflow_mem_work().
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (q == V3D_BIN) {
		v3d->bin_job = job->exec;

		/* Clear out the overflow allocation, so we don't
		 * reuse the overflow attached to a previous job.
		 */
		V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
	} else {
		v3d->render_job = job->exec;
	}
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	/* Can we avoid this flush when q==RENDER? We need to be
	 * careful of scheduling, though -- imagine job0 rendering to
	 * texture and job1 reading, and them being executed as bin0,
	 * bin1, render0, render1, so that render1's flush at bin time
	 * wasn't enough.
	 */
	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, q);
	if (!fence)
		return fence;
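
	/* run_job may be invoked again on the same job if it's
	 * resubmitted after a GPU reset, so drop any fence left over
	 * from a previous run before taking a reference on this one.
	 */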
	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno,
			    job->start, job->end);
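
	/* For bin jobs, program the binner's tile allocation (QMA/QMS)
	 * and tile state (QTS) buffer addresses from the submitted
	 * exec, when present.
	 */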
	if (q == V3D_BIN) {
		if (exec->qma) {
			V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, exec->qma);
			V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, exec->qms);
		}
		if (exec->qts) {
			V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
				       V3D_CLE_CT0QTS_ENABLE |
				       exec->qts);
		}
	} else {
		/* XXX: Set the QCFG */
	}

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_CORE_WRITE(0, V3D_CLE_CTNQBA(q), job->start);
	V3D_CORE_WRITE(0, V3D_CLE_CTNQEA(q), job->end);

	return fence;
}
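
/* Scheduler .timedout_job hook: parks both queues' scheduler threads,
 * resets the GPU, then lets the schedulers resubmit the unfinished
 * jobs.
 */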
static void
v3d_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_exec_info *exec = job->exec;
	struct v3d_dev *v3d = exec->v3d;
	enum v3d_queue q;

	mutex_lock(&v3d->reset_lock);

	/* block scheduler */
	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;

		kthread_park(sched->thread);
		drm_sched_hw_job_reset(sched, (sched_job->sched == sched ?
					       sched_job : NULL));
	}

	/* get the GPU back into the init state */
	v3d_reset(v3d);

	/* Unblock schedulers and restart their jobs. */
	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		drm_sched_job_recovery(&v3d->queue[q].sched);
		kthread_unpark(v3d->queue[q].sched.thread);
	}

	mutex_unlock(&v3d->reset_lock);
}
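
/* Entry points the shared DRM GPU scheduler uses to drive V3D jobs. */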
static const struct drm_sched_backend_ops v3d_sched_ops = {
	.dependency = v3d_job_dependency,
	.run_job = v3d_job_run,
	.timedout_job = v3d_job_timedout,
	.free_job = v3d_job_free
};
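
/* Creates one scheduler instance per queue (bin and render). The
 * hw_jobs_limit of 1 implements the "one job on the HW at a time"
 * policy described in the DOC comment above.
 */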
int
v3d_sched_init(struct v3d_dev *v3d)
{
	int hw_jobs_limit = 1;
	int job_hang_limit = 0;
	int hang_limit_ms = 500;
	int ret;

	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
			     &v3d_sched_ops,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms),
			     "v3d_bin");
	if (ret) {
		dev_err(v3d->dev, "Failed to create bin scheduler: %d.", ret);
		return ret;
	}

	ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
			     &v3d_sched_ops,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms),
			     "v3d_render");
	if (ret) {
		dev_err(v3d->dev, "Failed to create render scheduler: %d.",
			ret);
		drm_sched_fini(&v3d->queue[V3D_BIN].sched);
		return ret;
	}

	return 0;
}
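
/* Tears down both queues' schedulers; the inverse of v3d_sched_init().
 */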
void
v3d_sched_fini(struct v3d_dev *v3d)
{
	enum v3d_queue q;

	for (q = 0; q < V3D_MAX_QUEUES; q++)
		drm_sched_fini(&v3d->queue[q].sched);
}