// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a binning or rendering flush done interrupt, we need
 * to signal the fence for that job so that the scheduler can queue up
 * the next one and unblock any waiters.
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"

#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \
                             V3D_INT_FLDONE | \
                             V3D_INT_FRDONE | \
                             V3D_INT_GMPV))

#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \
                            V3D_HUB_INT_MMU_PTI | \
                            V3D_HUB_INT_MMU_CAP))
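
/* Worker for the binner out-of-memory interrupt: allocates a fresh
 * 256KB buffer object and hands it to the binner so the stalled bin
 * job can make progress.
 */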
static void
v3d_overflow_mem_work(struct work_struct *work)
{
        struct v3d_dev *v3d =
                container_of(work, struct v3d_dev, overflow_mem_work);
        struct drm_device *dev = &v3d->drm;
        struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
        unsigned long irqflags;

        if (IS_ERR(bo)) {
                DRM_ERROR("Couldn't allocate binner overflow mem\n");
                return;
        }

        /* We may have lost a race: if our work task came in after the
         * bin job completed and exited, there is nothing left to do.
         * This can happen because the HW signals OOM before it's fully
         * out of memory, so the binner might just barely complete.
         *
         * If we lose the race the other way, and our work task comes
         * in after a new bin job got scheduled, that's fine.  We'll
         * just give it some binner pool anyway.
         */
        spin_lock_irqsave(&v3d->job_lock, irqflags);
        if (!v3d->bin_job) {
                spin_unlock_irqrestore(&v3d->job_lock, irqflags);
                goto out;
        }

        drm_gem_object_get(&bo->base);
        list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
        spin_unlock_irqrestore(&v3d->job_lock, irqflags);
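
        /* Point the binner at the new overflow buffer: BPOA takes its
         * base address and BPOS its size.  (Assumed from the register
         * usage here: the BPOS write is what lets the stalled bin job
         * continue.)
         */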
        V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
        V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size);

out:
        drm_gem_object_put_unlocked(&bo->base);
}
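
/* Per-core interrupt handler: reads and acknowledges the core's
 * interrupt status, kicks the overflow-memory worker on OOM, and
 * signals job fences on binning/rendering flush done.
 */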
static irqreturn_t
v3d_irq(int irq, void *arg)
{
        struct v3d_dev *v3d = arg;
        u32 intsts;
        irqreturn_t status = IRQ_NONE;

        intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

        /* Acknowledge the interrupts we're handling here. */
        V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

        if (intsts & V3D_INT_OUTOMEM) {
                /* Note that the OOM status is edge signaled, so the
                 * interrupt won't happen again until we actually
                 * add more memory.
                 */
                schedule_work(&v3d->overflow_mem_work);
                status = IRQ_HANDLED;
        }
        if (intsts & V3D_INT_FLDONE) {
                v3d->queue[V3D_BIN].finished_seqno++;
                dma_fence_signal(v3d->bin_job->bin.done_fence);
                status = IRQ_HANDLED;
        }

        if (intsts & V3D_INT_FRDONE) {
                v3d->queue[V3D_RENDER].finished_seqno++;
                dma_fence_signal(v3d->render_job->render.done_fence);
                status = IRQ_HANDLED;
        }

        /* We shouldn't be triggering these if we have GMP in
         * always-allowed mode.
         */
        if (intsts & V3D_INT_GMPV)
                dev_err(v3d->dev, "GMP violation\n");

        return status;
}
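
/* Hub-level interrupt handler: decodes and reports MMU violations
 * (write violation, invalid PTE, cap exceeded).
 */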
static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
        struct v3d_dev *v3d = arg;
        u32 intsts;
        irqreturn_t status = IRQ_NONE;

        intsts = V3D_READ(V3D_HUB_INT_STS);

        /* Acknowledge the interrupts we're handling here. */
        V3D_WRITE(V3D_HUB_INT_CLR, intsts);

        if (intsts & (V3D_HUB_INT_MMU_WRV |
                      V3D_HUB_INT_MMU_PTI |
                      V3D_HUB_INT_MMU_CAP)) {
                u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
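                /* VIO_ADDR reports the faulting address shifted right
                 * by 8 bits, so shift it back into place.
                 */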
                u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8;

                dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n",
                        axi_id, (long long)vio_addr,
                        ((intsts & V3D_HUB_INT_MMU_WRV) ?
                         ", write violation" : ""),
                        ((intsts & V3D_HUB_INT_MMU_PTI) ?
                         ", pte invalid" : ""),
                        ((intsts & V3D_HUB_INT_MMU_CAP) ?
                         ", cap exceeded" : ""));
                status = IRQ_HANDLED;
        }

        return status;
}
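
/* Requests the hub and core IRQs, clears anything left pending, and
 * unmasks the interrupts we handle.
 */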
void
v3d_irq_init(struct v3d_dev *v3d)
{
        int ret, core;

        INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

        /* Clear any pending interrupts someone might have left around
         * for us.
         */
        for (core = 0; core < v3d->cores; core++)
                V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
        V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

        ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
                               v3d_hub_irq, IRQF_SHARED,
                               "v3d_hub", v3d);
        if (ret) {
                dev_err(v3d->dev, "hub IRQ setup failed: %d\n", ret);
                return;
        }

        ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
                               v3d_irq, IRQF_SHARED,
                               "v3d_core0", v3d);
        if (ret) {
                dev_err(v3d->dev, "core IRQ setup failed: %d\n", ret);
                return;
        }

        v3d_irq_enable(v3d);
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
        int core;

        /* Enable our set of interrupts, masking out any others. */
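        /* MSK_SET and MSK_CLR appear to be write-1-to-set /
         * write-1-to-clear views of a single mask register, with a set
         * bit blocking that interrupt (assumed from the usage below).
         */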
        for (core = 0; core < v3d->cores; core++) {
                V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
                V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
        }

        V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
        V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}

void
v3d_irq_disable(struct v3d_dev *v3d)
{
        int core;

        /* Disable all interrupts. */
        for (core = 0; core < v3d->cores; core++)
                V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
        V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

        /* Clear any pending interrupts we might have left. */
        for (core = 0; core < v3d->cores; core++)
                V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
        V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
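
        /* Cancel (or wait out) any queued overflow-memory work so it
         * can't touch the hardware after this point.
         */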
        cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
        v3d_irq_enable(v3d);
}