v3d_irq.c

// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a binning or rendering flush done interrupt, we need
 * to signal the fence for that job so that the scheduler can queue up
 * the next one and unblock any waiters.
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */
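
/*
 * A minimal sketch (not part of this file) of how these entry points
 * fit together; the reset helper named here is hypothetical:
 *
 *	v3d_irq_init(v3d);	// at probe time
 *	...
 *	v3d_irq_disable(v3d);	// quiesce IRQs before a GPU reset
 *	v3d_do_hw_reset(v3d);	// hypothetical hardware reset sequence
 *	v3d_irq_reset(v3d);	// re-arm the interrupt masks afterwards
 */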

#include "v3d_drv.h"
#include "v3d_regs.h"

#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |	\
			     V3D_INT_FLDONE |	\
			     V3D_INT_FRDONE |	\
			     V3D_INT_GMPV))

#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |	\
			    V3D_HUB_INT_MMU_PTI |	\
			    V3D_HUB_INT_MMU_CAP))

static void
v3d_overflow_mem_work(struct work_struct *work)
{
	struct v3d_dev *v3d =
		container_of(work, struct v3d_dev, overflow_mem_work);
	struct drm_device *dev = &v3d->drm;
	struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
	unsigned long irqflags;

	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		return;
	}

	/* We lost a race, and our work task came in after the bin job
	 * completed and exited.  This can happen because the HW
	 * signals OOM before it's fully OOM, so the binner might just
	 * barely complete.
	 *
	 * If we lose the race and our work task comes in after a new
	 * bin job got scheduled, that's fine.  We'll just give them
	 * some binner pool anyway.
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (!v3d->bin_job) {
		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
		goto out;
	}

	drm_gem_object_get(&bo->base);
	list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);
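
	/* Point the binner at the new backing store: BPOA takes the
	 * overflow area's base address in bytes (the drm_mm node
	 * stores it in pages, hence the PAGE_SHIFT), BPOS its size.
	 */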
	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size);

out:
	drm_gem_object_put_unlocked(&bo->base);
}

static irqreturn_t
v3d_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

	if (intsts & V3D_INT_OUTOMEM) {
		/* Note that the OOM status is edge signaled, so the
		 * interrupt won't happen again until we actually
		 * add more memory.
		 */
		schedule_work(&v3d->overflow_mem_work);
		status = IRQ_HANDLED;
	}
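
	/* A flush-done interrupt means the current bin or render job
	 * has finished; signal its fence so the scheduler can queue
	 * the next job and unblock any waiters.
	 */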
	if (intsts & V3D_INT_FLDONE) {
		dma_fence_signal(v3d->bin_job->bin.done_fence);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FRDONE) {
		dma_fence_signal(v3d->render_job->render.done_fence);
		status = IRQ_HANDLED;
	}

	/* We shouldn't be triggering these if we have GMP in
	 * always-allowed mode.
	 */
	if (intsts & V3D_INT_GMPV)
		dev_err(v3d->dev, "GMP violation\n");

	return status;
}
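
/*
 * A minimal sketch of the consumer side of the fences signaled above,
 * assuming the caller holds a job reference; the 500 ms timeout is an
 * illustrative value, not something this driver uses:
 *
 *	long t = dma_fence_wait_timeout(job->bin.done_fence, true,
 *					msecs_to_jiffies(500));
 *	if (t == 0)
 *		... timed out: the bin job is likely hung ...
 *	else if (t < 0)
 *		... the wait was interrupted by a signal ...
 */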

static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_READ(V3D_HUB_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_WRITE(V3D_HUB_INT_CLR, intsts);

	if (intsts & (V3D_HUB_INT_MMU_WRV |
		      V3D_HUB_INT_MMU_PTI |
		      V3D_HUB_INT_MMU_CAP)) {
		u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
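		/* The violation address register holds the faulting
		 * address with the low 8 bits dropped, so shift it
		 * back up to recover a byte address.
		 */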
		u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8;

		dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n",
			axi_id, (long long)vio_addr,
			((intsts & V3D_HUB_INT_MMU_WRV) ?
			 ", write violation" : ""),
			((intsts & V3D_HUB_INT_MMU_PTI) ?
			 ", pte invalid" : ""),
			((intsts & V3D_HUB_INT_MMU_CAP) ?
			 ", cap exceeded" : ""));

		status = IRQ_HANDLED;
	}

	return status;
}

void
v3d_irq_init(struct v3d_dev *v3d)
{
	int ret, core;

	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
			       v3d_hub_irq, IRQF_SHARED,
			       "v3d_hub", v3d);
	if (ret)
		dev_err(v3d->dev, "hub IRQ setup failed: %d\n", ret);

	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
			       v3d_irq, IRQF_SHARED,
			       "v3d_core0", v3d);
	if (ret)
		dev_err(v3d->dev, "core IRQ setup failed: %d\n", ret);

	v3d_irq_enable(v3d);
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
	int core;

	/* Enable our set of interrupts, masking out any others. */
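	/* MSK_SET and MSK_CLR are set/clear register pairs: writing a
	 * 1 to a MSK_SET bit masks (disables) that interrupt, while
	 * writing a 1 to the same MSK_CLR bit unmasks it.
	 */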
	for (core = 0; core < v3d->cores; core++) {
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
	}
	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
	V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}

void
v3d_irq_disable(struct v3d_dev *v3d)
{
	int core;

	/* Disable all interrupts. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

	/* Clear any pending interrupts we might have left. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
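
	/* With the IRQs masked, no new overflow work can be queued;
	 * also wait for any already-scheduled work to finish.
	 */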
	cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
	v3d_irq_enable(v3d);
}