v3d_irq.c

// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a bin, render, TFU done, or CSD done interrupt, we
 * need to signal the fence for that job so that the scheduler can
 * queue up the next one and unblock any waiters.
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include <linux/platform_device.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |	\
			     V3D_INT_FLDONE |	\
			     V3D_INT_FRDONE |	\
			     V3D_INT_CSDDONE |	\
			     V3D_INT_GMPV))

#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |	\
			    V3D_HUB_INT_MMU_PTI |	\
			    V3D_HUB_INT_MMU_CAP |	\
			    V3D_HUB_INT_TFUC))
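
/* v3d_irq() falls through to v3d_hub_irq() when the core and hub share a
 * single interrupt line, so the hub handler is declared ahead of it.
 */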
static irqreturn_t
v3d_hub_irq(int irq, void *arg);

static void
v3d_overflow_mem_work(struct work_struct *work)
{
	struct v3d_dev *v3d =
		container_of(work, struct v3d_dev, overflow_mem_work);
	struct drm_device *dev = &v3d->drm;
	struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
	struct drm_gem_object *obj;
	unsigned long irqflags;

	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		return;
	}
	obj = &bo->base.base;

	/* We lost a race, and our work task came in after the bin job
	 * completed and exited.  This can happen because the HW
	 * signals OOM before it's fully OOM, so the binner might just
	 * barely complete.
	 *
	 * If we lose the race and our work task comes in after a new
	 * bin job got scheduled, that's fine.  We'll just give them
	 * some binner pool anyway.
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (!v3d->bin_job) {
		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
		goto out;
	}
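
	/* Hold an extra reference and park the BO on the render job's
	 * unref list so it stays allocated until the job that consumes
	 * the bin results has finished.
	 */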
	drm_gem_object_get(obj);
	list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);
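
	/* Hand the new buffer to the binner: the base address and size of
	 * the overflow memory it should spill into.
	 */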
	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);

out:
	drm_gem_object_put(obj);
}

static irqreturn_t
v3d_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

	if (intsts & V3D_INT_OUTOMEM) {
		/* Note that the OOM status is edge signaled, so the
		 * interrupt won't happen again until we actually
		 * add more memory.  Also, as of V3D 4.1, FLDONE won't
		 * be reported until any OOM state has been cleared.
		 */
		schedule_work(&v3d->overflow_mem_work);
		status = IRQ_HANDLED;
	}
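
	/* Each *DONE interrupt means the current job on that queue has
	 * completed; signal its fence so the scheduler can queue up the
	 * next job and unblock any waiters.
	 */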
	if (intsts & V3D_INT_FLDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->bin_job->base.irq_fence);

		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FRDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->render_job->base.irq_fence);

		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_CSDDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->csd_job->base.irq_fence);

		trace_v3d_csd_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	/* We shouldn't be triggering these if we have GMP in
	 * always-allowed mode.
	 */
	if (intsts & V3D_INT_GMPV)
		dev_err(v3d->drm.dev, "GMP violation\n");

	/* V3D 4.2 wires the hub and core IRQs together, so if we
	 * didn't see the common one then check hub for MMU IRQs.
	 */
	if (v3d->single_irq_line && status == IRQ_NONE)
		return v3d_hub_irq(irq, arg);

	return status;
}

static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_READ(V3D_HUB_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_WRITE(V3D_HUB_INT_CLR, intsts);

	if (intsts & V3D_HUB_INT_TFUC) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->tfu_job->base.irq_fence);

		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	if (intsts & (V3D_HUB_INT_MMU_WRV |
		      V3D_HUB_INT_MMU_PTI |
		      V3D_HUB_INT_MMU_CAP)) {
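		/* The MMU latches the AXI ID of the offending client and
		 * the upper 32 bits of the faulting address; shift the
		 * latter back up to the full virtual address width.
		 */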
		u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
		u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) <<
				(v3d->va_width - 32));
		static const char *const v3d41_axi_ids[] = {
			"L2T",
			"PTB",
			"PSE",
			"TLB",
			"CLE",
			"TFU",
			"MMU",
			"GMP",
		};
		const char *client = "?";
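
		/* Acknowledge the captured violation by writing the latched
		 * status bits back to V3D_MMU_CTL.
		 */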
		V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
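
		/* On V3D 4.1+ the client identifier lives in the upper bits
		 * of the AXI ID, so shift it down before indexing the name
		 * table.
		 */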
		if (v3d->ver >= 41) {
			axi_id = axi_id >> 5;
			if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
				client = v3d41_axi_ids[axi_id];
		}

		dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
			client, axi_id, (long long)vio_addr,
			((intsts & V3D_HUB_INT_MMU_WRV) ?
			 ", write violation" : ""),
			((intsts & V3D_HUB_INT_MMU_PTI) ?
			 ", pte invalid" : ""),
			((intsts & V3D_HUB_INT_MMU_CAP) ?
			 ", cap exceeded" : ""));

		status = IRQ_HANDLED;
	}

	return status;
}

int
v3d_irq_init(struct v3d_dev *v3d)
{
	int irq1, ret, core;

	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
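
	/* A second platform IRQ means the core and hub interrupts are
	 * wired separately; otherwise everything arrives on one shared
	 * line and v3d_irq() dispatches to the hub handler itself.
	 */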
	irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
	if (irq1 == -EPROBE_DEFER)
		return irq1;
	if (irq1 > 0) {
		ret = devm_request_irq(v3d->drm.dev, irq1,
				       v3d_irq, IRQF_SHARED,
				       "v3d_core0", v3d);
		if (ret)
			goto fail;
		ret = devm_request_irq(v3d->drm.dev,
				       platform_get_irq(v3d_to_pdev(v3d), 0),
				       v3d_hub_irq, IRQF_SHARED,
				       "v3d_hub", v3d);
		if (ret)
			goto fail;
	} else {
		v3d->single_irq_line = true;

		ret = devm_request_irq(v3d->drm.dev,
				       platform_get_irq(v3d_to_pdev(v3d), 0),
				       v3d_irq, IRQF_SHARED,
				       "v3d", v3d);
		if (ret)
			goto fail;
	}

	v3d_irq_enable(v3d);
	return 0;

fail:
	if (ret != -EPROBE_DEFER)
		dev_err(v3d->drm.dev, "IRQ setup failed: %d\n", ret);
	return ret;
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
	int core;

	/* Enable our set of interrupts, masking out any others. */
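	/* MSK_SET masks (disables) the interrupts whose bits are written;
	 * MSK_CLR unmasks them.
	 */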
	for (core = 0; core < v3d->cores; core++) {
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
	}

	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
	V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}

void
v3d_irq_disable(struct v3d_dev *v3d)
{
	int core;

	/* Disable all interrupts. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

	/* Clear any pending interrupts we might have left. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
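
	/* Make sure a queued overflow-memory worker isn't still running
	 * (or about to run) now that its interrupt source is masked.
	 */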
	cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
	v3d_irq_enable(v3d);
}