omap_irq.c 7.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
  4. * Author: Rob Clark <[email protected]>
  5. */
  6. #include <drm/drm_vblank.h>
  7. #include "omap_drv.h"
/*
 * One registered interrupt waiter: created by omap_irq_wait_init(),
 * woken from the IRQ handler, reaped and freed in omap_irq_wait().
 */
struct omap_irq_wait {
	/* entry on omap_drm_private::wait_list; protected by wait_lock */
	struct list_head node;
	/* the waiter sleeps here until count drops to zero */
	wait_queue_head_t wq;
	/* DISPC irq status bits this waiter is interested in */
	u32 irqmask;
	/* number of matching irq events still to be seen before wake-up */
	int count;
};
  14. /* call with wait_lock and dispc runtime held */
  15. static void omap_irq_update(struct drm_device *dev)
  16. {
  17. struct omap_drm_private *priv = dev->dev_private;
  18. struct omap_irq_wait *wait;
  19. u32 irqmask = priv->irq_mask;
  20. assert_spin_locked(&priv->wait_lock);
  21. list_for_each_entry(wait, &priv->wait_list, node)
  22. irqmask |= wait->irqmask;
  23. DBG("irqmask=%08x", irqmask);
  24. dispc_write_irqenable(priv->dispc, irqmask);
  25. }
  26. static void omap_irq_wait_handler(struct omap_irq_wait *wait)
  27. {
  28. wait->count--;
  29. wake_up(&wait->wq);
  30. }
  31. struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
  32. u32 irqmask, int count)
  33. {
  34. struct omap_drm_private *priv = dev->dev_private;
  35. struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
  36. unsigned long flags;
  37. init_waitqueue_head(&wait->wq);
  38. wait->irqmask = irqmask;
  39. wait->count = count;
  40. spin_lock_irqsave(&priv->wait_lock, flags);
  41. list_add(&wait->node, &priv->wait_list);
  42. omap_irq_update(dev);
  43. spin_unlock_irqrestore(&priv->wait_lock, flags);
  44. return wait;
  45. }
  46. int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
  47. unsigned long timeout)
  48. {
  49. struct omap_drm_private *priv = dev->dev_private;
  50. unsigned long flags;
  51. int ret;
  52. ret = wait_event_timeout(wait->wq, (wait->count <= 0), timeout);
  53. spin_lock_irqsave(&priv->wait_lock, flags);
  54. list_del(&wait->node);
  55. omap_irq_update(dev);
  56. spin_unlock_irqrestore(&priv->wait_lock, flags);
  57. kfree(wait);
  58. return ret == 0 ? -1 : 0;
  59. }
  60. int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable)
  61. {
  62. struct drm_device *dev = crtc->dev;
  63. struct omap_drm_private *priv = dev->dev_private;
  64. unsigned long flags;
  65. enum omap_channel channel = omap_crtc_channel(crtc);
  66. int framedone_irq =
  67. dispc_mgr_get_framedone_irq(priv->dispc, channel);
  68. DBG("dev=%p, crtc=%u, enable=%d", dev, channel, enable);
  69. spin_lock_irqsave(&priv->wait_lock, flags);
  70. if (enable)
  71. priv->irq_mask |= framedone_irq;
  72. else
  73. priv->irq_mask &= ~framedone_irq;
  74. omap_irq_update(dev);
  75. spin_unlock_irqrestore(&priv->wait_lock, flags);
  76. return 0;
  77. }
  78. /**
  79. * enable_vblank - enable vblank interrupt events
  80. * @crtc: DRM CRTC
  81. *
  82. * Enable vblank interrupts for @crtc. If the device doesn't have
  83. * a hardware vblank counter, this routine should be a no-op, since
  84. * interrupts will have to stay on to keep the count accurate.
  85. *
  86. * RETURNS
  87. * Zero on success, appropriate errno if the given @crtc's vblank
  88. * interrupt cannot be enabled.
  89. */
  90. int omap_irq_enable_vblank(struct drm_crtc *crtc)
  91. {
  92. struct drm_device *dev = crtc->dev;
  93. struct omap_drm_private *priv = dev->dev_private;
  94. unsigned long flags;
  95. enum omap_channel channel = omap_crtc_channel(crtc);
  96. DBG("dev=%p, crtc=%u", dev, channel);
  97. spin_lock_irqsave(&priv->wait_lock, flags);
  98. priv->irq_mask |= dispc_mgr_get_vsync_irq(priv->dispc,
  99. channel);
  100. omap_irq_update(dev);
  101. spin_unlock_irqrestore(&priv->wait_lock, flags);
  102. return 0;
  103. }
  104. /**
  105. * disable_vblank - disable vblank interrupt events
  106. * @crtc: DRM CRTC
  107. *
  108. * Disable vblank interrupts for @crtc. If the device doesn't have
  109. * a hardware vblank counter, this routine should be a no-op, since
  110. * interrupts will have to stay on to keep the count accurate.
  111. */
  112. void omap_irq_disable_vblank(struct drm_crtc *crtc)
  113. {
  114. struct drm_device *dev = crtc->dev;
  115. struct omap_drm_private *priv = dev->dev_private;
  116. unsigned long flags;
  117. enum omap_channel channel = omap_crtc_channel(crtc);
  118. DBG("dev=%p, crtc=%u", dev, channel);
  119. spin_lock_irqsave(&priv->wait_lock, flags);
  120. priv->irq_mask &= ~dispc_mgr_get_vsync_irq(priv->dispc,
  121. channel);
  122. omap_irq_update(dev);
  123. spin_unlock_irqrestore(&priv->wait_lock, flags);
  124. }
/*
 * Report plane FIFO underflows flagged in @irqstatus, rate-limited so a
 * persistent underflow cannot flood the kernel log.
 */
static void omap_irq_fifo_underflow(struct omap_drm_private *priv,
		u32 irqstatus)
{
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	/* Map of each overlay's underflow bit to a printable name. */
	static const struct {
		const char *name;
		u32 mask;
	} sources[] = {
		{ "gfx", DISPC_IRQ_GFX_FIFO_UNDERFLOW },
		{ "vid1", DISPC_IRQ_VID1_FIFO_UNDERFLOW },
		{ "vid2", DISPC_IRQ_VID2_FIFO_UNDERFLOW },
		{ "vid3", DISPC_IRQ_VID3_FIFO_UNDERFLOW },
	};
	const u32 mask = DISPC_IRQ_GFX_FIFO_UNDERFLOW
		| DISPC_IRQ_VID1_FIFO_UNDERFLOW
		| DISPC_IRQ_VID2_FIFO_UNDERFLOW
		| DISPC_IRQ_VID3_FIFO_UNDERFLOW;
	unsigned int i;

	/*
	 * Only consider underflow bits that are currently enabled in
	 * irq_mask; irq_mask is protected by wait_lock.
	 */
	spin_lock(&priv->wait_lock);
	irqstatus &= priv->irq_mask & mask;
	spin_unlock(&priv->wait_lock);

	if (!irqstatus)
		return;

	if (!__ratelimit(&_rs))
		return;

	/* One log line: "FIFO underflow on gfx vid1 ... (0x........)" */
	DRM_ERROR("FIFO underflow on ");

	for (i = 0; i < ARRAY_SIZE(sources); ++i) {
		if (sources[i].mask & irqstatus)
			pr_cont("%s ", sources[i].name);
	}

	pr_cont("(0x%08x)\n", irqstatus);
}
  158. static void omap_irq_ocp_error_handler(struct drm_device *dev,
  159. u32 irqstatus)
  160. {
  161. if (!(irqstatus & DISPC_IRQ_OCP_ERR))
  162. return;
  163. dev_err_ratelimited(dev->dev, "OCP error\n");
  164. }
/*
 * Top-level DISPC interrupt handler (hard-irq context): reads and acks
 * the raw status, dispatches per-CRTC vblank/sync-lost/framedone
 * events, reports errors, then wakes any registered waiters.
 */
static irqreturn_t omap_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait, *n;
	unsigned long flags;
	unsigned int id;
	u32 irqstatus;

	/* Ack exactly the bits we observed, then read back to make sure
	 * the clear has reached the hardware before we handle them. */
	irqstatus = dispc_read_irqstatus(priv->dispc);
	dispc_clear_irqstatus(priv->dispc, irqstatus);
	dispc_read_irqstatus(priv->dispc);        /* flush posted write */

	VERB("irqs: %08x", irqstatus);

	for (id = 0; id < priv->num_pipes; id++) {
		struct drm_crtc *crtc = priv->pipes[id].crtc;
		enum omap_channel channel = omap_crtc_channel(crtc);

		if (irqstatus & dispc_mgr_get_vsync_irq(priv->dispc, channel)) {
			drm_handle_vblank(dev, id);
			omap_crtc_vblank_irq(crtc);
		}

		if (irqstatus & dispc_mgr_get_sync_lost_irq(priv->dispc, channel))
			omap_crtc_error_irq(crtc, irqstatus);

		if (irqstatus & dispc_mgr_get_framedone_irq(priv->dispc, channel))
			omap_crtc_framedone_irq(crtc, irqstatus);
	}

	omap_irq_ocp_error_handler(dev, irqstatus);
	omap_irq_fifo_underflow(priv, irqstatus);

	/* Wake every waiter whose mask matches one of the fired irqs;
	 * _safe variant since a woken waiter may unlink itself. */
	spin_lock_irqsave(&priv->wait_lock, flags);
	list_for_each_entry_safe(wait, n, &priv->wait_list, node) {
		if (wait->irqmask & irqstatus)
			omap_irq_wait_handler(wait);
	}
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return IRQ_HANDLED;
}
/* Per-plane FIFO underflow irq bit, indexed by DSS plane id. */
static const u32 omap_underflow_irqs[] = {
	[OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
};
/*
 * Install the DISPC interrupt handler and build the static part of the
 * irq enable mask: OCP error, FIFO underflow for each existing plane,
 * and sync-lost for each manager. Vblank/framedone bits are added on
 * demand by the enable helpers above.
 *
 * Returns 0 on success or the negative error from dispc_request_irq().
 */
int omap_drm_irq_install(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	unsigned int num_mgrs = dispc_get_num_mgrs(priv->dispc);
	unsigned int max_planes;
	unsigned int i;
	int ret;

	spin_lock_init(&priv->wait_lock);
	INIT_LIST_HEAD(&priv->wait_list);

	priv->irq_mask = DISPC_IRQ_OCP_ERR;

	/* Enable underflow reporting only for planes that actually exist. */
	max_planes = min(ARRAY_SIZE(priv->planes),
			 ARRAY_SIZE(omap_underflow_irqs));
	for (i = 0; i < max_planes; ++i) {
		if (priv->planes[i])
			priv->irq_mask |= omap_underflow_irqs[i];
	}

	for (i = 0; i < num_mgrs; ++i)
		priv->irq_mask |= dispc_mgr_get_sync_lost_irq(priv->dispc, i);

	/* Clear stale status before the handler can run; touching DISPC
	 * registers requires the clocks on (runtime PM get/put). */
	dispc_runtime_get(priv->dispc);
	dispc_clear_irqstatus(priv->dispc, 0xffffffff);
	dispc_runtime_put(priv->dispc);

	ret = dispc_request_irq(priv->dispc, omap_irq_handler, dev);
	if (ret < 0)
		return ret;

	priv->irq_enabled = true;

	return 0;
}
  232. void omap_drm_irq_uninstall(struct drm_device *dev)
  233. {
  234. struct omap_drm_private *priv = dev->dev_private;
  235. if (!priv->irq_enabled)
  236. return;
  237. priv->irq_enabled = false;
  238. dispc_free_irq(priv->dispc, dev);
  239. }