  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /**************************************************************************
  3. * Copyright (c) 2007, Intel Corporation.
  4. * All Rights Reserved.
  5. *
  6. * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
  7. * develop this driver.
  8. *
  9. **************************************************************************/
  10. #include <drm/drm_drv.h>
  11. #include <drm/drm_vblank.h>
  12. #include "power.h"
  13. #include "psb_drv.h"
  14. #include "psb_intel_reg.h"
  15. #include "psb_irq.h"
  16. #include "psb_reg.h"
  17. /*
  18. * inline functions
  19. */
  20. static inline u32 gma_pipestat(int pipe)
  21. {
  22. if (pipe == 0)
  23. return PIPEASTAT;
  24. if (pipe == 1)
  25. return PIPEBSTAT;
  26. if (pipe == 2)
  27. return PIPECSTAT;
  28. BUG();
  29. }
  30. static inline u32 gma_pipe_event(int pipe)
  31. {
  32. if (pipe == 0)
  33. return _PSB_PIPEA_EVENT_FLAG;
  34. if (pipe == 1)
  35. return _MDFLD_PIPEB_EVENT_FLAG;
  36. if (pipe == 2)
  37. return _MDFLD_PIPEC_EVENT_FLAG;
  38. BUG();
  39. }
  40. static inline u32 gma_pipeconf(int pipe)
  41. {
  42. if (pipe == 0)
  43. return PIPEACONF;
  44. if (pipe == 1)
  45. return PIPEBCONF;
  46. if (pipe == 2)
  47. return PIPECCONF;
  48. BUG();
  49. }
  50. void gma_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
  51. {
  52. if ((dev_priv->pipestat[pipe] & mask) != mask) {
  53. u32 reg = gma_pipestat(pipe);
  54. dev_priv->pipestat[pipe] |= mask;
  55. /* Enable the interrupt, clear any pending status */
  56. if (gma_power_begin(&dev_priv->dev, false)) {
  57. u32 writeVal = PSB_RVDC32(reg);
  58. writeVal |= (mask | (mask >> 16));
  59. PSB_WVDC32(writeVal, reg);
  60. (void) PSB_RVDC32(reg);
  61. gma_power_end(&dev_priv->dev);
  62. }
  63. }
  64. }
  65. void gma_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
  66. {
  67. if ((dev_priv->pipestat[pipe] & mask) != 0) {
  68. u32 reg = gma_pipestat(pipe);
  69. dev_priv->pipestat[pipe] &= ~mask;
  70. if (gma_power_begin(&dev_priv->dev, false)) {
  71. u32 writeVal = PSB_RVDC32(reg);
  72. writeVal &= ~mask;
  73. PSB_WVDC32(writeVal, reg);
  74. (void) PSB_RVDC32(reg);
  75. gma_power_end(&dev_priv->dev);
  76. }
  77. }
  78. }
  79. /*
  80. * Display controller interrupt handler for pipe event.
  81. */
  82. static void gma_pipe_event_handler(struct drm_device *dev, int pipe)
  83. {
  84. struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
  85. uint32_t pipe_stat_val = 0;
  86. uint32_t pipe_stat_reg = gma_pipestat(pipe);
  87. uint32_t pipe_enable = dev_priv->pipestat[pipe];
  88. uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
  89. uint32_t pipe_clear;
  90. uint32_t i = 0;
  91. spin_lock(&dev_priv->irqmask_lock);
  92. pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
  93. pipe_stat_val &= pipe_enable | pipe_status;
  94. pipe_stat_val &= pipe_stat_val >> 16;
  95. spin_unlock(&dev_priv->irqmask_lock);
  96. /* Clear the 2nd level interrupt status bits
  97. * Sometimes the bits are very sticky so we repeat until they unstick */
  98. for (i = 0; i < 0xffff; i++) {
  99. PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
  100. pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
  101. if (pipe_clear == 0)
  102. break;
  103. }
  104. if (pipe_clear)
  105. dev_err(dev->dev,
  106. "%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
  107. __func__, pipe, PSB_RVDC32(pipe_stat_reg));
  108. if (pipe_stat_val & PIPE_VBLANK_STATUS) {
  109. struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
  110. struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
  111. unsigned long flags;
  112. drm_handle_vblank(dev, pipe);
  113. spin_lock_irqsave(&dev->event_lock, flags);
  114. if (gma_crtc->page_flip_event) {
  115. drm_crtc_send_vblank_event(crtc,
  116. gma_crtc->page_flip_event);
  117. gma_crtc->page_flip_event = NULL;
  118. drm_crtc_vblank_put(crtc);
  119. }
  120. spin_unlock_irqrestore(&dev->event_lock, flags);
  121. }
  122. }
  123. /*
  124. * Display controller interrupt handler.
  125. */
  126. static void gma_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
  127. {
  128. if (vdc_stat & _PSB_IRQ_ASLE)
  129. psb_intel_opregion_asle_intr(dev);
  130. if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
  131. gma_pipe_event_handler(dev, 0);
  132. if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
  133. gma_pipe_event_handler(dev, 1);
  134. }
  135. /*
  136. * SGX interrupt handler
  137. */
  138. static void gma_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
  139. {
  140. struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
  141. u32 val, addr;
  142. if (stat_1 & _PSB_CE_TWOD_COMPLETE)
  143. val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);
  144. if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
  145. val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
  146. addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
  147. if (val) {
  148. if (val & _PSB_CBI_STAT_PF_N_RW)
  149. DRM_ERROR("SGX MMU page fault:");
  150. else
  151. DRM_ERROR("SGX MMU read / write protection fault:");
  152. if (val & _PSB_CBI_STAT_FAULT_CACHE)
  153. DRM_ERROR("\tCache requestor");
  154. if (val & _PSB_CBI_STAT_FAULT_TA)
  155. DRM_ERROR("\tTA requestor");
  156. if (val & _PSB_CBI_STAT_FAULT_VDM)
  157. DRM_ERROR("\tVDM requestor");
  158. if (val & _PSB_CBI_STAT_FAULT_2D)
  159. DRM_ERROR("\t2D requestor");
  160. if (val & _PSB_CBI_STAT_FAULT_PBE)
  161. DRM_ERROR("\tPBE requestor");
  162. if (val & _PSB_CBI_STAT_FAULT_TSP)
  163. DRM_ERROR("\tTSP requestor");
  164. if (val & _PSB_CBI_STAT_FAULT_ISP)
  165. DRM_ERROR("\tISP requestor");
  166. if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
  167. DRM_ERROR("\tUSSEPDS requestor");
  168. if (val & _PSB_CBI_STAT_FAULT_HOST)
  169. DRM_ERROR("\tHost requestor");
  170. DRM_ERROR("\tMMU failing address is 0x%08x.\n",
  171. (unsigned int)addr);
  172. }
  173. }
  174. /* Clear bits */
  175. PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
  176. PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
  177. PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);
  178. }
/*
 * Top-level shared interrupt handler: demultiplexes the VDC identity
 * register into display, SGX and hotplug sub-handlers, then acks the
 * first-level status.  Returns IRQ_NONE when none of our sources fired
 * (the line is shared with other devices).
 */
static irqreturn_t gma_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
	u32 sgx_stat_1, sgx_stat_2;
	int handled = 0;

	spin_lock(&dev_priv->irqmask_lock);

	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);

	if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
		dsp_int = 1;

	if (vdc_stat & _PSB_IRQ_SGX_FLAG)
		sgx_int = 1;
	if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
		hotplug_int = 1;

	/* Only service the sources we actually have enabled */
	vdc_stat &= dev_priv->vdc_irq_mask;
	spin_unlock(&dev_priv->irqmask_lock);

	if (dsp_int) {
		gma_vdc_interrupt(dev, vdc_stat);
		handled = 1;
	}

	if (sgx_int) {
		sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
		sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
		gma_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
		handled = 1;
	}

	/* Note: this bit has other meanings on some devices, so we will
	   need to address that later if it ever matters */
	if (hotplug_int && dev_priv->ops->hotplug) {
		handled = dev_priv->ops->hotplug(dev);
		/* Ack hotplug by writing the status bits back */
		REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
	}

	/* Ack the first-level interrupt; read back to post the write */
	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
	(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
	rmb();

	if (!handled)
		return IRQ_NONE;

	return IRQ_HANDLED;
}
/*
 * First stage of interrupt setup: mask/disable every interrupt source
 * and build the cached VDC interrupt mask that gma_irq_postinstall()
 * writes to the enable register.
 */
void gma_irq_preinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	/* Mask everything off while we reconfigure */
	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
	PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
	PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
	PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);	/* posting read */

	/* Keep vblank delivery for pipes that already have it enabled */
	if (dev->vblank[0].enabled)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
	if (dev->vblank[1].enabled)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;

	/* Revisit this area - want per device masks ? */
	if (dev_priv->ops->hotplug)
		dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
	dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;

	/* This register is safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
/*
 * Second stage of interrupt setup: enable the SGX event sources and the
 * cached VDC interrupt mask, and sync per-pipe vblank interrupt state
 * with what the DRM core currently has enabled.
 */
void gma_irq_postinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	/* Enable 2D and MMU fault interrupts */
	PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
	PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */

	/* This register is safe even if display island is off */
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

	/* Mirror the DRM core's per-CRTC vblank enable state into the pipes */
	for (i = 0; i < dev->num_crtcs; ++i) {
		if (dev->vblank[i].enabled)
			gma_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
		else
			gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
	}

	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, true);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
  264. int gma_irq_install(struct drm_device *dev)
  265. {
  266. struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
  267. struct pci_dev *pdev = to_pci_dev(dev->dev);
  268. int ret;
  269. if (dev_priv->use_msi && pci_enable_msi(pdev)) {
  270. dev_warn(dev->dev, "Enabling MSI failed!\n");
  271. dev_priv->use_msi = false;
  272. }
  273. if (pdev->irq == IRQ_NOTCONNECTED)
  274. return -ENOTCONN;
  275. gma_irq_preinstall(dev);
  276. /* PCI devices require shared interrupts. */
  277. ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
  278. if (ret)
  279. return ret;
  280. gma_irq_postinstall(dev);
  281. dev_priv->irq_enabled = true;
  282. return 0;
  283. }
/*
 * Tear down interrupt delivery: disable hotplug and per-pipe vblank
 * sources, trim the cached mask down to the video-core bits, ack any
 * pending status, then release the IRQ line and MSI.  No-op if the
 * handler was never installed.
 */
void gma_irq_uninstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long irqflags;
	unsigned int i;

	if (!dev_priv->irq_enabled)
		return;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, false);

	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

	for (i = 0; i < dev->num_crtcs; ++i) {
		if (dev->vblank[i].enabled)
			gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
	}

	/* Keep only the SGX / video decode / encode bits in the cached mask */
	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
				  _PSB_IRQ_MSVDX_FLAG |
				  _LNC_IRQ_TOPAZ_FLAG;

	/* These two registers are safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);

	wmb();

	/* This register is safe even if display island is off */
	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);

	free_irq(pdev->irq, dev);
	if (dev_priv->use_msi)
		pci_disable_msi(pdev);
}
  314. int gma_crtc_enable_vblank(struct drm_crtc *crtc)
  315. {
  316. struct drm_device *dev = crtc->dev;
  317. unsigned int pipe = crtc->index;
  318. struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
  319. unsigned long irqflags;
  320. uint32_t reg_val = 0;
  321. uint32_t pipeconf_reg = gma_pipeconf(pipe);
  322. if (gma_power_begin(dev, false)) {
  323. reg_val = REG_READ(pipeconf_reg);
  324. gma_power_end(dev);
  325. }
  326. if (!(reg_val & PIPEACONF_ENABLE))
  327. return -EINVAL;
  328. spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
  329. if (pipe == 0)
  330. dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
  331. else if (pipe == 1)
  332. dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
  333. PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
  334. PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
  335. gma_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
  336. spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
  337. return 0;
  338. }
  339. void gma_crtc_disable_vblank(struct drm_crtc *crtc)
  340. {
  341. struct drm_device *dev = crtc->dev;
  342. unsigned int pipe = crtc->index;
  343. struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
  344. unsigned long irqflags;
  345. spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
  346. if (pipe == 0)
  347. dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
  348. else if (pipe == 1)
  349. dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
  350. PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
  351. PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
  352. gma_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
  353. spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
  354. }
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index.
 *
 * Returns the pipe's hardware frame counter, or 0 when the pipe is
 * disabled or the display island cannot be powered up.
 */
u32 gma_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	/* Default to pipe A registers; overridden below for pipes B/C */
	uint32_t high_frame = PIPEAFRAMEHIGH;
	uint32_t low_frame = PIPEAFRAMEPIXEL;
	uint32_t pipeconf_reg = PIPEACONF;
	uint32_t reg_val = 0;
	uint32_t high1 = 0, high2 = 0, low = 0, count = 0;

	switch (pipe) {
	case 0:
		break;
	case 1:
		high_frame = PIPEBFRAMEHIGH;
		low_frame = PIPEBFRAMEPIXEL;
		pipeconf_reg = PIPEBCONF;
		break;
	case 2:
		high_frame = PIPECFRAMEHIGH;
		low_frame = PIPECFRAMEPIXEL;
		pipeconf_reg = PIPECCONF;
		break;
	default:
		dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
		return 0;
	}

	if (!gma_power_begin(dev, false))
		return 0;

	reg_val = REG_READ(pipeconf_reg);

	if (!(reg_val & PIPEACONF_ENABLE)) {
		dev_err(dev->dev, "trying to get vblank count for disabled pipe %u\n",
			pipe);
		goto err_gma_power_end;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	/* Combine the stable high bits with the 8-bit low field */
	count = (high1 << 8) | low;

err_gma_power_end:
	gma_power_end(dev);

	return count;	/* still 0 if the pipe was disabled */
}