sde_hw_interrupts.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/bitops.h>
  6. #include <linux/slab.h>
  7. #include "sde_kms.h"
  8. #include "sde_hw_interrupts.h"
  9. #include "sde_hw_util.h"
  10. #include "sde_hw_mdss.h"
  11. /**
  12. * Register offsets in MDSS register file for the interrupt registers
  13. * w.r.t. base for that block. Base offsets for IRQs should come from the
  14. * device tree and get stored in the catalog(irq_offset_list) until they
  15. * are added to the sde_irq_tbl during the table initialization.
  16. */
  17. #define HW_INTR_STATUS 0x0010
  18. #define MDP_AD4_INTR_EN_OFF 0x41c
  19. #define MDP_AD4_INTR_CLEAR_OFF 0x424
  20. #define MDP_AD4_INTR_STATUS_OFF 0x420
  21. #define MDP_INTF_TEAR_INTR_EN_OFF 0x0
  22. #define MDP_INTF_TEAR_INTR_STATUS_OFF 0x4
  23. #define MDP_INTF_TEAR_INTR_CLEAR_OFF 0x8
  24. #define MDP_LTM_INTR_EN_OFF 0x50
  25. #define MDP_LTM_INTR_STATUS_OFF 0x54
  26. #define MDP_LTM_INTR_CLEAR_OFF 0x58
  27. /**
  28. * WB interrupt status bit definitions
  29. */
  30. #define SDE_INTR_WB_0_DONE BIT(0)
  31. #define SDE_INTR_WB_1_DONE BIT(1)
  32. #define SDE_INTR_WB_2_DONE BIT(4)
  33. /**
  34. * WDOG timer interrupt status bit definitions
  35. */
  36. #define SDE_INTR_WD_TIMER_0_DONE BIT(2)
  37. #define SDE_INTR_WD_TIMER_1_DONE BIT(3)
  38. #define SDE_INTR_WD_TIMER_2_DONE BIT(5)
  39. #define SDE_INTR_WD_TIMER_3_DONE BIT(6)
  40. #define SDE_INTR_WD_TIMER_4_DONE BIT(7)
  41. /**
  42. * Pingpong interrupt status bit definitions
  43. */
  44. #define SDE_INTR_PING_PONG_0_DONE BIT(8)
  45. #define SDE_INTR_PING_PONG_1_DONE BIT(9)
  46. #define SDE_INTR_PING_PONG_2_DONE BIT(10)
  47. #define SDE_INTR_PING_PONG_3_DONE BIT(11)
  48. #define SDE_INTR_PING_PONG_4_DONE BIT(30)
  49. #define SDE_INTR_PING_PONG_5_DONE BIT(31)
  50. #define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
  51. #define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
  52. #define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
  53. #define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
  54. #define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
  55. #define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
  56. #define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
  57. #define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
  58. #define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
  59. #define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
  60. #define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
  61. #define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
  62. /**
  63. * Interface interrupt status bit definitions
  64. */
  65. #define SDE_INTR_INTF_0_UNDERRUN BIT(24)
  66. #define SDE_INTR_INTF_1_UNDERRUN BIT(26)
  67. #define SDE_INTR_INTF_2_UNDERRUN BIT(28)
  68. #define SDE_INTR_INTF_3_UNDERRUN BIT(30)
  69. #define SDE_INTR_INTF_0_VSYNC BIT(25)
  70. #define SDE_INTR_INTF_1_VSYNC BIT(27)
  71. #define SDE_INTR_INTF_2_VSYNC BIT(29)
  72. #define SDE_INTR_INTF_3_VSYNC BIT(31)
  73. /**
  74. * Pingpong Secondary interrupt status bit definitions
  75. */
  76. #define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
  77. #define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
  78. #define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8)
  79. #define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
  80. #define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
  81. /**
  82. * Pingpong TEAR detection interrupt status bit definitions
  83. */
  84. #define SDE_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
  85. #define SDE_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
  86. #define SDE_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
  87. #define SDE_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
  88. /**
  89. * Pingpong TE detection interrupt status bit definitions
  90. */
  91. #define SDE_INTR_PING_PONG_0_TE_DETECTED BIT(24)
  92. #define SDE_INTR_PING_PONG_1_TE_DETECTED BIT(25)
  93. #define SDE_INTR_PING_PONG_2_TE_DETECTED BIT(26)
  94. #define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)
  95. /**
  96. * Ctl start interrupt status bit definitions
  97. */
  98. #define SDE_INTR_CTL_0_START BIT(9)
  99. #define SDE_INTR_CTL_1_START BIT(10)
  100. #define SDE_INTR_CTL_2_START BIT(11)
  101. #define SDE_INTR_CTL_3_START BIT(12)
  102. #define SDE_INTR_CTL_4_START BIT(13)
  103. #define SDE_INTR_CTL_5_START BIT(23)
  104. /**
  105. * Concurrent WB overflow interrupt status bit definitions
  106. */
  107. #define SDE_INTR_CWB_1_OVERFLOW BIT(8)
  108. #define SDE_INTR_CWB_2_OVERFLOW BIT(14)
  109. #define SDE_INTR_CWB_3_OVERFLOW BIT(15)
  110. #define SDE_INTR_CWB_4_OVERFLOW BIT(20)
  111. #define SDE_INTR_CWB_5_OVERFLOW BIT(21)
  112. /**
  113. * Histogram VIG done interrupt status bit definitions
  114. */
  115. #define SDE_INTR_HIST_VIG_0_DONE BIT(0)
  116. #define SDE_INTR_HIST_VIG_1_DONE BIT(4)
  117. #define SDE_INTR_HIST_VIG_2_DONE BIT(8)
  118. #define SDE_INTR_HIST_VIG_3_DONE BIT(10)
  119. /**
  120. * Histogram VIG reset Sequence done interrupt status bit definitions
  121. */
  122. #define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
  123. #define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
  124. #define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
  125. #define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
  126. /**
  127. * Histogram DSPP done interrupt status bit definitions
  128. */
  129. #define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
  130. #define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
  131. #define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
  132. #define SDE_INTR_HIST_DSPP_3_DONE BIT(22)
  133. /**
  134. * Histogram DSPP reset Sequence done interrupt status bit definitions
  135. */
  136. #define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
  137. #define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
  138. #define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
  139. #define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
  140. /**
  141. * INTF interrupt status bit definitions
  142. */
  143. #define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
  144. #define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
  145. #define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
  146. #define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
  147. #define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
  148. #define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
  149. #define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
  150. #define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
  151. #define SDE_INTR_PROG_LINE BIT(8)
  152. /**
  153. * AD4 interrupt status bit definitions
  154. */
  155. #define SDE_INTR_BRIGHTPR_UPDATED BIT(4)
  156. #define SDE_INTR_DARKENH_UPDATED BIT(3)
  157. #define SDE_INTR_STREN_OUTROI_UPDATED BIT(2)
  158. #define SDE_INTR_STREN_INROI_UPDATED BIT(1)
  159. #define SDE_INTR_BACKLIGHT_UPDATED BIT(0)
  160. /**
  161. * INTF Tear IRQ register bit definitions
  162. */
  163. #define SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE BIT(0)
  164. #define SDE_INTR_INTF_TEAR_WR_PTR BIT(1)
  165. #define SDE_INTR_INTF_TEAR_RD_PTR BIT(2)
  166. #define SDE_INTR_INTF_TEAR_TE_DETECTED BIT(3)
  167. #define SDE_INTR_INTF_TEAR_TEAR_DETECTED BIT(4)
  168. /**
  169. * LTM interrupt status bit definitions
  170. */
  171. #define SDE_INTR_LTM_STATS_DONE BIT(0)
  172. #define SDE_INTR_LTM_STATS_WB_PB BIT(5)
  173. /**
  174. * struct sde_intr_reg - array of SDE register sets
  175. * @clr_off: offset to CLEAR reg
  176. * @en_off: offset to ENABLE reg
  177. * @status_off: offset to STATUS reg
  178. * @map_idx_start first offset in the sde_irq_map table
  179. * @map_idx_end last offset in the sde_irq_map table
  180. */
  181. struct sde_intr_reg {
  182. u32 clr_off;
  183. u32 en_off;
  184. u32 status_off;
  185. u32 map_idx_start;
  186. u32 map_idx_end;
  187. };
  188. /**
  189. * struct sde_irq_type - maps each irq with i/f
  190. * @intr_type: type of interrupt listed in sde_intr_type
  191. * @instance_idx: instance index of the associated HW block in SDE
  192. * @irq_mask: corresponding bit in the interrupt status reg
  193. * @reg_idx: index in the 'sde_irq_tbl' table, to know which
  194. * registers offsets to use.
  195. */
  196. struct sde_irq_type {
  197. u32 intr_type;
  198. u32 instance_idx;
  199. u32 irq_mask;
  200. int reg_idx;
  201. };
  202. /**
  203. * IRQ mapping tables - use for lookup an irq_idx in this table that have
  204. * a matching interface type and instance index.
  205. * Each of these tables are copied to a dynamically allocated
  206. * table, that will be used to service each of the irqs
  207. * -1 indicates an uninitialized value which should be set when copying
  208. * these tables to the sde_irq_map.
  209. */
  210. static struct sde_irq_type sde_irq_intr_map[] = {
  211. { SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, -1},
  212. { SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, -1},
  213. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, -1},
  214. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, -1},
  215. { SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, -1},
  216. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, -1},
  217. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, -1},
  218. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, -1},
  219. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
  220. SDE_INTR_PING_PONG_0_DONE, -1},
  221. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
  222. SDE_INTR_PING_PONG_1_DONE, -1},
  223. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
  224. SDE_INTR_PING_PONG_2_DONE, -1},
  225. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
  226. SDE_INTR_PING_PONG_3_DONE, -1},
  227. { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
  228. SDE_INTR_PING_PONG_0_RD_PTR, -1},
  229. { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
  230. SDE_INTR_PING_PONG_1_RD_PTR, -1},
  231. { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
  232. SDE_INTR_PING_PONG_2_RD_PTR, -1},
  233. { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
  234. SDE_INTR_PING_PONG_3_RD_PTR, -1},
  235. { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
  236. SDE_INTR_PING_PONG_0_WR_PTR, -1},
  237. { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
  238. SDE_INTR_PING_PONG_1_WR_PTR, -1},
  239. { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
  240. SDE_INTR_PING_PONG_2_WR_PTR, -1},
  241. { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
  242. SDE_INTR_PING_PONG_3_WR_PTR, -1},
  243. { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
  244. SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, -1},
  245. { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
  246. SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, -1},
  247. { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
  248. SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, -1},
  249. { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
  250. SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, -1},
  251. { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, -1},
  252. { SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, -1},
  253. { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, -1},
  254. { SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, -1},
  255. { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, -1},
  256. { SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, -1},
  257. { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, -1},
  258. { SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, -1},
  259. };
/* IRQ map for the INTR2 status register (CTL start, CWB overflow,
 * PP tear/TE detection, PP 4/5 done, PP_S0)
 */
static struct sde_irq_type sde_irq_intr2_map[] = {
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_WR_PTR, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_1, SDE_INTR_CWB_1_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_RD_PTR, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_0,
		SDE_INTR_CTL_0_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_1,
		SDE_INTR_CTL_1_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_2,
		SDE_INTR_CTL_2_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_3,
		SDE_INTR_CTL_3_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_4,
		SDE_INTR_CTL_4_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_5,
		SDE_INTR_CTL_5_START, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
		SDE_INTR_PING_PONG_0_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
		SDE_INTR_PING_PONG_1_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
		SDE_INTR_PING_PONG_2_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
		SDE_INTR_PING_PONG_3_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_4, SDE_INTR_CWB_4_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_5, SDE_INTR_CWB_5_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
		SDE_INTR_PING_PONG_0_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
		SDE_INTR_PING_PONG_1_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
		SDE_INTR_PING_PONG_2_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
		SDE_INTR_PING_PONG_3_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_4,
		SDE_INTR_PING_PONG_4_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_5,
		SDE_INTR_PING_PONG_5_DONE, -1},
};
/* IRQ map for the HIST_INTR status register (VIG/DSPP histogram events) */
static struct sde_irq_type sde_irq_hist_map[] = {
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
		SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
		SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
		SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
		SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
		SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
		SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
		SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
		SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, -1},
};
/* IRQ map for a per-INTF interrupt register; instance_idx (-1) is filled
 * in with the owning INTF when the table is copied into sde_irq_map
 */
static struct sde_irq_type sde_irq_intf_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, -1,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, -1,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, -1,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, -1,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, -1,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, -1,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, -1,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, -1,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, -1, SDE_INTR_PROG_LINE, -1},
};
/* IRQ map for a per-AD4 interrupt register; only backlight-updated is used */
static struct sde_irq_type sde_irq_ad4_map[] = {
	{ SDE_IRQ_TYPE_AD4_BL_DONE, -1, SDE_INTR_BACKLIGHT_UPDATED, -1},
};
/* IRQ map for a per-INTF tear-check interrupt register */
static struct sde_irq_type sde_irq_intf_te_map[] = {
	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, -1,
		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, -1,
		SDE_INTR_INTF_TEAR_WR_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, -1,
		SDE_INTR_INTF_TEAR_RD_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, -1,
		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
};
/* IRQ map for a per-LTM interrupt register (stats done / wb-pb) */
static struct sde_irq_type sde_irq_ltm_map[] = {
	{ SDE_IRQ_TYPE_LTM_STATS_DONE, -1, SDE_INTR_LTM_STATS_DONE, -1},
	{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, -1, SDE_INTR_LTM_STATS_WB_PB, -1},
};
  371. static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
  372. enum sde_intr_type intr_type, u32 instance_idx)
  373. {
  374. int i;
  375. for (i = 0; i < intr->sde_irq_map_size; i++) {
  376. if (intr_type == intr->sde_irq_map[i].intr_type &&
  377. instance_idx == intr->sde_irq_map[i].instance_idx)
  378. return i;
  379. }
  380. pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
  381. intr_type, instance_idx);
  382. return -EINVAL;
  383. }
  384. static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off,
  385. uint32_t mask)
  386. {
  387. if (!intr)
  388. return;
  389. SDE_REG_WRITE(&intr->hw, reg_off, mask);
  390. /* ensure register writes go through */
  391. wmb();
  392. }
/**
 * sde_hw_intr_dispatch_irq - walk saved IRQ statuses and invoke callbacks
 * @intr: interrupt context; save_irq_status[] must already be populated
 *	  (by get_interrupt_statuses) before this is called
 * @cbfunc: per-irq callback, invoked with (arg, irq_idx); if NULL, the
 *	    interrupt status is cleared here instead
 * @arg: opaque pointer forwarded to @cbfunc
 *
 * Runs with intr->irq_lock held for the whole dispatch pass.
 */
static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
		void (*cbfunc)(void *, int),
		void *arg)
{
	int reg_idx;
	int irq_idx;
	int start_idx;
	int end_idx;
	u32 irq_status;
	unsigned long irq_flags;

	if (!intr)
		return;

	/*
	 * The dispatcher will save the IRQ status before calling here.
	 * Now need to go through each IRQ status and find matching
	 * irq lookup index.
	 */
	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < intr->sde_irq_size; reg_idx++) {
		irq_status = intr->save_irq_status[reg_idx];

		/*
		 * Each Interrupt register has dynamic range of indexes,
		 * initialized during hw_intr_init when sde_irq_tbl is created.
		 * end_idx is exclusive, hence the asymmetric >= / > checks.
		 */
		start_idx = intr->sde_irq_tbl[reg_idx].map_idx_start;
		end_idx = intr->sde_irq_tbl[reg_idx].map_idx_end;

		if (start_idx >= intr->sde_irq_map_size ||
				end_idx > intr->sde_irq_map_size)
			continue;

		/*
		 * Search through matching intr status from irq map.
		 * start_idx and end_idx defined the search range in
		 * the sde_irq_map. Loop exits early once every pending
		 * bit in irq_status has been consumed.
		 */
		for (irq_idx = start_idx;
				(irq_idx < end_idx) && irq_status;
				irq_idx++)
			if ((irq_status &
				intr->sde_irq_map[irq_idx].irq_mask) &&
				(intr->sde_irq_map[irq_idx].reg_idx ==
				 reg_idx)) {
				/*
				 * Once a match on irq mask, perform a callback
				 * to the given cbfunc. cbfunc will take care
				 * the interrupt status clearing. If cbfunc is
				 * not provided, then the interrupt clearing
				 * is here.
				 */
				if (cbfunc)
					cbfunc(arg, irq_idx);
				else
					intr->ops.clear_intr_status_nolock(
							intr, irq_idx);

				/*
				 * When callback finish, clear the irq_status
				 * with the matching mask. Once irq_status
				 * is all cleared, the search can be stopped.
				 */
				irq_status &=
					~intr->sde_irq_map[irq_idx].irq_mask;
			}
	}
	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
  457. static int sde_hw_intr_enable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
  458. {
  459. int reg_idx;
  460. const struct sde_intr_reg *reg;
  461. const struct sde_irq_type *irq;
  462. const char *dbgstr = NULL;
  463. uint32_t cache_irq_mask;
  464. if (!intr)
  465. return -EINVAL;
  466. if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
  467. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  468. return -EINVAL;
  469. }
  470. irq = &intr->sde_irq_map[irq_idx];
  471. reg_idx = irq->reg_idx;
  472. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  473. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  474. return -EINVAL;
  475. }
  476. reg = &intr->sde_irq_tbl[reg_idx];
  477. cache_irq_mask = intr->cache_irq_mask[reg_idx];
  478. if (cache_irq_mask & irq->irq_mask) {
  479. dbgstr = "SDE IRQ already set:";
  480. } else {
  481. dbgstr = "SDE IRQ enabled:";
  482. cache_irq_mask |= irq->irq_mask;
  483. /* Cleaning any pending interrupt */
  484. SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
  485. /* Enabling interrupts with the new mask */
  486. SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
  487. /* ensure register write goes through */
  488. wmb();
  489. intr->cache_irq_mask[reg_idx] = cache_irq_mask;
  490. }
  491. pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
  492. irq->irq_mask, cache_irq_mask);
  493. return 0;
  494. }
  495. static int sde_hw_intr_disable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
  496. {
  497. int reg_idx;
  498. const struct sde_intr_reg *reg;
  499. const struct sde_irq_type *irq;
  500. const char *dbgstr = NULL;
  501. uint32_t cache_irq_mask;
  502. if (!intr)
  503. return -EINVAL;
  504. if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
  505. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  506. return -EINVAL;
  507. }
  508. irq = &intr->sde_irq_map[irq_idx];
  509. reg_idx = irq->reg_idx;
  510. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  511. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  512. return -EINVAL;
  513. }
  514. reg = &intr->sde_irq_tbl[reg_idx];
  515. cache_irq_mask = intr->cache_irq_mask[reg_idx];
  516. if ((cache_irq_mask & irq->irq_mask) == 0) {
  517. dbgstr = "SDE IRQ is already cleared:";
  518. } else {
  519. dbgstr = "SDE IRQ mask disable:";
  520. cache_irq_mask &= ~irq->irq_mask;
  521. /* Disable interrupts based on the new mask */
  522. SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
  523. /* Cleaning any pending interrupt */
  524. SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
  525. /* ensure register write goes through */
  526. wmb();
  527. intr->cache_irq_mask[reg_idx] = cache_irq_mask;
  528. }
  529. pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
  530. irq->irq_mask, cache_irq_mask);
  531. return 0;
  532. }
  533. static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
  534. {
  535. int i;
  536. if (!intr)
  537. return -EINVAL;
  538. for (i = 0; i < intr->sde_irq_size; i++)
  539. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
  540. 0xffffffff);
  541. /* ensure register writes go through */
  542. wmb();
  543. return 0;
  544. }
  545. static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
  546. {
  547. int i;
  548. if (!intr)
  549. return -EINVAL;
  550. for (i = 0; i < intr->sde_irq_size; i++)
  551. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].en_off,
  552. 0x00000000);
  553. /* ensure register writes go through */
  554. wmb();
  555. return 0;
  556. }
  557. static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr,
  558. uint32_t *mask)
  559. {
  560. if (!intr || !mask)
  561. return -EINVAL;
  562. *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
  563. | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
  564. return 0;
  565. }
  566. static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
  567. uint32_t *sources)
  568. {
  569. if (!intr || !sources)
  570. return -EINVAL;
  571. *sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);
  572. return 0;
  573. }
/**
 * sde_hw_intr_get_interrupt_statuses - snapshot and ack all IRQ statuses
 * @intr: interrupt context
 *
 * For every interrupt register: reads the status into save_irq_status[],
 * clears the pending bits in hardware, then masks the saved value with
 * the enable register so dispatch only sees enabled interrupts. Runs
 * under irq_lock; dispatch_irq consumes save_irq_status[] afterwards.
 */
static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr)
{
	int i;
	u32 enable_mask;
	unsigned long irq_flags;

	if (!intr)
		return;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (i = 0; i < intr->sde_irq_size; i++) {
		/* Read interrupt status */
		intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
				intr->sde_irq_tbl[i].status_off);

		/* Read enable mask */
		enable_mask = SDE_REG_READ(&intr->hw,
				intr->sde_irq_tbl[i].en_off);

		/* and clear the interrupt */
		if (intr->save_irq_status[i])
			SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
					intr->save_irq_status[i]);

		/* Finally update IRQ status based on enable mask */
		intr->save_irq_status[i] &= enable_mask;
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
  600. static void sde_hw_intr_clear_intr_status_force_mask(struct sde_hw_intr *intr,
  601. int irq_idx, u32 irq_mask)
  602. {
  603. int reg_idx;
  604. if (!intr)
  605. return;
  606. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  607. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  608. return;
  609. }
  610. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  611. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  612. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  613. return;
  614. }
  615. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  616. irq_mask);
  617. /* ensure register writes go through */
  618. wmb();
  619. }
  620. static void sde_hw_intr_clear_intr_status_nolock(struct sde_hw_intr *intr,
  621. int irq_idx)
  622. {
  623. int reg_idx;
  624. if (!intr)
  625. return;
  626. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  627. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  628. return;
  629. }
  630. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  631. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  632. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  633. return;
  634. }
  635. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  636. intr->sde_irq_map[irq_idx].irq_mask);
  637. /* ensure register writes go through */
  638. wmb();
  639. }
  640. static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
  641. int irq_idx)
  642. {
  643. unsigned long irq_flags;
  644. if (!intr)
  645. return;
  646. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  647. sde_hw_intr_clear_intr_status_nolock(intr, irq_idx);
  648. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  649. }
  650. static u32 sde_hw_intr_get_intr_status_nolock(struct sde_hw_intr *intr,
  651. int irq_idx, bool clear)
  652. {
  653. int reg_idx;
  654. u32 intr_status;
  655. if (!intr)
  656. return 0;
  657. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  658. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  659. return 0;
  660. }
  661. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  662. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  663. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  664. return 0;
  665. }
  666. intr_status = SDE_REG_READ(&intr->hw,
  667. intr->sde_irq_tbl[reg_idx].status_off) &
  668. intr->sde_irq_map[irq_idx].irq_mask;
  669. if (intr_status && clear)
  670. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  671. intr_status);
  672. /* ensure register writes go through */
  673. wmb();
  674. return intr_status;
  675. }
  676. static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
  677. int irq_idx, bool clear)
  678. {
  679. int reg_idx;
  680. unsigned long irq_flags;
  681. u32 intr_status;
  682. if (!intr)
  683. return 0;
  684. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  685. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  686. return 0;
  687. }
  688. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  689. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  690. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  691. return 0;
  692. }
  693. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  694. intr_status = SDE_REG_READ(&intr->hw,
  695. intr->sde_irq_tbl[reg_idx].status_off) &
  696. intr->sde_irq_map[irq_idx].irq_mask;
  697. if (intr_status && clear)
  698. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  699. intr_status);
  700. /* ensure register writes go through */
  701. wmb();
  702. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  703. return intr_status;
  704. }
  705. static u32 sde_hw_intr_get_intr_status_nomask(struct sde_hw_intr *intr,
  706. int irq_idx, bool clear)
  707. {
  708. int reg_idx;
  709. unsigned long irq_flags;
  710. u32 intr_status = 0;
  711. if (!intr)
  712. return 0;
  713. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  714. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  715. return 0;
  716. }
  717. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  718. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  719. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  720. return 0;
  721. }
  722. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  723. intr_status = SDE_REG_READ(&intr->hw,
  724. intr->sde_irq_tbl[reg_idx].status_off);
  725. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  726. return intr_status;
  727. }
  728. static int _set_sde_irq_tbl_offset_top(struct sde_intr_reg *sde_irq,
  729. struct sde_intr_irq_offsets *item)
  730. {
  731. u32 base_offset;
  732. if (!sde_irq || !item)
  733. return -EINVAL;
  734. base_offset = item->base_offset;
  735. switch (item->instance_idx) {
  736. case SDE_INTR_TOP_INTR:
  737. sde_irq->clr_off = base_offset + INTR_CLEAR;
  738. sde_irq->en_off = base_offset + INTR_EN;
  739. sde_irq->status_off = base_offset + INTR_STATUS;
  740. break;
  741. case SDE_INTR_TOP_INTR2:
  742. sde_irq->clr_off = base_offset + INTR2_CLEAR;
  743. sde_irq->en_off = base_offset + INTR2_EN;
  744. sde_irq->status_off = base_offset + INTR2_STATUS;
  745. break;
  746. case SDE_INTR_TOP_HIST_INTR:
  747. sde_irq->clr_off = base_offset + HIST_INTR_CLEAR;
  748. sde_irq->en_off = base_offset + HIST_INTR_EN;
  749. sde_irq->status_off = base_offset + HIST_INTR_STATUS;
  750. break;
  751. default:
  752. pr_err("invalid TOP intr for instance %d\n",
  753. item->instance_idx);
  754. return -EINVAL;
  755. }
  756. return 0;
  757. }
  758. static int _set_sde_irq_tbl_offset(struct sde_intr_reg *sde_irq,
  759. struct sde_intr_irq_offsets *item)
  760. {
  761. u32 base_offset, rc = 0;
  762. if (!sde_irq || !item)
  763. return -EINVAL;
  764. base_offset = item->base_offset;
  765. switch (item->type) {
  766. case SDE_INTR_HWBLK_TOP:
  767. rc = _set_sde_irq_tbl_offset_top(sde_irq, item);
  768. break;
  769. case SDE_INTR_HWBLK_INTF:
  770. sde_irq->clr_off = base_offset + INTF_INTR_CLEAR;
  771. sde_irq->en_off = base_offset + INTF_INTR_EN;
  772. sde_irq->status_off = base_offset + INTF_INTR_STATUS;
  773. break;
  774. case SDE_INTR_HWBLK_AD4:
  775. sde_irq->clr_off = base_offset + MDP_AD4_INTR_CLEAR_OFF;
  776. sde_irq->en_off = base_offset + MDP_AD4_INTR_EN_OFF;
  777. sde_irq->status_off = base_offset + MDP_AD4_INTR_STATUS_OFF;
  778. break;
  779. case SDE_INTR_HWBLK_INTF_TEAR:
  780. sde_irq->clr_off = base_offset + MDP_INTF_TEAR_INTR_CLEAR_OFF;
  781. sde_irq->en_off = base_offset + MDP_INTF_TEAR_INTR_EN_OFF;
  782. sde_irq->status_off = base_offset +
  783. MDP_INTF_TEAR_INTR_STATUS_OFF;
  784. break;
  785. case SDE_INTR_HWBLK_LTM:
  786. sde_irq->clr_off = base_offset + MDP_LTM_INTR_CLEAR_OFF;
  787. sde_irq->en_off = base_offset + MDP_LTM_INTR_EN_OFF;
  788. sde_irq->status_off = base_offset + MDP_LTM_INTR_STATUS_OFF;
  789. break;
  790. default:
  791. pr_err("unrecognized intr blk type %d\n",
  792. item->type);
  793. rc = -EINVAL;
  794. }
  795. return rc;
  796. }
  797. static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
  798. {
  799. ops->set_mask = sde_hw_intr_set_mask;
  800. ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
  801. ops->enable_irq_nolock = sde_hw_intr_enable_irq_nolock;
  802. ops->disable_irq_nolock = sde_hw_intr_disable_irq_nolock;
  803. ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
  804. ops->clear_all_irqs = sde_hw_intr_clear_irqs;
  805. ops->disable_all_irqs = sde_hw_intr_disable_irqs;
  806. ops->get_valid_interrupts = sde_hw_intr_get_valid_interrupts;
  807. ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
  808. ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
  809. ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
  810. ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock;
  811. ops->clear_intr_status_force_mask =
  812. sde_hw_intr_clear_intr_status_force_mask;
  813. ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
  814. ops->get_intr_status_nolock = sde_hw_intr_get_intr_status_nolock;
  815. ops->get_intr_status_nomask = sde_hw_intr_get_intr_status_nomask;
  816. }
  817. static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
  818. void __iomem *addr, struct sde_hw_blk_reg_map *hw)
  819. {
  820. if (!m || !addr || !hw || m->mdp_count == 0)
  821. return NULL;
  822. hw->base_off = addr;
  823. hw->blk_off = m->mdss[0].base;
  824. hw->hwversion = m->hwversion;
  825. return &m->mdss[0];
  826. }
  827. void sde_hw_intr_destroy(struct sde_hw_intr *intr)
  828. {
  829. if (intr) {
  830. kfree(intr->sde_irq_tbl);
  831. kfree(intr->sde_irq_map);
  832. kfree(intr->cache_irq_mask);
  833. kfree(intr->save_irq_status);
  834. kfree(intr);
  835. }
  836. }
  837. static inline u32 _get_irq_map_size_top(enum sde_intr_top_intr inst)
  838. {
  839. u32 ret = 0;
  840. switch (inst) {
  841. case SDE_INTR_TOP_INTR:
  842. ret = ARRAY_SIZE(sde_irq_intr_map);
  843. break;
  844. case SDE_INTR_TOP_INTR2:
  845. ret = ARRAY_SIZE(sde_irq_intr2_map);
  846. break;
  847. case SDE_INTR_TOP_HIST_INTR:
  848. ret = ARRAY_SIZE(sde_irq_hist_map);
  849. break;
  850. default:
  851. pr_err("invalid top inst:%d\n", inst);
  852. }
  853. return ret;
  854. }
  855. static inline u32 _get_irq_map_size(struct sde_intr_irq_offsets *item)
  856. {
  857. u32 ret = 0;
  858. switch (item->type) {
  859. case SDE_INTR_HWBLK_TOP:
  860. ret = _get_irq_map_size_top(item->instance_idx);
  861. break;
  862. case SDE_INTR_HWBLK_INTF:
  863. ret = ARRAY_SIZE(sde_irq_intf_map);
  864. break;
  865. case SDE_INTR_HWBLK_AD4:
  866. ret = ARRAY_SIZE(sde_irq_ad4_map);
  867. break;
  868. case SDE_INTR_HWBLK_INTF_TEAR:
  869. ret = ARRAY_SIZE(sde_irq_intf_te_map);
  870. break;
  871. case SDE_INTR_HWBLK_LTM:
  872. ret = ARRAY_SIZE(sde_irq_ltm_map);
  873. break;
  874. default:
  875. pr_err("invalid type: %d\n", item->type);
  876. }
  877. return ret;
  878. }
  879. static inline struct sde_irq_type *_get_irq_map_addr_top(
  880. enum sde_intr_top_intr inst)
  881. {
  882. struct sde_irq_type *ret = NULL;
  883. switch (inst) {
  884. case SDE_INTR_TOP_INTR:
  885. ret = sde_irq_intr_map;
  886. break;
  887. case SDE_INTR_TOP_INTR2:
  888. ret = sde_irq_intr2_map;
  889. break;
  890. case SDE_INTR_TOP_HIST_INTR:
  891. ret = sde_irq_hist_map;
  892. break;
  893. default:
  894. pr_err("invalid top inst:%d\n", inst);
  895. }
  896. return ret;
  897. }
  898. static inline struct sde_irq_type *_get_irq_map_addr(
  899. struct sde_intr_irq_offsets *item)
  900. {
  901. struct sde_irq_type *ret = NULL;
  902. switch (item->type) {
  903. case SDE_INTR_HWBLK_TOP:
  904. ret = _get_irq_map_addr_top(item->instance_idx);
  905. break;
  906. case SDE_INTR_HWBLK_INTF:
  907. ret = sde_irq_intf_map;
  908. break;
  909. case SDE_INTR_HWBLK_AD4:
  910. ret = sde_irq_ad4_map;
  911. break;
  912. case SDE_INTR_HWBLK_INTF_TEAR:
  913. ret = sde_irq_intf_te_map;
  914. break;
  915. case SDE_INTR_HWBLK_LTM:
  916. ret = sde_irq_ltm_map;
  917. break;
  918. default:
  919. pr_err("invalid type: %d\n", item->type);
  920. }
  921. return ret;
  922. }
  923. static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
  924. struct sde_intr_irq_offsets *item, u32 low_idx, u32 high_idx)
  925. {
  926. int i, j = 0;
  927. struct sde_irq_type *src = _get_irq_map_addr(item);
  928. u32 src_size = _get_irq_map_size(item);
  929. if (!src)
  930. return -EINVAL;
  931. if (low_idx >= size || high_idx > size ||
  932. (high_idx - low_idx > src_size)) {
  933. pr_err("invalid size l:%d h:%d dst:%d src:%d\n",
  934. low_idx, high_idx, size, src_size);
  935. return -EINVAL;
  936. }
  937. for (i = low_idx; i < high_idx; i++)
  938. sde_irq_map[i] = src[j++];
  939. return 0;
  940. }
  941. static int _sde_hw_intr_init_irq_tables(struct sde_hw_intr *intr,
  942. struct sde_mdss_cfg *m)
  943. {
  944. struct sde_intr_irq_offsets *item;
  945. int i, idx, sde_irq_tbl_idx = 0, ret = 0;
  946. u32 low_idx, high_idx;
  947. u32 sde_irq_map_idx = 0;
  948. /* Initialize offsets in the sde_irq_map & sde_irq_tbl tables */
  949. list_for_each_entry(item, &m->irq_offset_list, list) {
  950. low_idx = sde_irq_map_idx;
  951. high_idx = low_idx + _get_irq_map_size(item);
  952. pr_debug("init[%d]=%d low:%d high:%d\n",
  953. sde_irq_tbl_idx, idx, low_idx, high_idx);
  954. if (sde_irq_tbl_idx >= intr->sde_irq_size ||
  955. sde_irq_tbl_idx < 0) {
  956. ret = -EINVAL;
  957. goto exit;
  958. }
  959. /* init sde_irq_map with the global irq mapping table */
  960. if (_sde_copy_regs(intr->sde_irq_map, intr->sde_irq_map_size,
  961. item, low_idx, high_idx)) {
  962. ret = -EINVAL;
  963. goto exit;
  964. }
  965. /* init irq map with its reg & instance idxs in the irq tbl */
  966. for (i = low_idx; i < high_idx; i++) {
  967. intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
  968. if (item->type != SDE_INTR_HWBLK_TOP)
  969. intr->sde_irq_map[i].instance_idx =
  970. item->instance_idx;
  971. pr_debug("sde_irq_map[%d].reg_idx=%d .inst_idx = %d\n",
  972. i, sde_irq_tbl_idx, item->instance_idx);
  973. }
  974. /* track the idx of the mapping table for this irq in
  975. * sde_irq_map, this to only access the indexes of this
  976. * irq during the irq dispatch
  977. */
  978. intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start = low_idx;
  979. intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end = high_idx;
  980. ret = _set_sde_irq_tbl_offset(
  981. &intr->sde_irq_tbl[sde_irq_tbl_idx], item);
  982. if (ret)
  983. goto exit;
  984. /* increment idx for both tables accordingly */
  985. sde_irq_tbl_idx++;
  986. sde_irq_map_idx = high_idx;
  987. }
  988. exit:
  989. sde_hw_catalog_irq_offset_list_delete(&m->irq_offset_list);
  990. return ret;
  991. }
/*
 * sde_hw_intr_init - allocate and initialize the interrupt handler context
 * @addr: mapped register I/O base address
 * @m: catalog configuration whose irq_offset_list describes this target
 *
 * Sizes the irq tables by walking m->irq_offset_list, allocates them, fills
 * them via _sde_hw_intr_init_irq_tables() (which also deletes the offset
 * list), and installs the ops table.
 *
 * Return: new handler on success, ERR_PTR(-errno) on failure. Callers must
 * release with sde_hw_intr_destroy().
 *
 * NOTE(review): on early failure paths (before _sde_hw_intr_init_irq_tables
 * runs) m->irq_offset_list is left intact — presumably freed by the caller
 * or catalog teardown; verify.
 */
struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
		struct sde_mdss_cfg *m)
{
	struct sde_hw_intr *intr = NULL;
	struct sde_mdss_base_cfg *cfg;
	struct sde_intr_irq_offsets *item;
	u32 irq_regs_count = 0;
	u32 irq_map_count = 0;
	u32 size;
	int ret = 0;

	if (!addr || !m) {
		ret = -EINVAL;
		goto exit;
	}

	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
	if (!intr) {
		ret = -ENOMEM;
		goto exit;
	}

	/* bind intr->hw to the first MDSS block's register window */
	cfg = __intr_offset(m, addr, &intr->hw);
	if (!cfg) {
		ret = -EINVAL;
		goto exit;
	}
	__setup_intr_ops(&intr->ops);

	/* check how many irq's this target supports */
	list_for_each_entry(item, &m->irq_offset_list, list) {
		size = _get_irq_map_size(item);
		/* reject unknown blocks (size 0) and u32 counter overflow */
		if (!size || irq_map_count >= UINT_MAX - size) {
			pr_err("wrong map cnt idx:%d blk:%d/%d sz:%d cnt:%d\n",
				irq_regs_count, item->type, item->instance_idx,
				size, irq_map_count);
			ret = -EINVAL;
			goto exit;
		}
		irq_regs_count++;
		irq_map_count += size;
	}

	if (irq_regs_count == 0 || irq_map_count == 0) {
		pr_err("invalid irq map: %d %d\n",
			irq_regs_count, irq_map_count);
		ret = -EINVAL;
		goto exit;
	}

	/* Allocate table for the irq registers */
	intr->sde_irq_size = irq_regs_count;
	intr->sde_irq_tbl = kcalloc(irq_regs_count, sizeof(*intr->sde_irq_tbl),
		GFP_KERNEL);
	if (intr->sde_irq_tbl == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Allocate table with the valid interrupts bits */
	intr->sde_irq_map_size = irq_map_count;
	intr->sde_irq_map = kcalloc(irq_map_count, sizeof(*intr->sde_irq_map),
		GFP_KERNEL);
	if (intr->sde_irq_map == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Initialize IRQs tables */
	ret = _sde_hw_intr_init_irq_tables(intr, m);
	if (ret)
		goto exit;

	/* per-register cached enable mask, used by enable/disable ops */
	intr->cache_irq_mask = kcalloc(intr->sde_irq_size,
			sizeof(*intr->cache_irq_mask), GFP_KERNEL);
	if (intr->cache_irq_mask == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* per-register saved status, used by the status-collection ops */
	intr->save_irq_status = kcalloc(intr->sde_irq_size,
			sizeof(*intr->save_irq_status), GFP_KERNEL);
	if (intr->save_irq_status == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	spin_lock_init(&intr->irq_lock);

exit:
	/* sde_hw_intr_destroy(NULL) is a safe no-op */
	if (ret) {
		sde_hw_intr_destroy(intr);
		return ERR_PTR(ret);
	}

	return intr;
}