// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/slab.h>

#include "sde_kms.h"
#include "sde_hw_interrupts.h"
#include "sde_hw_util.h"
#include "sde_hw_mdss.h"

/**
 * Register offsets in the MDSS register file for the interrupt registers
 * w.r.t. the base for that block. Base offsets for IRQs should come from the
 * device tree and get stored in the catalog (irq_offset_list) until they
 * are added to the sde_irq_tbl during table initialization.
 */
#define HW_INTR_STATUS			0x0010
#define MDP_AD4_INTR_EN_OFF		0x41c
#define MDP_AD4_INTR_CLEAR_OFF		0x424
#define MDP_AD4_INTR_STATUS_OFF		0x420
#define MDP_INTF_TEAR_INTR_EN_OFF	0x0
#define MDP_INTF_TEAR_INTR_STATUS_OFF	0x4
#define MDP_INTF_TEAR_INTR_CLEAR_OFF	0x8
#define MDP_LTM_INTR_EN_OFF		0x50
#define MDP_LTM_INTR_STATUS_OFF		0x54
#define MDP_LTM_INTR_CLEAR_OFF		0x58
#define MDP_WB_INTR_EN_OFF		0x18C
#define MDP_WB_INTR_STATUS_OFF		0x190
#define MDP_WB_INTR_CLEAR_OFF		0x194
/**
 * WB interrupt status bit definitions
 */
#define SDE_INTR_WB_0_DONE BIT(0)
#define SDE_INTR_WB_1_DONE BIT(1)
#define SDE_INTR_WB_2_DONE BIT(4)

/**
 * WDOG timer interrupt status bit definitions
 */
#define SDE_INTR_WD_TIMER_0_DONE BIT(2)
#define SDE_INTR_WD_TIMER_1_DONE BIT(3)
#define SDE_INTR_WD_TIMER_2_DONE BIT(5)
#define SDE_INTR_WD_TIMER_3_DONE BIT(6)
#define SDE_INTR_WD_TIMER_4_DONE BIT(7)

/**
 * Pingpong interrupt status bit definitions
 */
#define SDE_INTR_PING_PONG_0_DONE BIT(8)
#define SDE_INTR_PING_PONG_1_DONE BIT(9)
#define SDE_INTR_PING_PONG_2_DONE BIT(10)
#define SDE_INTR_PING_PONG_3_DONE BIT(11)
#define SDE_INTR_PING_PONG_4_DONE BIT(30)
#define SDE_INTR_PING_PONG_5_DONE BIT(31)
#define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
#define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
#define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
#define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
#define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
#define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
#define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
#define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
#define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
#define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
#define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
#define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)

/**
 * Interface interrupt status bit definitions
 */
#define SDE_INTR_INTF_0_UNDERRUN BIT(24)
#define SDE_INTR_INTF_1_UNDERRUN BIT(26)
#define SDE_INTR_INTF_2_UNDERRUN BIT(28)
#define SDE_INTR_INTF_3_UNDERRUN BIT(30)
#define SDE_INTR_INTF_0_VSYNC BIT(25)
#define SDE_INTR_INTF_1_VSYNC BIT(27)
#define SDE_INTR_INTF_2_VSYNC BIT(29)
#define SDE_INTR_INTF_3_VSYNC BIT(31)

/**
 * Ctl start interrupt status bit definitions
 */
#define SDE_INTR_CTL_0_START BIT(9)
#define SDE_INTR_CTL_1_START BIT(10)
#define SDE_INTR_CTL_2_START BIT(11)
#define SDE_INTR_CTL_3_START BIT(12)
#define SDE_INTR_CTL_4_START BIT(13)
#define SDE_INTR_CTL_5_START BIT(23)

/**
 * Ctl done interrupt status bit definitions
 */
#define SDE_INTR_CTL_0_DONE BIT(0)
#define SDE_INTR_CTL_1_DONE BIT(1)
#define SDE_INTR_CTL_2_DONE BIT(2)
#define SDE_INTR_CTL_3_DONE BIT(3)
#define SDE_INTR_CTL_4_DONE BIT(4)
#define SDE_INTR_CTL_5_DONE BIT(5)

/**
 * Concurrent WB overflow interrupt status bit definitions
 */
#define SDE_INTR_CWB_OVERFLOW BIT(29)
#define SDE_INTR_CWB_2_OVERFLOW BIT(28)

/**
 * Histogram VIG done interrupt status bit definitions
 */
#define SDE_INTR_HIST_VIG_0_DONE BIT(0)
#define SDE_INTR_HIST_VIG_1_DONE BIT(4)
#define SDE_INTR_HIST_VIG_2_DONE BIT(8)
#define SDE_INTR_HIST_VIG_3_DONE BIT(10)

/**
 * Histogram VIG reset sequence done interrupt status bit definitions
 */
#define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
#define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
#define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
#define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)

/**
 * Histogram DSPP done interrupt status bit definitions
 */
#define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
#define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
#define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
#define SDE_INTR_HIST_DSPP_3_DONE BIT(22)

/**
 * Histogram DSPP reset sequence done interrupt status bit definitions
 */
#define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
#define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
#define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
#define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)

/**
 * INTF interrupt status bit definitions
 */
#define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
#define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
#define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
#define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
#define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
#define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
#define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
#define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
#define SDE_INTR_PROG_LINE BIT(8)
#define SDE_INTR_INTF_WD_TIMER_0_DONE BIT(13)

/**
 * AD4 interrupt status bit definitions
 */
#define SDE_INTR_BRIGHTPR_UPDATED BIT(4)
#define SDE_INTR_DARKENH_UPDATED BIT(3)
#define SDE_INTR_STREN_OUTROI_UPDATED BIT(2)
#define SDE_INTR_STREN_INROI_UPDATED BIT(1)
#define SDE_INTR_BACKLIGHT_UPDATED BIT(0)

/**
 * INTF Tear IRQ register bit definitions
 */
#define SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE BIT(0)
#define SDE_INTR_INTF_TEAR_WR_PTR BIT(1)
#define SDE_INTR_INTF_TEAR_RD_PTR BIT(2)
#define SDE_INTR_INTF_TEAR_TE_DETECTED BIT(3)
#define SDE_INTR_INTF_TEAR_TEAR_DETECTED BIT(4)

/**
 * LTM interrupt status bit definitions
 */
#define SDE_INTR_LTM_STATS_DONE BIT(0)
#define SDE_INTR_LTM_STATS_WB_PB BIT(5)
/**
 * WB block interrupt status bit definitions (per-WB register set)
 */
#define SDE_INTR_WB_PROG_LINE BIT(0)
/**
 * struct sde_intr_reg - array of SDE register sets
 * @clr_off:	offset to CLEAR reg
 * @en_off:	offset to ENABLE reg
 * @status_off:	offset to STATUS reg
 * @map_idx_start:	first index in the sde_irq_map table
 * @map_idx_end:	last index in the sde_irq_map table
 */
struct sde_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
	u32 map_idx_start;
	u32 map_idx_end;
};

/**
 * struct sde_irq_type - maps each irq with i/f
 * @intr_type:		type of interrupt listed in sde_intr_type
 * @instance_idx:	instance index of the associated HW block in SDE
 * @irq_mask:		corresponding bit in the interrupt status reg
 * @reg_idx:		index in the 'sde_irq_tbl' table, identifying which
 *			register offsets to use.
 */
struct sde_irq_type {
	u32 intr_type;
	u32 instance_idx;
	u32 irq_mask;
	int reg_idx;
};
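
/*
 * Illustrative sketch (not part of the driver) of how the two structs
 * above cooperate at runtime; the index values are hypothetical:
 *
 *	const struct sde_irq_type *irq = &intr->sde_irq_map[irq_idx];
 *	const struct sde_intr_reg *reg = &intr->sde_irq_tbl[irq->reg_idx];
 *
 *	// irq->irq_mask selects a single bit within the register set
 *	// addressed by reg->en_off / reg->status_off / reg->clr_off.
 */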
/**
 * IRQ mapping tables - used to look up the irq_idx in these tables that has
 * a matching interrupt type and instance index.
 * Each of these tables is copied to a dynamically allocated
 * table, which will be used to service each of the irqs.
 * -1 indicates an uninitialized value which should be set when copying
 * these tables to the sde_irq_map.
 */
static struct sde_irq_type sde_irq_intr_map[] = {
	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_0, SDE_INTR_WB_0_DONE, -1},
	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_1, SDE_INTR_WB_1_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, -1},
	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
		SDE_INTR_PING_PONG_0_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
		SDE_INTR_PING_PONG_1_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
		SDE_INTR_PING_PONG_2_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
		SDE_INTR_PING_PONG_3_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
		SDE_INTR_PING_PONG_0_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
		SDE_INTR_PING_PONG_1_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
		SDE_INTR_PING_PONG_2_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
		SDE_INTR_PING_PONG_3_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
		SDE_INTR_PING_PONG_0_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
		SDE_INTR_PING_PONG_1_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
		SDE_INTR_PING_PONG_2_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
		SDE_INTR_PING_PONG_3_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
		SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
		SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
		SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
		SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, -1},
};
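
/*
 * Reading one entry above (illustrative): { SDE_IRQ_TYPE_INTF_VSYNC, INTF_1,
 * SDE_INTR_INTF_1_VSYNC, -1} says "vsync on interface 1 is reported by
 * BIT(27) of this register set". The -1 reg_idx placeholder is filled in by
 * _sde_hw_intr_init_irq_tables() once the owning register set is known.
 */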
static struct sde_irq_type sde_irq_intr2_map[] = {
	{ SDE_IRQ_TYPE_CTL_START, CTL_0,
		SDE_INTR_CTL_0_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_1,
		SDE_INTR_CTL_1_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_2,
		SDE_INTR_CTL_2_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_3,
		SDE_INTR_CTL_3_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_4,
		SDE_INTR_CTL_4_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_5,
		SDE_INTR_CTL_5_START, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_0,
		SDE_INTR_CTL_0_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_1,
		SDE_INTR_CTL_1_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_2,
		SDE_INTR_CTL_2_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_3,
		SDE_INTR_CTL_3_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_4,
		SDE_INTR_CTL_4_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_5,
		SDE_INTR_CTL_5_DONE, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, PINGPONG_CWB_0, SDE_INTR_CWB_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, PINGPONG_CWB_2, SDE_INTR_CWB_2_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_4,
		SDE_INTR_PING_PONG_4_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_5,
		SDE_INTR_PING_PONG_5_DONE, -1},
};
static struct sde_irq_type sde_irq_hist_map[] = {
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
		SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
		SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
		SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
		SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
		SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
		SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
		SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
		SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, -1},
};
static struct sde_irq_type sde_irq_intf_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, -1,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, -1,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, -1,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, -1,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, -1,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, -1,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, -1,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, -1,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, -1, SDE_INTR_PROG_LINE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, -1, SDE_INTR_WD_TIMER_0_DONE, -1},
};

static struct sde_irq_type sde_irq_ad4_map[] = {
	{ SDE_IRQ_TYPE_AD4_BL_DONE, -1, SDE_INTR_BACKLIGHT_UPDATED, -1},
};

static struct sde_irq_type sde_irq_intf_te_map[] = {
	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, -1,
		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, -1,
		SDE_INTR_INTF_TEAR_WR_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, -1,
		SDE_INTR_INTF_TEAR_RD_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, -1,
		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
};

static struct sde_irq_type sde_irq_ltm_map[] = {
	{ SDE_IRQ_TYPE_LTM_STATS_DONE, -1, SDE_INTR_LTM_STATS_DONE, -1},
	{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, -1, SDE_INTR_LTM_STATS_WB_PB, -1},
};

static struct sde_irq_type sde_irq_wb_map[] = {
	{ SDE_IRQ_TYPE_WB_PROG_LINE, -1, SDE_INTR_WB_PROG_LINE, -1},
};
static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
	enum sde_intr_type intr_type, u32 instance_idx)
{
	int i;

	for (i = 0; i < intr->sde_irq_map_size; i++) {
		if (intr_type == intr->sde_irq_map[i].intr_type &&
			instance_idx == intr->sde_irq_map[i].instance_idx)
			return i;
	}

	pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
			intr_type, instance_idx);

	return -EINVAL;
}
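
/*
 * Usage sketch (illustrative; the interrupt type and instance are
 * hypothetical for any given target):
 *
 *	int irq_idx = intr->ops.irq_idx_lookup(intr,
 *			SDE_IRQ_TYPE_INTF_VSYNC, INTF_1);
 *	if (irq_idx < 0)
 *		return irq_idx;	// this target has no such interrupt
 */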
static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
		void (*cbfunc)(void *, int),
		void *arg)
{
	int reg_idx;
	int irq_idx;
	int start_idx;
	int end_idx;
	u32 irq_status;
	u32 enable_mask;
	unsigned long irq_flags;

	if (!intr)
		return;

	/*
	 * The dispatcher will save the IRQ status before calling here.
	 * Now we need to go through each IRQ status and find the matching
	 * irq lookup index.
	 */
	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < intr->sde_irq_size; reg_idx++) {
		/*
		 * Each interrupt register has a dynamic range of indexes,
		 * initialized during hw_intr_init when sde_irq_tbl is created.
		 */
		start_idx = intr->sde_irq_tbl[reg_idx].map_idx_start;
		end_idx = intr->sde_irq_tbl[reg_idx].map_idx_end;

		if (start_idx >= intr->sde_irq_map_size ||
				end_idx > intr->sde_irq_map_size)
			continue;

		/* Read interrupt status */
		irq_status = SDE_REG_READ(&intr->hw,
				intr->sde_irq_tbl[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = SDE_REG_READ(&intr->hw,
				intr->sde_irq_tbl[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			SDE_REG_WRITE(&intr->hw,
					intr->sde_irq_tbl[reg_idx].clr_off,
					irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		/*
		 * Search for a matching intr status in the irq map.
		 * start_idx and end_idx define the search range in
		 * the sde_irq_map.
		 */
		for (irq_idx = start_idx;
				(irq_idx < end_idx) && irq_status;
				irq_idx++)
			if ((irq_status &
				intr->sde_irq_map[irq_idx].irq_mask) &&
				(intr->sde_irq_map[irq_idx].reg_idx ==
				 reg_idx)) {
				/*
				 * On an irq mask match, invoke the given
				 * cbfunc. cbfunc is then responsible for
				 * clearing the interrupt status. If no
				 * cbfunc is provided, clear the interrupt
				 * here.
				 */
				if (cbfunc)
					cbfunc(arg, irq_idx);
				else
					intr->ops.clear_intr_status_nolock(
							intr, irq_idx);

				/*
				 * When the callback finishes, clear the
				 * matching mask from irq_status. Once
				 * irq_status is fully cleared, the search
				 * can stop.
				 */
				irq_status &=
					~intr->sde_irq_map[irq_idx].irq_mask;
			}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
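
/*
 * Illustrative sketch of a dispatch callback (hypothetical; the real
 * consumers live above this layer, e.g. the core irq code):
 *
 *	static void demo_irq_cb(void *arg, int irq_idx)
 *	{
 *		struct sde_hw_intr *intr = arg;
 *
 *		// when a cbfunc is provided, it owns the status clearing
 *		intr->ops.clear_intr_status_nolock(intr, irq_idx);
 *	}
 *
 *	intr->ops.dispatch_irqs(intr, demo_irq_cb, intr);
 */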
static int sde_hw_intr_enable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct sde_intr_reg *reg;
	const struct sde_irq_type *irq;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	irq = &intr->sde_irq_map[irq_idx];
	reg_idx = irq->reg_idx;
	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
		return -EINVAL;
	}

	reg = &intr->sde_irq_tbl[reg_idx];
	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & irq->irq_mask) {
		dbgstr = "SDE IRQ already set:";
	} else {
		dbgstr = "SDE IRQ enabled:";

		cache_irq_mask |= irq->irq_mask;
		/* Clear any pending interrupt */
		SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
		/* Enable interrupts with the new mask */
		SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
			irq->irq_mask, cache_irq_mask);

	return 0;
}
static int sde_hw_intr_disable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct sde_intr_reg *reg;
	const struct sde_irq_type *irq;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	irq = &intr->sde_irq_map[irq_idx];
	reg_idx = irq->reg_idx;
	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
		return -EINVAL;
	}

	reg = &intr->sde_irq_tbl[reg_idx];
	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & irq->irq_mask) == 0) {
		dbgstr = "SDE IRQ is already cleared:";
	} else {
		dbgstr = "SDE IRQ mask disable:";

		cache_irq_mask &= ~irq->irq_mask;
		/* Disable interrupts based on the new mask */
		SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupt */
		SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
			irq->irq_mask, cache_irq_mask);

	return 0;
}
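
/*
 * Usage sketch (illustrative): the *_nolock variants assume the caller
 * already holds intr->irq_lock, e.g.:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&intr->irq_lock, flags);
 *	intr->ops.enable_irq_nolock(intr, irq_idx);
 *	...
 *	intr->ops.disable_irq_nolock(intr, irq_idx);
 *	spin_unlock_irqrestore(&intr->irq_lock, flags);
 */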
static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
{
	int i;

	if (!intr)
		return -EINVAL;

	for (i = 0; i < intr->sde_irq_size; i++)
		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
				0xffffffff);

	/* ensure register writes go through */
	wmb();

	return 0;
}

static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
{
	int i;

	if (!intr)
		return -EINVAL;

	for (i = 0; i < intr->sde_irq_size; i++)
		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].en_off,
				0x00000000);

	/* ensure register writes go through */
	wmb();

	return 0;
}
static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
		uint32_t *sources)
{
	if (!intr || !sources)
		return -EINVAL;

	*sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);

	return 0;
}
static void sde_hw_intr_clear_intr_status_nolock(struct sde_hw_intr *intr,
		int irq_idx)
{
	int reg_idx;

	if (!intr)
		return;

	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return;
	}

	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
		return;
	}

	SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
			intr->sde_irq_map[irq_idx].irq_mask);

	/* ensure register writes go through */
	wmb();
}

static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
		int irq_idx)
{
	unsigned long irq_flags;

	if (!intr)
		return;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	sde_hw_intr_clear_intr_status_nolock(intr, irq_idx);
	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
static u32 sde_hw_intr_get_intr_status_nolock(struct sde_hw_intr *intr,
		int irq_idx, bool clear)
{
	int reg_idx;
	u32 intr_status;

	if (!intr)
		return 0;

	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return 0;
	}

	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
		return 0;
	}

	intr_status = SDE_REG_READ(&intr->hw,
			intr->sde_irq_tbl[reg_idx].status_off) &
					intr->sde_irq_map[irq_idx].irq_mask;
	if (intr_status && clear)
		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	return intr_status;
}

static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
		int irq_idx, bool clear)
{
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return 0;
	}

	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	intr_status = SDE_REG_READ(&intr->hw,
			intr->sde_irq_tbl[reg_idx].status_off) &
					intr->sde_irq_map[irq_idx].irq_mask;
	if (intr_status && clear)
		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}
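
/*
 * Usage sketch (illustrative): polling one interrupt and consuming its
 * status bit in the same call (clear == true):
 *
 *	u32 status = intr->ops.get_interrupt_status(intr, irq_idx, true);
 *	if (status)
 *		// the interrupt had fired and has now been cleared
 */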
static int _set_sde_irq_tbl_offset_top(struct sde_intr_reg *sde_irq,
		struct sde_intr_irq_offsets *item)
{
	u32 base_offset;

	if (!sde_irq || !item)
		return -EINVAL;

	base_offset = item->base_offset;
	switch (item->instance_idx) {
	case SDE_INTR_TOP_INTR:
		sde_irq->clr_off = base_offset + INTR_CLEAR;
		sde_irq->en_off = base_offset + INTR_EN;
		sde_irq->status_off = base_offset + INTR_STATUS;
		break;
	case SDE_INTR_TOP_INTR2:
		sde_irq->clr_off = base_offset + INTR2_CLEAR;
		sde_irq->en_off = base_offset + INTR2_EN;
		sde_irq->status_off = base_offset + INTR2_STATUS;
		break;
	case SDE_INTR_TOP_HIST_INTR:
		sde_irq->clr_off = base_offset + HIST_INTR_CLEAR;
		sde_irq->en_off = base_offset + HIST_INTR_EN;
		sde_irq->status_off = base_offset + HIST_INTR_STATUS;
		break;
	default:
		pr_err("invalid TOP intr for instance %d\n",
				item->instance_idx);
		return -EINVAL;
	}

	return 0;
}

static int _set_sde_irq_tbl_offset(struct sde_intr_reg *sde_irq,
		struct sde_intr_irq_offsets *item)
{
	u32 base_offset;
	int rc = 0;

	if (!sde_irq || !item)
		return -EINVAL;

	base_offset = item->base_offset;
	switch (item->type) {
	case SDE_INTR_HWBLK_TOP:
		rc = _set_sde_irq_tbl_offset_top(sde_irq, item);
		break;
	case SDE_INTR_HWBLK_INTF:
		sde_irq->clr_off = base_offset + INTF_INTR_CLEAR;
		sde_irq->en_off = base_offset + INTF_INTR_EN;
		sde_irq->status_off = base_offset + INTF_INTR_STATUS;
		break;
	case SDE_INTR_HWBLK_AD4:
		sde_irq->clr_off = base_offset + MDP_AD4_INTR_CLEAR_OFF;
		sde_irq->en_off = base_offset + MDP_AD4_INTR_EN_OFF;
		sde_irq->status_off = base_offset + MDP_AD4_INTR_STATUS_OFF;
		break;
	case SDE_INTR_HWBLK_INTF_TEAR:
		sde_irq->clr_off = base_offset + MDP_INTF_TEAR_INTR_CLEAR_OFF;
		sde_irq->en_off = base_offset + MDP_INTF_TEAR_INTR_EN_OFF;
		sde_irq->status_off = base_offset +
				MDP_INTF_TEAR_INTR_STATUS_OFF;
		break;
	case SDE_INTR_HWBLK_LTM:
		sde_irq->clr_off = base_offset + MDP_LTM_INTR_CLEAR_OFF;
		sde_irq->en_off = base_offset + MDP_LTM_INTR_EN_OFF;
		sde_irq->status_off = base_offset + MDP_LTM_INTR_STATUS_OFF;
		break;
	case SDE_INTR_HWBLK_WB:
		sde_irq->clr_off = base_offset + MDP_WB_INTR_CLEAR_OFF;
		sde_irq->en_off = base_offset + MDP_WB_INTR_EN_OFF;
		sde_irq->status_off = base_offset + MDP_WB_INTR_STATUS_OFF;
		break;
	default:
		pr_err("unrecognized intr blk type %d\n",
				item->type);
		rc = -EINVAL;
	}

	return rc;
}
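
/*
 * Worked example (illustrative; the base offset is hypothetical): for an
 * SDE_INTR_HWBLK_INTF_TEAR item with base_offset 0x6a800, the register
 * set becomes:
 *
 *	sde_irq->en_off     = 0x6a800 + MDP_INTF_TEAR_INTR_EN_OFF;	// +0x0
 *	sde_irq->status_off = 0x6a800 + MDP_INTF_TEAR_INTR_STATUS_OFF;	// +0x4
 *	sde_irq->clr_off    = 0x6a800 + MDP_INTF_TEAR_INTR_CLEAR_OFF;	// +0x8
 */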
static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
{
	ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
	ops->enable_irq_nolock = sde_hw_intr_enable_irq_nolock;
	ops->disable_irq_nolock = sde_hw_intr_disable_irq_nolock;
	ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
	ops->clear_all_irqs = sde_hw_intr_clear_irqs;
	ops->disable_all_irqs = sde_hw_intr_disable_irqs;
	ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
	ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
	ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock;
	ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
	ops->get_intr_status_nolock = sde_hw_intr_get_intr_status_nolock;
}

static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
		void __iomem *addr, struct sde_hw_blk_reg_map *hw)
{
	if (!m || !addr || !hw || m->mdp_count == 0)
		return NULL;

	hw->base_off = addr;
	hw->blk_off = m->mdss[0].base;
	hw->hw_rev = m->hw_rev;

	return &m->mdss[0];
}
void sde_hw_intr_destroy(struct sde_hw_intr *intr)
{
	if (intr) {
		kfree(intr->sde_irq_tbl);
		kfree(intr->sde_irq_map);
		kfree(intr->cache_irq_mask);
		kfree(intr);
	}
}
static inline u32 _get_irq_map_size_top(enum sde_intr_top_intr inst)
{
	u32 ret = 0;

	switch (inst) {
	case SDE_INTR_TOP_INTR:
		ret = ARRAY_SIZE(sde_irq_intr_map);
		break;
	case SDE_INTR_TOP_INTR2:
		ret = ARRAY_SIZE(sde_irq_intr2_map);
		break;
	case SDE_INTR_TOP_HIST_INTR:
		ret = ARRAY_SIZE(sde_irq_hist_map);
		break;
	default:
		pr_err("invalid top inst:%d\n", inst);
	}

	return ret;
}

static inline u32 _get_irq_map_size(struct sde_intr_irq_offsets *item)
{
	u32 ret = 0;

	switch (item->type) {
	case SDE_INTR_HWBLK_TOP:
		ret = _get_irq_map_size_top(item->instance_idx);
		break;
	case SDE_INTR_HWBLK_INTF:
		ret = ARRAY_SIZE(sde_irq_intf_map);
		break;
	case SDE_INTR_HWBLK_AD4:
		ret = ARRAY_SIZE(sde_irq_ad4_map);
		break;
	case SDE_INTR_HWBLK_INTF_TEAR:
		ret = ARRAY_SIZE(sde_irq_intf_te_map);
		break;
	case SDE_INTR_HWBLK_LTM:
		ret = ARRAY_SIZE(sde_irq_ltm_map);
		break;
	case SDE_INTR_HWBLK_WB:
		ret = ARRAY_SIZE(sde_irq_wb_map);
		break;
	default:
		pr_err("invalid type: %d\n", item->type);
	}

	return ret;
}

static inline struct sde_irq_type *_get_irq_map_addr_top(
		enum sde_intr_top_intr inst)
{
	struct sde_irq_type *ret = NULL;

	switch (inst) {
	case SDE_INTR_TOP_INTR:
		ret = sde_irq_intr_map;
		break;
	case SDE_INTR_TOP_INTR2:
		ret = sde_irq_intr2_map;
		break;
	case SDE_INTR_TOP_HIST_INTR:
		ret = sde_irq_hist_map;
		break;
	default:
		pr_err("invalid top inst:%d\n", inst);
	}

	return ret;
}

static inline struct sde_irq_type *_get_irq_map_addr(
		struct sde_intr_irq_offsets *item)
{
	struct sde_irq_type *ret = NULL;

	switch (item->type) {
	case SDE_INTR_HWBLK_TOP:
		ret = _get_irq_map_addr_top(item->instance_idx);
		break;
	case SDE_INTR_HWBLK_INTF:
		ret = sde_irq_intf_map;
		break;
	case SDE_INTR_HWBLK_AD4:
		ret = sde_irq_ad4_map;
		break;
	case SDE_INTR_HWBLK_INTF_TEAR:
		ret = sde_irq_intf_te_map;
		break;
	case SDE_INTR_HWBLK_LTM:
		ret = sde_irq_ltm_map;
		break;
	case SDE_INTR_HWBLK_WB:
		ret = sde_irq_wb_map;
		break;
	default:
		pr_err("invalid type: %d\n", item->type);
	}

	return ret;
}
static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
	struct sde_intr_irq_offsets *item, u32 low_idx, u32 high_idx)
{
	int i, j = 0;
	struct sde_irq_type *src = _get_irq_map_addr(item);
	u32 src_size = _get_irq_map_size(item);

	if (!src)
		return -EINVAL;

	if (low_idx >= size || high_idx > size ||
		(high_idx - low_idx > src_size)) {
		pr_err("invalid size l:%d h:%d dst:%d src:%d\n",
				low_idx, high_idx, size, src_size);
		return -EINVAL;
	}

	for (i = low_idx; i < high_idx; i++)
		sde_irq_map[i] = src[j++];

	return 0;
}
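
/*
 * Usage sketch (illustrative): copying one block's map into a slice of the
 * flattened sde_irq_map, with [low_idx, high_idx) sized by
 * _get_irq_map_size(item):
 *
 *	if (_sde_copy_regs(intr->sde_irq_map, intr->sde_irq_map_size,
 *			item, low_idx, high_idx))
 *		return -EINVAL;	// bad slice bounds or unknown block type
 */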
static int _sde_hw_intr_init_irq_tables(struct sde_hw_intr *intr,
	struct sde_mdss_cfg *m)
{
	struct sde_intr_irq_offsets *item;
	int i, sde_irq_tbl_idx = 0, ret = 0;
	u32 low_idx, high_idx;
	u32 sde_irq_map_idx = 0;

	/* Initialize the offsets in the sde_irq_map & sde_irq_tbl tables */
	list_for_each_entry(item, &m->irq_offset_list, list) {
		low_idx = sde_irq_map_idx;
		high_idx = low_idx + _get_irq_map_size(item);

		if (sde_irq_tbl_idx >= intr->sde_irq_size ||
				sde_irq_tbl_idx < 0) {
			ret = -EINVAL;
			goto exit;
		}

		/* init sde_irq_map with the global irq mapping table */
		if (_sde_copy_regs(intr->sde_irq_map, intr->sde_irq_map_size,
				item, low_idx, high_idx)) {
			ret = -EINVAL;
			goto exit;
		}

		/* init the irq map with its reg & instance idxs in the irq tbl */
		for (i = low_idx; i < high_idx; i++) {
			intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
			if (item->type != SDE_INTR_HWBLK_TOP)
				intr->sde_irq_map[i].instance_idx =
						item->instance_idx;
			pr_debug("sde_irq_map[%d].reg_idx=%d .inst_idx = %d\n",
					i, sde_irq_tbl_idx, item->instance_idx);
		}

		/*
		 * Track this irq's index range within sde_irq_map, so the
		 * irq dispatch only needs to scan the entries belonging to
		 * this register set.
		 */
		intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start = low_idx;
		intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end = high_idx;

		ret = _set_sde_irq_tbl_offset(
				&intr->sde_irq_tbl[sde_irq_tbl_idx], item);
		if (ret)
			goto exit;

		/* increment the idx for both tables accordingly */
		sde_irq_tbl_idx++;
		sde_irq_map_idx = high_idx;
	}

exit:
	sde_hw_catalog_irq_offset_list_delete(&m->irq_offset_list);
	return ret;
}
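
/*
 * Resulting layout (illustrative, for a hypothetical irq_offset_list of
 * TOP/INTR, TOP/INTR2 and one INTF_TEAR block):
 *
 *	sde_irq_tbl[0] = INTR regs,	map_idx [0, N1)
 *	sde_irq_tbl[1] = INTR2 regs,	map_idx [N1, N2)
 *	sde_irq_tbl[2] = TEAR regs,	map_idx [N2, N3)
 *
 * Every sde_irq_map[i].reg_idx within a slice points back at the
 * sde_irq_tbl entry that owns it.
 */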
struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
		struct sde_mdss_cfg *m)
{
	struct sde_hw_intr *intr = NULL;
	struct sde_mdss_base_cfg *cfg;
	struct sde_intr_irq_offsets *item;
	u32 irq_regs_count = 0;
	u32 irq_map_count = 0;
	u32 size;
	int ret = 0;

	if (!addr || !m) {
		ret = -EINVAL;
		goto exit;
	}

	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
	if (!intr) {
		ret = -ENOMEM;
		goto exit;
	}

	cfg = __intr_offset(m, addr, &intr->hw);
	if (!cfg) {
		ret = -EINVAL;
		goto exit;
	}
	__setup_intr_ops(&intr->ops);

	/* check how many irqs this target supports */
	list_for_each_entry(item, &m->irq_offset_list, list) {
		size = _get_irq_map_size(item);
		if (!size || irq_map_count >= UINT_MAX - size) {
			pr_err("wrong map cnt idx:%d blk:%d/%d sz:%d cnt:%d\n",
					irq_regs_count, item->type,
					item->instance_idx, size,
					irq_map_count);
			ret = -EINVAL;
			goto exit;
		}

		irq_regs_count++;
		irq_map_count += size;
	}

	if (irq_regs_count == 0 || irq_map_count == 0) {
		pr_err("invalid irq map: %d %d\n",
				irq_regs_count, irq_map_count);
		ret = -EINVAL;
		goto exit;
	}

	/* Allocate the table for the irq registers */
	intr->sde_irq_size = irq_regs_count;
	intr->sde_irq_tbl = kcalloc(irq_regs_count, sizeof(*intr->sde_irq_tbl),
			GFP_KERNEL);
	if (intr->sde_irq_tbl == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Allocate the table with the valid interrupt bits */
	intr->sde_irq_map_size = irq_map_count;
	intr->sde_irq_map = kcalloc(irq_map_count, sizeof(*intr->sde_irq_map),
			GFP_KERNEL);
	if (intr->sde_irq_map == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Initialize the IRQ tables */
	ret = _sde_hw_intr_init_irq_tables(intr, m);
	if (ret)
		goto exit;

	intr->cache_irq_mask = kcalloc(intr->sde_irq_size,
			sizeof(*intr->cache_irq_mask), GFP_KERNEL);
	if (intr->cache_irq_mask == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	spin_lock_init(&intr->irq_lock);

exit:
	if (ret) {
		sde_hw_intr_destroy(intr);
		return ERR_PTR(ret);
	}

	return intr;
}
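
/*
 * Lifecycle sketch (illustrative; error handling elided). "m" is the parsed
 * hardware catalog and "addr" the mapped MDSS register base:
 *
 *	struct sde_hw_intr *intr = sde_hw_intr_init(addr, m);
 *
 *	if (!IS_ERR_OR_NULL(intr)) {
 *		intr->ops.clear_all_irqs(intr);
 *		intr->ops.disable_all_irqs(intr);
 *		...
 *		sde_hw_intr_destroy(intr);
 *	}
 */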