sde_hw_interrupts.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/bitops.h>
  6. #include <linux/slab.h>
  7. #include "sde_kms.h"
  8. #include "sde_hw_interrupts.h"
  9. #include "sde_hw_util.h"
  10. #include "sde_hw_mdss.h"
  11. /**
  12. * Register offsets in MDSS register file for the interrupt registers
  13. * w.r.t. to the MDSS base
  14. */
  15. #define HW_INTR_STATUS 0x0010
  16. #define MDP_SSPP_TOP0_OFF 0x1000
  17. #define MDP_INTF_0_OFF 0x6B000
  18. #define MDP_INTF_1_OFF 0x6B800
  19. #define MDP_INTF_2_OFF 0x6C000
  20. #define MDP_INTF_3_OFF 0x6C800
  21. #define MDP_INTF_4_OFF 0x6D000
  22. #define MDP_AD4_0_OFF 0x7D000
  23. #define MDP_AD4_1_OFF 0x7E000
  24. #define MDP_AD4_INTR_EN_OFF 0x41c
  25. #define MDP_AD4_INTR_CLEAR_OFF 0x424
  26. #define MDP_AD4_INTR_STATUS_OFF 0x420
  27. #define MDP_INTF_TEAR_INTF_1_IRQ_OFF 0x6E800
  28. #define MDP_INTF_TEAR_INTF_2_IRQ_OFF 0x6E900
  29. #define MDP_INTF_TEAR_INTR_EN_OFF 0x0
  30. #define MDP_INTF_TEAR_INTR_STATUS_OFF 0x4
  31. #define MDP_INTF_TEAR_INTR_CLEAR_OFF 0x8
  32. #define MDP_LTM_0_OFF 0x7F000
  33. #define MDP_LTM_1_OFF 0x7F100
  34. #define MDP_LTM_INTR_EN_OFF 0x50
  35. #define MDP_LTM_INTR_STATUS_OFF 0x54
  36. #define MDP_LTM_INTR_CLEAR_OFF 0x58
  37. /**
  38. * WB interrupt status bit definitions
  39. */
  40. #define SDE_INTR_WB_0_DONE BIT(0)
  41. #define SDE_INTR_WB_1_DONE BIT(1)
  42. #define SDE_INTR_WB_2_DONE BIT(4)
  43. /**
  44. * WDOG timer interrupt status bit definitions
  45. */
  46. #define SDE_INTR_WD_TIMER_0_DONE BIT(2)
  47. #define SDE_INTR_WD_TIMER_1_DONE BIT(3)
  48. #define SDE_INTR_WD_TIMER_2_DONE BIT(5)
  49. #define SDE_INTR_WD_TIMER_3_DONE BIT(6)
  50. #define SDE_INTR_WD_TIMER_4_DONE BIT(7)
  51. /**
  52. * Pingpong interrupt status bit definitions
  53. */
  54. #define SDE_INTR_PING_PONG_0_DONE BIT(8)
  55. #define SDE_INTR_PING_PONG_1_DONE BIT(9)
  56. #define SDE_INTR_PING_PONG_2_DONE BIT(10)
  57. #define SDE_INTR_PING_PONG_3_DONE BIT(11)
  58. #define SDE_INTR_PING_PONG_4_DONE BIT(30)
  59. #define SDE_INTR_PING_PONG_5_DONE BIT(31)
  60. #define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
  61. #define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
  62. #define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
  63. #define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
  64. #define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
  65. #define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
  66. #define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
  67. #define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
  68. #define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
  69. #define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
  70. #define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
  71. #define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
  72. /**
  73. * Interface interrupt status bit definitions
  74. */
  75. #define SDE_INTR_INTF_0_UNDERRUN BIT(24)
  76. #define SDE_INTR_INTF_1_UNDERRUN BIT(26)
  77. #define SDE_INTR_INTF_2_UNDERRUN BIT(28)
  78. #define SDE_INTR_INTF_3_UNDERRUN BIT(30)
  79. #define SDE_INTR_INTF_0_VSYNC BIT(25)
  80. #define SDE_INTR_INTF_1_VSYNC BIT(27)
  81. #define SDE_INTR_INTF_2_VSYNC BIT(29)
  82. #define SDE_INTR_INTF_3_VSYNC BIT(31)
  83. /**
  84. * Pingpong Secondary interrupt status bit definitions
  85. */
  86. #define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
  87. #define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
  88. #define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8)
  89. #define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
  90. #define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
  91. /**
  92. * Pingpong TEAR detection interrupt status bit definitions
  93. */
  94. #define SDE_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
  95. #define SDE_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
  96. #define SDE_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
  97. #define SDE_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
  98. /**
  99. * Pingpong TE detection interrupt status bit definitions
  100. */
  101. #define SDE_INTR_PING_PONG_0_TE_DETECTED BIT(24)
  102. #define SDE_INTR_PING_PONG_1_TE_DETECTED BIT(25)
  103. #define SDE_INTR_PING_PONG_2_TE_DETECTED BIT(26)
  104. #define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)
  105. /**
  106. * Ctl start interrupt status bit definitions
  107. */
  108. #define SDE_INTR_CTL_0_START BIT(9)
  109. #define SDE_INTR_CTL_1_START BIT(10)
  110. #define SDE_INTR_CTL_2_START BIT(11)
  111. #define SDE_INTR_CTL_3_START BIT(12)
  112. #define SDE_INTR_CTL_4_START BIT(13)
  113. #define SDE_INTR_CTL_5_START BIT(23)
  114. /**
  115. * Concurrent WB overflow interrupt status bit definitions
  116. */
  117. #define SDE_INTR_CWB_1_OVERFLOW BIT(8)
  118. #define SDE_INTR_CWB_2_OVERFLOW BIT(14)
  119. #define SDE_INTR_CWB_3_OVERFLOW BIT(15)
  120. #define SDE_INTR_CWB_4_OVERFLOW BIT(20)
  121. #define SDE_INTR_CWB_5_OVERFLOW BIT(21)
  122. /**
  123. * Histogram VIG done interrupt status bit definitions
  124. */
  125. #define SDE_INTR_HIST_VIG_0_DONE BIT(0)
  126. #define SDE_INTR_HIST_VIG_1_DONE BIT(4)
  127. #define SDE_INTR_HIST_VIG_2_DONE BIT(8)
  128. #define SDE_INTR_HIST_VIG_3_DONE BIT(10)
  129. /**
  130. * Histogram VIG reset Sequence done interrupt status bit definitions
  131. */
  132. #define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
  133. #define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
  134. #define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
  135. #define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
  136. /**
  137. * Histogram DSPP done interrupt status bit definitions
  138. */
  139. #define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
  140. #define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
  141. #define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
  142. #define SDE_INTR_HIST_DSPP_3_DONE BIT(22)
  143. /**
  144. * Histogram DSPP reset Sequence done interrupt status bit definitions
  145. */
  146. #define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
  147. #define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
  148. #define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
  149. #define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
  150. /**
  151. * INTF interrupt status bit definitions
  152. */
  153. #define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
  154. #define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
  155. #define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
  156. #define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
  157. #define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
  158. #define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
  159. #define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
  160. #define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
  161. #define SDE_INTR_PROG_LINE BIT(8)
  162. /**
  163. * AD4 interrupt status bit definitions
  164. */
  165. #define SDE_INTR_BRIGHTPR_UPDATED BIT(4)
  166. #define SDE_INTR_DARKENH_UPDATED BIT(3)
  167. #define SDE_INTR_STREN_OUTROI_UPDATED BIT(2)
  168. #define SDE_INTR_STREN_INROI_UPDATED BIT(1)
  169. #define SDE_INTR_BACKLIGHT_UPDATED BIT(0)
  170. /**
  171. * INTF Tear IRQ register bit definitions
  172. */
  173. #define SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE BIT(0)
  174. #define SDE_INTR_INTF_TEAR_WR_PTR BIT(1)
  175. #define SDE_INTR_INTF_TEAR_RD_PTR BIT(2)
  176. #define SDE_INTR_INTF_TEAR_TE_DETECTED BIT(3)
  177. #define SDE_INTR_INTF_TEAR_TEAR_DETECTED BIT(4)
  178. /**
  179. * LTM interrupt status bit definitions
  180. */
  181. #define SDE_INTR_LTM_STATS_DONE BIT(0)
  182. #define SDE_INTR_LTM_STATS_WB_PB BIT(5)
/**
 * struct sde_intr_reg - describes one interrupt register set (CLEAR/EN/STATUS)
 * @clr_off:	offset to CLEAR reg
 * @en_off:	offset to ENABLE reg
 * @status_off:	offset to STATUS reg
 * @sde_irq_idx:	global index in the 'sde_irq_map' table,
 *		to know which interrupt type, instance, mask, etc. to use
 * @map_idx_start:	first offset in the sde_irq_map table
 * @map_idx_end:	last offset in the sde_irq_map table
 */
struct sde_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
	int sde_irq_idx;
	u32 map_idx_start;
	u32 map_idx_end;
};
/**
 * struct sde_irq_type - maps each irq with i/f
 * @intr_type:	type of interrupt listed in sde_intr_type
 * @instance_idx:	instance index of the associated HW block in SDE
 * @irq_mask:	corresponding bit in the interrupt status reg
 * @reg_idx:	index in the 'sde_irq_tbl' table, to know which
 *		register offsets to use. -1 = invalid offset
 */
struct sde_irq_type {
	u32 intr_type;
	u32 instance_idx;
	u32 irq_mask;
	int reg_idx;
};
  215. /**
  216. * IRQ mapping tables - use for lookup an irq_idx in this table that have
  217. * a matching interface type and instance index.
  218. * Each of these tables are copied to a dynamically allocated
  219. * table, that will be used to service each of the irqs
  220. */
  221. static struct sde_irq_type sde_irq_intr_map[] = {
  222. { SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, -1},
  223. { SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, 0},
  224. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, -1},
  225. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, -1},
  226. { SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, -1},
  227. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, -1},
  228. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, -1},
  229. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, -1},
  230. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
  231. SDE_INTR_PING_PONG_0_DONE, -1},
  232. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
  233. SDE_INTR_PING_PONG_1_DONE, -1},
  234. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
  235. SDE_INTR_PING_PONG_2_DONE, -1},
  236. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
  237. SDE_INTR_PING_PONG_3_DONE, -1},
  238. { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
  239. SDE_INTR_PING_PONG_0_RD_PTR, -1},
  240. { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
  241. SDE_INTR_PING_PONG_1_RD_PTR, -1},
  242. { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
  243. SDE_INTR_PING_PONG_2_RD_PTR, -1},
  244. { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
  245. SDE_INTR_PING_PONG_3_RD_PTR, -1},
  246. { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
  247. SDE_INTR_PING_PONG_0_WR_PTR, -1},
  248. { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
  249. SDE_INTR_PING_PONG_1_WR_PTR, -1},
  250. { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
  251. SDE_INTR_PING_PONG_2_WR_PTR, -1},
  252. { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
  253. SDE_INTR_PING_PONG_3_WR_PTR, -1},
  254. { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
  255. SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, -1},
  256. { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
  257. SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, -1},
  258. { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
  259. SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, -1},
  260. { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
  261. SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, -1},
  262. { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, -1},
  263. { SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, -1},
  264. { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, -1},
  265. { SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, -1},
  266. { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, -1},
  267. { SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, -1},
  268. { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, -1},
  269. { SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, -1},
  270. };
/*
 * Second top-level interrupt register map: CTL start, concurrent-WB
 * overflow, and pingpong tear/TE detection status bits.
 * reg_idx = -1 means "invalid offset" (see struct sde_irq_type).
 */
static struct sde_irq_type sde_irq_intr2_map[] = {
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_RD_PTR, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_0,
		SDE_INTR_CTL_0_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_1,
		SDE_INTR_CTL_1_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_2,
		SDE_INTR_CTL_2_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_3,
		SDE_INTR_CTL_3_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_4,
		SDE_INTR_CTL_4_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_5,
		SDE_INTR_CTL_5_START, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
		SDE_INTR_PING_PONG_0_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
		SDE_INTR_PING_PONG_1_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
		SDE_INTR_PING_PONG_2_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
		SDE_INTR_PING_PONG_3_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_4, SDE_INTR_CWB_4_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_5, SDE_INTR_CWB_5_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
		SDE_INTR_PING_PONG_0_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
		SDE_INTR_PING_PONG_1_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
		SDE_INTR_PING_PONG_2_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
		SDE_INTR_PING_PONG_3_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_4,
		SDE_INTR_PING_PONG_4_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_5,
		SDE_INTR_PING_PONG_5_DONE, -1},
};
/*
 * Histogram interrupt register map: done and reset-sequence-done bits for
 * the VIG pipes and DSPP blocks. reg_idx = -1 means "invalid offset".
 */
static struct sde_irq_type sde_irq_hist_map[] = {
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
		SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
		SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
		SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
		SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
		SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
		SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
		SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
		SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, -1},
};
/*
 * INTF_0 interrupt register map: static-frame entry/exit (SFI) for video
 * and DSI command modes, plus the programmable-line irq.
 */
static struct sde_irq_type sde_irq_intf0_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_0, SDE_INTR_PROG_LINE, -1},
};
/*
 * INTF_1 interrupt register map: same SFI/prog-line layout as INTF_0.
 * NOTE(review): identifier says "inf1" while siblings use "intf0"/"intf2";
 * kept as-is since it may be referenced elsewhere in the file.
 */
static struct sde_irq_type sde_irq_inf1_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_1, SDE_INTR_PROG_LINE, -1},
};
/* INTF_2 interrupt register map: same SFI/prog-line layout as INTF_0. */
static struct sde_irq_type sde_irq_intf2_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_2, SDE_INTR_PROG_LINE, -1},
};
/* INTF_3 interrupt register map: same SFI/prog-line layout as INTF_0. */
static struct sde_irq_type sde_irq_intf3_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_3, SDE_INTR_PROG_LINE, -1},
};
/*
 * INTF_4 interrupt register map: same SFI/prog-line layout as INTF_0.
 * NOTE(review): identifier says "inf4" (cf. "intf0") — kept for external
 * references.
 */
static struct sde_irq_type sde_irq_inf4_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_4, SDE_INTR_PROG_LINE, -1},
};
/* AD4_0 block irq map: backlight-updated irq, routed to DSPP_0. */
static struct sde_irq_type sde_irq_ad4_0_map[] = {
	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_0, SDE_INTR_BACKLIGHT_UPDATED, -1},
};
/* AD4_1 block irq map: backlight-updated irq, routed to DSPP_1. */
static struct sde_irq_type sde_irq_ad4_1_map[] = {
	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_1, SDE_INTR_BACKLIGHT_UPDATED, -1},
};
/* INTF_1 tear-check irq register map: autorefresh, wr/rd pointer, tear. */
static struct sde_irq_type sde_irq_intf1_te_map[] = {
	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_1,
		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_1,
		SDE_INTR_INTF_TEAR_WR_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_1,
		SDE_INTR_INTF_TEAR_RD_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_1,
		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
};
/* INTF_2 tear-check irq register map: autorefresh, wr/rd pointer, tear. */
static struct sde_irq_type sde_irq_intf2_te_map[] = {
	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_2,
		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_2,
		SDE_INTR_INTF_TEAR_WR_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_2,
		SDE_INTR_INTF_TEAR_RD_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_2,
		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
};
/* LTM_0 block irq map: stats-done and stats-wb-pb irqs, routed to DSPP_0. */
static struct sde_irq_type sde_irq_ltm_0_map[] = {
	{ SDE_IRQ_TYPE_LTM_STATS_DONE, DSPP_0, SDE_INTR_LTM_STATS_DONE, -1},
	{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, DSPP_0, SDE_INTR_LTM_STATS_WB_PB, -1},
};
/* LTM_1 block irq map: stats-done and stats-wb-pb irqs, routed to DSPP_1. */
static struct sde_irq_type sde_irq_ltm_1_map[] = {
	{ SDE_IRQ_TYPE_LTM_STATS_DONE, DSPP_1, SDE_INTR_LTM_STATS_DONE, -1},
	{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, DSPP_1, SDE_INTR_LTM_STATS_WB_PB, -1},
};
  474. static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
  475. enum sde_intr_type intr_type, u32 instance_idx)
  476. {
  477. int i;
  478. for (i = 0; i < intr->sde_irq_map_size; i++) {
  479. if (intr_type == intr->sde_irq_map[i].intr_type &&
  480. instance_idx == intr->sde_irq_map[i].instance_idx)
  481. return i;
  482. }
  483. pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
  484. intr_type, instance_idx);
  485. return -EINVAL;
  486. }
  487. static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off,
  488. uint32_t mask)
  489. {
  490. if (!intr)
  491. return;
  492. SDE_REG_WRITE(&intr->hw, reg_off, mask);
  493. /* ensure register writes go through */
  494. wmb();
  495. }
/**
 * sde_hw_intr_dispatch_irq - dispatch previously-saved irq statuses
 * @intr:	interrupt handler context; no-op if NULL
 * @cbfunc:	callback invoked as cbfunc(arg, irq_idx) for each pending irq;
 *		the callback is responsible for clearing the irq status.
 *		If NULL, the status is cleared here via
 *		ops.clear_intr_status_nolock instead.
 * @arg:	opaque pointer passed through to @cbfunc
 *
 * Walks every interrupt register set in sde_irq_tbl, and for each saved
 * status word scans the [map_idx_start, map_idx_end) slice of sde_irq_map
 * for entries whose mask bit is set. Runs entirely under intr->irq_lock.
 */
static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
		void (*cbfunc)(void *, int),
		void *arg)
{
	int reg_idx;
	int irq_idx;
	int start_idx;
	int end_idx;
	u32 irq_status;
	unsigned long irq_flags;
	int sde_irq_idx;

	if (!intr)
		return;

	/*
	 * The dispatcher will save the IRQ status before calling here.
	 * Now need to go through each IRQ status and find matching
	 * irq lookup index.
	 */
	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < intr->sde_irq_size; reg_idx++) {
		irq_status = intr->save_irq_status[reg_idx];

		/* get the global offset in 'sde_irq_map' */
		sde_irq_idx = intr->sde_irq_tbl[reg_idx].sde_irq_idx;
		/* register set not associated with any irq map slice: skip */
		if (sde_irq_idx < 0)
			continue;

		/*
		 * Each Interrupt register has dynamic range of indexes,
		 * initialized during hw_intr_init when sde_irq_tbl is created.
		 */
		start_idx = intr->sde_irq_tbl[reg_idx].map_idx_start;
		end_idx = intr->sde_irq_tbl[reg_idx].map_idx_end;

		/* defensively skip ranges that would index past sde_irq_map */
		if (start_idx >= intr->sde_irq_map_size ||
				end_idx > intr->sde_irq_map_size)
			continue;

		/*
		 * Search through matching intr status from irq map.
		 * start_idx and end_idx defined the search range in
		 * the sde_irq_map. The scan stops early once irq_status
		 * has been fully consumed.
		 */
		for (irq_idx = start_idx;
				(irq_idx < end_idx) && irq_status;
				irq_idx++)
			if ((irq_status &
				intr->sde_irq_map[irq_idx].irq_mask) &&
				(intr->sde_irq_map[irq_idx].reg_idx ==
				reg_idx)) {
				/*
				 * Once a match on irq mask, perform a callback
				 * to the given cbfunc. cbfunc will take care
				 * the interrupt status clearing. If cbfunc is
				 * not provided, then the interrupt clearing
				 * is here.
				 */
				if (cbfunc)
					cbfunc(arg, irq_idx);
				else
					intr->ops.clear_intr_status_nolock(
							intr, irq_idx);

				/*
				 * When callback finish, clear the irq_status
				 * with the matching mask. Once irq_status
				 * is all cleared, the search can be stopped.
				 */
				irq_status &=
					~intr->sde_irq_map[irq_idx].irq_mask;
			}
	}
	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
  565. static int sde_hw_intr_enable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
  566. {
  567. int reg_idx;
  568. const struct sde_intr_reg *reg;
  569. const struct sde_irq_type *irq;
  570. const char *dbgstr = NULL;
  571. uint32_t cache_irq_mask;
  572. if (!intr)
  573. return -EINVAL;
  574. if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
  575. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  576. return -EINVAL;
  577. }
  578. irq = &intr->sde_irq_map[irq_idx];
  579. reg_idx = irq->reg_idx;
  580. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  581. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  582. return -EINVAL;
  583. }
  584. reg = &intr->sde_irq_tbl[reg_idx];
  585. cache_irq_mask = intr->cache_irq_mask[reg_idx];
  586. if (cache_irq_mask & irq->irq_mask) {
  587. dbgstr = "SDE IRQ already set:";
  588. } else {
  589. dbgstr = "SDE IRQ enabled:";
  590. cache_irq_mask |= irq->irq_mask;
  591. /* Cleaning any pending interrupt */
  592. SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
  593. /* Enabling interrupts with the new mask */
  594. SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
  595. /* ensure register write goes through */
  596. wmb();
  597. intr->cache_irq_mask[reg_idx] = cache_irq_mask;
  598. }
  599. pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
  600. irq->irq_mask, cache_irq_mask);
  601. return 0;
  602. }
  603. static int sde_hw_intr_disable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
  604. {
  605. int reg_idx;
  606. const struct sde_intr_reg *reg;
  607. const struct sde_irq_type *irq;
  608. const char *dbgstr = NULL;
  609. uint32_t cache_irq_mask;
  610. if (!intr)
  611. return -EINVAL;
  612. if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
  613. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  614. return -EINVAL;
  615. }
  616. irq = &intr->sde_irq_map[irq_idx];
  617. reg_idx = irq->reg_idx;
  618. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  619. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  620. return -EINVAL;
  621. }
  622. reg = &intr->sde_irq_tbl[reg_idx];
  623. cache_irq_mask = intr->cache_irq_mask[reg_idx];
  624. if ((cache_irq_mask & irq->irq_mask) == 0) {
  625. dbgstr = "SDE IRQ is already cleared:";
  626. } else {
  627. dbgstr = "SDE IRQ mask disable:";
  628. cache_irq_mask &= ~irq->irq_mask;
  629. /* Disable interrupts based on the new mask */
  630. SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
  631. /* Cleaning any pending interrupt */
  632. SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
  633. /* ensure register write goes through */
  634. wmb();
  635. intr->cache_irq_mask[reg_idx] = cache_irq_mask;
  636. }
  637. pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
  638. irq->irq_mask, cache_irq_mask);
  639. return 0;
  640. }
  641. static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
  642. {
  643. int i;
  644. if (!intr)
  645. return -EINVAL;
  646. for (i = 0; i < intr->sde_irq_size; i++)
  647. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
  648. 0xffffffff);
  649. /* ensure register writes go through */
  650. wmb();
  651. return 0;
  652. }
  653. static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
  654. {
  655. int i;
  656. if (!intr)
  657. return -EINVAL;
  658. for (i = 0; i < intr->sde_irq_size; i++)
  659. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].en_off,
  660. 0x00000000);
  661. /* ensure register writes go through */
  662. wmb();
  663. return 0;
  664. }
  665. static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr,
  666. uint32_t *mask)
  667. {
  668. if (!intr || !mask)
  669. return -EINVAL;
  670. *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
  671. | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
  672. return 0;
  673. }
  674. static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
  675. uint32_t *sources)
  676. {
  677. if (!intr || !sources)
  678. return -EINVAL;
  679. *sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);
  680. return 0;
  681. }
/*
 * sde_hw_intr_get_interrupt_statuses - latch and ack all IRQ status registers
 * @intr: interrupt handler context
 *
 * Snapshots every status register into save_irq_status[], writes the raw
 * status back to the clear register to ack the hardware, then masks the
 * saved copy with the enable register so only enabled bits remain for the
 * dispatcher. The whole sweep runs under irq_lock so the saved snapshot is
 * consistent with the enable masks.
 */
static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr)
{
	int i;
	u32 enable_mask;
	unsigned long irq_flags;

	if (!intr)
		return;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (i = 0; i < intr->sde_irq_size; i++) {
		/* Read interrupt status */
		intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
				intr->sde_irq_tbl[i].status_off);

		/* Read enable mask */
		enable_mask = SDE_REG_READ(&intr->hw,
				intr->sde_irq_tbl[i].en_off);

		/* and clear the interrupt (only touch hw if something fired) */
		if (intr->save_irq_status[i])
			SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
					intr->save_irq_status[i]);

		/* Finally update IRQ status based on enable mask */
		intr->save_irq_status[i] &= enable_mask;
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
  708. static void sde_hw_intr_clear_intr_status_force_mask(struct sde_hw_intr *intr,
  709. int irq_idx, u32 irq_mask)
  710. {
  711. int reg_idx;
  712. if (!intr)
  713. return;
  714. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  715. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  716. return;
  717. }
  718. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  719. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  720. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  721. return;
  722. }
  723. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  724. irq_mask);
  725. /* ensure register writes go through */
  726. wmb();
  727. }
  728. static void sde_hw_intr_clear_intr_status_nolock(struct sde_hw_intr *intr,
  729. int irq_idx)
  730. {
  731. int reg_idx;
  732. if (!intr)
  733. return;
  734. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  735. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  736. return;
  737. }
  738. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  739. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  740. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  741. return;
  742. }
  743. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  744. intr->sde_irq_map[irq_idx].irq_mask);
  745. /* ensure register writes go through */
  746. wmb();
  747. }
  748. static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
  749. int irq_idx)
  750. {
  751. unsigned long irq_flags;
  752. if (!intr)
  753. return;
  754. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  755. sde_hw_intr_clear_intr_status_nolock(intr, irq_idx);
  756. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  757. }
  758. static u32 sde_hw_intr_get_intr_status_nolock(struct sde_hw_intr *intr,
  759. int irq_idx, bool clear)
  760. {
  761. int reg_idx;
  762. u32 intr_status;
  763. if (!intr)
  764. return 0;
  765. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  766. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  767. return 0;
  768. }
  769. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  770. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  771. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  772. return 0;
  773. }
  774. intr_status = SDE_REG_READ(&intr->hw,
  775. intr->sde_irq_tbl[reg_idx].status_off) &
  776. intr->sde_irq_map[irq_idx].irq_mask;
  777. if (intr_status && clear)
  778. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  779. intr_status);
  780. /* ensure register writes go through */
  781. wmb();
  782. return intr_status;
  783. }
  784. static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
  785. int irq_idx, bool clear)
  786. {
  787. int reg_idx;
  788. unsigned long irq_flags;
  789. u32 intr_status;
  790. if (!intr)
  791. return 0;
  792. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  793. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  794. return 0;
  795. }
  796. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  797. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  798. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  799. return 0;
  800. }
  801. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  802. intr_status = SDE_REG_READ(&intr->hw,
  803. intr->sde_irq_tbl[reg_idx].status_off) &
  804. intr->sde_irq_map[irq_idx].irq_mask;
  805. if (intr_status && clear)
  806. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  807. intr_status);
  808. /* ensure register writes go through */
  809. wmb();
  810. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  811. return intr_status;
  812. }
  813. static u32 sde_hw_intr_get_intr_status_nomask(struct sde_hw_intr *intr,
  814. int irq_idx, bool clear)
  815. {
  816. int reg_idx;
  817. unsigned long irq_flags;
  818. u32 intr_status = 0;
  819. if (!intr)
  820. return 0;
  821. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  822. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  823. return 0;
  824. }
  825. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  826. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  827. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  828. return 0;
  829. }
  830. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  831. intr_status = SDE_REG_READ(&intr->hw,
  832. intr->sde_irq_tbl[reg_idx].status_off);
  833. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  834. return intr_status;
  835. }
  836. static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
  837. {
  838. ops->set_mask = sde_hw_intr_set_mask;
  839. ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
  840. ops->enable_irq_nolock = sde_hw_intr_enable_irq_nolock;
  841. ops->disable_irq_nolock = sde_hw_intr_disable_irq_nolock;
  842. ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
  843. ops->clear_all_irqs = sde_hw_intr_clear_irqs;
  844. ops->disable_all_irqs = sde_hw_intr_disable_irqs;
  845. ops->get_valid_interrupts = sde_hw_intr_get_valid_interrupts;
  846. ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
  847. ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
  848. ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
  849. ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock;
  850. ops->clear_intr_status_force_mask =
  851. sde_hw_intr_clear_intr_status_force_mask;
  852. ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
  853. ops->get_intr_status_nolock = sde_hw_intr_get_intr_status_nolock;
  854. ops->get_intr_status_nomask = sde_hw_intr_get_intr_status_nomask;
  855. }
  856. static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
  857. void __iomem *addr, struct sde_hw_blk_reg_map *hw)
  858. {
  859. if (!m || !addr || !hw || m->mdp_count == 0)
  860. return NULL;
  861. hw->base_off = addr;
  862. hw->blk_off = m->mdss[0].base;
  863. hw->hwversion = m->hwversion;
  864. return &m->mdss[0];
  865. }
/*
 * _sde_hw_intr_init_sde_irq_tbl - fill in register offsets for each irq reg
 * @irq_tbl_size: number of entries in @sde_irq_tbl
 * @sde_irq_tbl:  table whose sde_irq_idx fields are already populated
 *
 * Maps each logical interrupt register id (MDSS_INTR_* / MDSS_INTF_TEAR_*)
 * to its clear/enable/status register offsets within the MDP address space.
 *
 * Return: 0 on success, -EINVAL if any entry has an unknown sde_irq_idx.
 */
static inline int _sde_hw_intr_init_sde_irq_tbl(u32 irq_tbl_size,
	struct sde_intr_reg *sde_irq_tbl)
{
	int idx;
	struct sde_intr_reg *sde_irq;

	for (idx = 0; idx < irq_tbl_size; idx++) {
		sde_irq = &sde_irq_tbl[idx];

		switch (sde_irq->sde_irq_idx) {
		/* top-level SSPP interrupt registers */
		case MDSS_INTR_SSPP_TOP0_INTR:
			sde_irq->clr_off =
				MDP_SSPP_TOP0_OFF+INTR_CLEAR;
			sde_irq->en_off =
				MDP_SSPP_TOP0_OFF+INTR_EN;
			sde_irq->status_off =
				MDP_SSPP_TOP0_OFF+INTR_STATUS;
			break;
		case MDSS_INTR_SSPP_TOP0_INTR2:
			sde_irq->clr_off =
				MDP_SSPP_TOP0_OFF+INTR2_CLEAR;
			sde_irq->en_off =
				MDP_SSPP_TOP0_OFF+INTR2_EN;
			sde_irq->status_off =
				MDP_SSPP_TOP0_OFF+INTR2_STATUS;
			break;
		case MDSS_INTR_SSPP_TOP0_HIST_INTR:
			sde_irq->clr_off =
				MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR;
			sde_irq->en_off =
				MDP_SSPP_TOP0_OFF+HIST_INTR_EN;
			sde_irq->status_off =
				MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS;
			break;
		/* per-interface (INTF) interrupt registers */
		case MDSS_INTR_INTF_0_INTR:
			sde_irq->clr_off =
				MDP_INTF_0_OFF+INTF_INTR_CLEAR;
			sde_irq->en_off =
				MDP_INTF_0_OFF+INTF_INTR_EN;
			sde_irq->status_off =
				MDP_INTF_0_OFF+INTF_INTR_STATUS;
			break;
		case MDSS_INTR_INTF_1_INTR:
			sde_irq->clr_off =
				MDP_INTF_1_OFF+INTF_INTR_CLEAR;
			sde_irq->en_off =
				MDP_INTF_1_OFF+INTF_INTR_EN;
			sde_irq->status_off =
				MDP_INTF_1_OFF+INTF_INTR_STATUS;
			break;
		case MDSS_INTR_INTF_2_INTR:
			sde_irq->clr_off =
				MDP_INTF_2_OFF+INTF_INTR_CLEAR;
			sde_irq->en_off =
				MDP_INTF_2_OFF+INTF_INTR_EN;
			sde_irq->status_off =
				MDP_INTF_2_OFF+INTF_INTR_STATUS;
			break;
		case MDSS_INTR_INTF_3_INTR:
			sde_irq->clr_off =
				MDP_INTF_3_OFF+INTF_INTR_CLEAR;
			sde_irq->en_off =
				MDP_INTF_3_OFF+INTF_INTR_EN;
			sde_irq->status_off =
				MDP_INTF_3_OFF+INTF_INTR_STATUS;
			break;
		case MDSS_INTR_INTF_4_INTR:
			sde_irq->clr_off =
				MDP_INTF_4_OFF+INTF_INTR_CLEAR;
			sde_irq->en_off =
				MDP_INTF_4_OFF+INTF_INTR_EN;
			sde_irq->status_off =
				MDP_INTF_4_OFF+INTF_INTR_STATUS;
			break;
		/* AD4 (assertive display) interrupt registers */
		case MDSS_INTR_AD4_0_INTR:
			sde_irq->clr_off =
				MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF;
			sde_irq->en_off =
				MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF;
			sde_irq->status_off =
				MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF;
			break;
		case MDSS_INTR_AD4_1_INTR:
			sde_irq->clr_off =
				MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF;
			sde_irq->en_off =
				MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF;
			sde_irq->status_off =
				MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF;
			break;
		/* INTF tear-check interrupt registers */
		case MDSS_INTF_TEAR_1_INTR:
			sde_irq->clr_off = MDP_INTF_TEAR_INTF_1_IRQ_OFF +
				MDP_INTF_TEAR_INTR_CLEAR_OFF;
			sde_irq->en_off =
				MDP_INTF_TEAR_INTF_1_IRQ_OFF +
				MDP_INTF_TEAR_INTR_EN_OFF;
			sde_irq->status_off = MDP_INTF_TEAR_INTF_1_IRQ_OFF +
				MDP_INTF_TEAR_INTR_STATUS_OFF;
			break;
		case MDSS_INTF_TEAR_2_INTR:
			sde_irq->clr_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
				MDP_INTF_TEAR_INTR_CLEAR_OFF;
			sde_irq->en_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
				MDP_INTF_TEAR_INTR_EN_OFF;
			sde_irq->status_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
				MDP_INTF_TEAR_INTR_STATUS_OFF;
			break;
		/* LTM (local tone mapping) interrupt registers */
		case MDSS_INTR_LTM_0_INTR:
			sde_irq->clr_off =
				MDP_LTM_0_OFF + MDP_LTM_INTR_CLEAR_OFF;
			sde_irq->en_off =
				MDP_LTM_0_OFF + MDP_LTM_INTR_EN_OFF;
			sde_irq->status_off =
				MDP_LTM_0_OFF + MDP_LTM_INTR_STATUS_OFF;
			break;
		case MDSS_INTR_LTM_1_INTR:
			sde_irq->clr_off =
				MDP_LTM_1_OFF + MDP_LTM_INTR_CLEAR_OFF;
			sde_irq->en_off =
				MDP_LTM_1_OFF + MDP_LTM_INTR_EN_OFF;
			sde_irq->status_off =
				MDP_LTM_1_OFF + MDP_LTM_INTR_STATUS_OFF;
			break;
		default:
			pr_err("wrong irq idx %d\n",
				sde_irq->sde_irq_idx);
			return -EINVAL;
		}

		pr_debug("idx:%d irq_idx:%d clr:0x%x en:0x%x status:0x%x\n",
			idx, sde_irq->sde_irq_idx, sde_irq->clr_off,
			sde_irq->en_off, sde_irq->status_off);
	}

	return 0;
}
  998. void sde_hw_intr_destroy(struct sde_hw_intr *intr)
  999. {
  1000. if (intr) {
  1001. kfree(intr->sde_irq_tbl);
  1002. kfree(intr->sde_irq_map);
  1003. kfree(intr->cache_irq_mask);
  1004. kfree(intr->save_irq_status);
  1005. kfree(intr);
  1006. }
  1007. }
  1008. static inline u32 _get_irq_map_size(int idx)
  1009. {
  1010. u32 ret = 0;
  1011. switch (idx) {
  1012. case MDSS_INTR_SSPP_TOP0_INTR:
  1013. ret = ARRAY_SIZE(sde_irq_intr_map);
  1014. break;
  1015. case MDSS_INTR_SSPP_TOP0_INTR2:
  1016. ret = ARRAY_SIZE(sde_irq_intr2_map);
  1017. break;
  1018. case MDSS_INTR_SSPP_TOP0_HIST_INTR:
  1019. ret = ARRAY_SIZE(sde_irq_hist_map);
  1020. break;
  1021. case MDSS_INTR_INTF_0_INTR:
  1022. ret = ARRAY_SIZE(sde_irq_intf0_map);
  1023. break;
  1024. case MDSS_INTR_INTF_1_INTR:
  1025. ret = ARRAY_SIZE(sde_irq_inf1_map);
  1026. break;
  1027. case MDSS_INTR_INTF_2_INTR:
  1028. ret = ARRAY_SIZE(sde_irq_intf2_map);
  1029. break;
  1030. case MDSS_INTR_INTF_3_INTR:
  1031. ret = ARRAY_SIZE(sde_irq_intf3_map);
  1032. break;
  1033. case MDSS_INTR_INTF_4_INTR:
  1034. ret = ARRAY_SIZE(sde_irq_inf4_map);
  1035. break;
  1036. case MDSS_INTR_AD4_0_INTR:
  1037. ret = ARRAY_SIZE(sde_irq_ad4_0_map);
  1038. break;
  1039. case MDSS_INTR_AD4_1_INTR:
  1040. ret = ARRAY_SIZE(sde_irq_ad4_1_map);
  1041. break;
  1042. case MDSS_INTF_TEAR_1_INTR:
  1043. ret = ARRAY_SIZE(sde_irq_intf1_te_map);
  1044. break;
  1045. case MDSS_INTF_TEAR_2_INTR:
  1046. ret = ARRAY_SIZE(sde_irq_intf2_te_map);
  1047. break;
  1048. case MDSS_INTR_LTM_0_INTR:
  1049. ret = ARRAY_SIZE(sde_irq_ltm_0_map);
  1050. break;
  1051. case MDSS_INTR_LTM_1_INTR:
  1052. ret = ARRAY_SIZE(sde_irq_ltm_1_map);
  1053. break;
  1054. default:
  1055. pr_err("invalid idx:%d\n", idx);
  1056. }
  1057. return ret;
  1058. }
  1059. static inline struct sde_irq_type *_get_irq_map_addr(int idx)
  1060. {
  1061. struct sde_irq_type *ret = NULL;
  1062. switch (idx) {
  1063. case MDSS_INTR_SSPP_TOP0_INTR:
  1064. ret = sde_irq_intr_map;
  1065. break;
  1066. case MDSS_INTR_SSPP_TOP0_INTR2:
  1067. ret = sde_irq_intr2_map;
  1068. break;
  1069. case MDSS_INTR_SSPP_TOP0_HIST_INTR:
  1070. ret = sde_irq_hist_map;
  1071. break;
  1072. case MDSS_INTR_INTF_0_INTR:
  1073. ret = sde_irq_intf0_map;
  1074. break;
  1075. case MDSS_INTR_INTF_1_INTR:
  1076. ret = sde_irq_inf1_map;
  1077. break;
  1078. case MDSS_INTR_INTF_2_INTR:
  1079. ret = sde_irq_intf2_map;
  1080. break;
  1081. case MDSS_INTR_INTF_3_INTR:
  1082. ret = sde_irq_intf3_map;
  1083. break;
  1084. case MDSS_INTR_INTF_4_INTR:
  1085. ret = sde_irq_inf4_map;
  1086. break;
  1087. case MDSS_INTR_AD4_0_INTR:
  1088. ret = sde_irq_ad4_0_map;
  1089. break;
  1090. case MDSS_INTR_AD4_1_INTR:
  1091. ret = sde_irq_ad4_1_map;
  1092. break;
  1093. case MDSS_INTF_TEAR_1_INTR:
  1094. ret = sde_irq_intf1_te_map;
  1095. break;
  1096. case MDSS_INTF_TEAR_2_INTR:
  1097. ret = sde_irq_intf2_te_map;
  1098. break;
  1099. case MDSS_INTR_LTM_0_INTR:
  1100. ret = sde_irq_ltm_0_map;
  1101. break;
  1102. case MDSS_INTR_LTM_1_INTR:
  1103. ret = sde_irq_ltm_1_map;
  1104. break;
  1105. default:
  1106. pr_err("invalid idx:%d\n", idx);
  1107. }
  1108. return ret;
  1109. }
  1110. static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
  1111. u32 irq_idx, u32 low_idx, u32 high_idx)
  1112. {
  1113. int i, j = 0;
  1114. struct sde_irq_type *src = _get_irq_map_addr(irq_idx);
  1115. u32 src_size = _get_irq_map_size(irq_idx);
  1116. if (!src)
  1117. return -EINVAL;
  1118. if (low_idx >= size || high_idx > size ||
  1119. (high_idx - low_idx > src_size)) {
  1120. pr_err("invalid size l:%d h:%d dst:%d src:%d\n",
  1121. low_idx, high_idx, size, src_size);
  1122. return -EINVAL;
  1123. }
  1124. for (i = low_idx; i < high_idx; i++)
  1125. sde_irq_map[i] = src[j++];
  1126. return 0;
  1127. }
/*
 * _sde_hw_intr_init_irq_tables - build the per-device irq tables
 * @intr: interrupt handler context (sde_irq_tbl/sde_irq_map pre-allocated)
 * @m:    catalog config; m->mdss_irqs selects which registers are present
 *
 * Walks the MDSS_INTR_* id space and, for each register present on this
 * target, copies its global irq map into a contiguous window of
 * intr->sde_irq_map, stamps every copied entry with its register index,
 * and records the window bounds in intr->sde_irq_tbl so the dispatcher
 * only scans entries belonging to a fired register.
 *
 * Return: 0 on success, -EINVAL on table overflow or copy failure.
 */
static int _sde_hw_intr_init_irq_tables(struct sde_hw_intr *intr,
	struct sde_mdss_cfg *m)
{
	int i, idx, sde_irq_tbl_idx = 0, ret = 0;
	u32 low_idx, high_idx;
	u32 sde_irq_map_idx = 0;

	/* Initialize the offset of the irq's in the sde_irq_map table */
	for (idx = 0; idx < MDSS_INTR_MAX; idx++) {
		if (test_bit(idx, m->mdss_irqs)) {
			/* next free window in sde_irq_map for this register */
			low_idx = sde_irq_map_idx;
			high_idx = low_idx + _get_irq_map_size(idx);

			pr_debug("init[%d]=%d low:%d high:%d\n",
				sde_irq_tbl_idx, idx, low_idx, high_idx);

			/* guard against writing past sde_irq_tbl */
			if (sde_irq_tbl_idx >= intr->sde_irq_size ||
				sde_irq_tbl_idx < 0) {
				ret = -EINVAL;
				goto exit;
			}

			/* init sde_irq_map with the global irq mapping table */
			if (_sde_copy_regs(intr->sde_irq_map,
					intr->sde_irq_map_size,
					idx, low_idx, high_idx)) {
				ret = -EINVAL;
				goto exit;
			}

			/* init irq map with its reg idx within the irq tbl */
			for (i = low_idx; i < high_idx; i++) {
				intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
				pr_debug("sde_irq_map[%d].reg_idx=%d\n",
						i, sde_irq_tbl_idx);
			}

			/* track the idx of the mapping table for this irq in
			 * sde_irq_map, this to only access the indexes of this
			 * irq during the irq dispatch
			 */
			intr->sde_irq_tbl[sde_irq_tbl_idx].sde_irq_idx = idx;
			intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start =
				low_idx;
			intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end =
				high_idx;

			/* increment idx for both tables accordingly */
			sde_irq_tbl_idx++;
			sde_irq_map_idx = high_idx;
		}
	}

	/* do this after 'sde_irq_idx is initialized in sde_irq_tbl */
	ret = _sde_hw_intr_init_sde_irq_tbl(intr->sde_irq_size,
			intr->sde_irq_tbl);

exit:
	return ret;
}
/*
 * sde_hw_intr_init - allocate and initialize the hw interrupt context
 * @addr: mapped register base for the device
 * @m:    catalog configuration describing supported irq registers
 *
 * Sizes the irq tables from m->mdss_irqs, allocates them, builds the
 * register/map tables and the per-register cache/save arrays, and
 * installs the ops table.
 *
 * Return: new context on success; ERR_PTR(-errno) on failure. On any
 * failure the partially built context is freed via sde_hw_intr_destroy().
 */
struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
		struct sde_mdss_cfg *m)
{
	struct sde_hw_intr *intr = NULL;
	struct sde_mdss_base_cfg *cfg;
	u32 irq_regs_count = 0;
	u32 irq_map_count = 0;
	u32 size;
	int idx;
	int ret = 0;

	if (!addr || !m) {
		ret = -EINVAL;
		goto exit;
	}

	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
	if (!intr) {
		ret = -ENOMEM;
		goto exit;
	}

	cfg = __intr_offset(m, addr, &intr->hw);
	if (!cfg) {
		ret = -EINVAL;
		goto exit;
	}
	__setup_intr_ops(&intr->ops);

	/* sanity guard for the counting loop below; expected always false */
	if (MDSS_INTR_MAX >= UINT_MAX) {
		pr_err("max intr exceeded:%d\n", MDSS_INTR_MAX);
		ret  = -EINVAL;
		goto exit;
	}

	/* check how many irq's this target supports */
	for (idx = 0; idx < MDSS_INTR_MAX; idx++) {
		if (test_bit(idx, m->mdss_irqs)) {
			irq_regs_count++;

			size = _get_irq_map_size(idx);
			/* overflow guard on the running map-entry total */
			if (!size || irq_map_count >= UINT_MAX - size) {
				pr_err("wrong map cnt idx:%d sz:%d cnt:%d\n",
					idx, size, irq_map_count);
				ret = -EINVAL;
				goto exit;
			}

			irq_map_count += size;
		}
	}

	if (irq_regs_count == 0 || irq_regs_count > MDSS_INTR_MAX ||
		irq_map_count == 0) {
		pr_err("wrong mapping of supported irqs 0x%lx\n",
			m->mdss_irqs[0]);
		ret = -EINVAL;
		goto exit;
	}

	/* Allocate table for the irq registers */
	intr->sde_irq_size = irq_regs_count;
	intr->sde_irq_tbl = kcalloc(irq_regs_count, sizeof(*intr->sde_irq_tbl),
		GFP_KERNEL);
	if (intr->sde_irq_tbl == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Allocate table with the valid interrupts bits */
	intr->sde_irq_map_size = irq_map_count;
	intr->sde_irq_map = kcalloc(irq_map_count, sizeof(*intr->sde_irq_map),
		GFP_KERNEL);
	if (intr->sde_irq_map == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Initialize IRQs tables */
	ret = _sde_hw_intr_init_irq_tables(intr, m);
	if (ret)
		goto exit;

	intr->cache_irq_mask = kcalloc(intr->sde_irq_size,
			sizeof(*intr->cache_irq_mask), GFP_KERNEL);
	if (intr->cache_irq_mask == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	intr->save_irq_status = kcalloc(intr->sde_irq_size,
			sizeof(*intr->save_irq_status), GFP_KERNEL);
	if (intr->save_irq_status == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	spin_lock_init(&intr->irq_lock);

exit:
	/* sde_hw_intr_destroy() tolerates NULL and partial contexts */
	if (ret) {
		sde_hw_intr_destroy(intr);
		return ERR_PTR(ret);
	}

	return intr;
}