/* sde_hw_interrupts.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/bitops.h>
  6. #include <linux/slab.h>
  7. #include "sde_kms.h"
  8. #include "sde_hw_interrupts.h"
  9. #include "sde_hw_util.h"
  10. #include "sde_hw_mdss.h"
/**
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDSS base
 */
  15. #define HW_INTR_STATUS 0x0010
  16. #define MDP_SSPP_TOP0_OFF 0x1000
  17. #define MDP_INTF_0_OFF 0x6B000
  18. #define MDP_INTF_1_OFF 0x6B800
  19. #define MDP_INTF_2_OFF 0x6C000
  20. #define MDP_INTF_3_OFF 0x6C800
  21. #define MDP_INTF_4_OFF 0x6D000
  22. #define MDP_AD4_0_OFF 0x7D000
  23. #define MDP_AD4_1_OFF 0x7E000
  24. #define MDP_AD4_INTR_EN_OFF 0x41c
  25. #define MDP_AD4_INTR_CLEAR_OFF 0x424
  26. #define MDP_AD4_INTR_STATUS_OFF 0x420
  27. #define MDP_INTF_TEAR_INTF_1_IRQ_OFF 0x6E800
  28. #define MDP_INTF_TEAR_INTF_2_IRQ_OFF 0x6E900
  29. #define MDP_INTF_TEAR_INTR_EN_OFF 0x0
  30. #define MDP_INTF_TEAR_INTR_STATUS_OFF 0x4
  31. #define MDP_INTF_TEAR_INTR_CLEAR_OFF 0x8
  32. #define MDP_LTM_0_OFF 0x7F000
  33. #define MDP_LTM_1_OFF 0x7F100
  34. #define MDP_LTM_INTR_EN_OFF 0x50
  35. #define MDP_LTM_INTR_STATUS_OFF 0x54
  36. #define MDP_LTM_INTR_CLEAR_OFF 0x58
  37. /**
  38. * WB interrupt status bit definitions
  39. */
  40. #define SDE_INTR_WB_0_DONE BIT(0)
  41. #define SDE_INTR_WB_1_DONE BIT(1)
  42. #define SDE_INTR_WB_2_DONE BIT(4)
  43. /**
  44. * WDOG timer interrupt status bit definitions
  45. */
  46. #define SDE_INTR_WD_TIMER_0_DONE BIT(2)
  47. #define SDE_INTR_WD_TIMER_1_DONE BIT(3)
  48. #define SDE_INTR_WD_TIMER_2_DONE BIT(5)
  49. #define SDE_INTR_WD_TIMER_3_DONE BIT(6)
  50. #define SDE_INTR_WD_TIMER_4_DONE BIT(7)
  51. /**
  52. * Pingpong interrupt status bit definitions
  53. */
  54. #define SDE_INTR_PING_PONG_0_DONE BIT(8)
  55. #define SDE_INTR_PING_PONG_1_DONE BIT(9)
  56. #define SDE_INTR_PING_PONG_2_DONE BIT(10)
  57. #define SDE_INTR_PING_PONG_3_DONE BIT(11)
  58. #define SDE_INTR_PING_PONG_4_DONE BIT(30)
  59. #define SDE_INTR_PING_PONG_5_DONE BIT(31)
  60. #define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
  61. #define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
  62. #define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
  63. #define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
  64. #define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
  65. #define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
  66. #define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
  67. #define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
  68. #define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
  69. #define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
  70. #define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
  71. #define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
  72. /**
  73. * Interface interrupt status bit definitions
  74. */
  75. #define SDE_INTR_INTF_0_UNDERRUN BIT(24)
  76. #define SDE_INTR_INTF_1_UNDERRUN BIT(26)
  77. #define SDE_INTR_INTF_2_UNDERRUN BIT(28)
  78. #define SDE_INTR_INTF_3_UNDERRUN BIT(30)
  79. #define SDE_INTR_INTF_0_VSYNC BIT(25)
  80. #define SDE_INTR_INTF_1_VSYNC BIT(27)
  81. #define SDE_INTR_INTF_2_VSYNC BIT(29)
  82. #define SDE_INTR_INTF_3_VSYNC BIT(31)
  83. /**
  84. * Pingpong Secondary interrupt status bit definitions
  85. */
  86. #define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
  87. #define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
  88. #define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8)
  89. #define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
  90. #define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
  91. /**
  92. * Pingpong TEAR detection interrupt status bit definitions
  93. */
  94. #define SDE_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
  95. #define SDE_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
  96. #define SDE_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
  97. #define SDE_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
  98. /**
  99. * Pingpong TE detection interrupt status bit definitions
  100. */
  101. #define SDE_INTR_PING_PONG_0_TE_DETECTED BIT(24)
  102. #define SDE_INTR_PING_PONG_1_TE_DETECTED BIT(25)
  103. #define SDE_INTR_PING_PONG_2_TE_DETECTED BIT(26)
  104. #define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)
  105. /**
  106. * Ctl start interrupt status bit definitions
  107. */
  108. #define SDE_INTR_CTL_0_START BIT(9)
  109. #define SDE_INTR_CTL_1_START BIT(10)
  110. #define SDE_INTR_CTL_2_START BIT(11)
  111. #define SDE_INTR_CTL_3_START BIT(12)
  112. #define SDE_INTR_CTL_4_START BIT(13)
  113. #define SDE_INTR_CTL_5_START BIT(23)
  114. /**
  115. * Concurrent WB overflow interrupt status bit definitions
  116. */
  117. #define SDE_INTR_CWB_1_OVERFLOW BIT(8)
  118. #define SDE_INTR_CWB_2_OVERFLOW BIT(14)
  119. #define SDE_INTR_CWB_3_OVERFLOW BIT(15)
  120. #define SDE_INTR_CWB_4_OVERFLOW BIT(20)
  121. #define SDE_INTR_CWB_5_OVERFLOW BIT(21)
  122. /**
  123. * Histogram VIG done interrupt status bit definitions
  124. */
  125. #define SDE_INTR_HIST_VIG_0_DONE BIT(0)
  126. #define SDE_INTR_HIST_VIG_1_DONE BIT(4)
  127. #define SDE_INTR_HIST_VIG_2_DONE BIT(8)
  128. #define SDE_INTR_HIST_VIG_3_DONE BIT(10)
  129. /**
  130. * Histogram VIG reset Sequence done interrupt status bit definitions
  131. */
  132. #define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
  133. #define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
  134. #define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
  135. #define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
  136. /**
  137. * Histogram DSPP done interrupt status bit definitions
  138. */
  139. #define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
  140. #define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
  141. #define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
  142. #define SDE_INTR_HIST_DSPP_3_DONE BIT(22)
  143. /**
  144. * Histogram DSPP reset Sequence done interrupt status bit definitions
  145. */
  146. #define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
  147. #define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
  148. #define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
  149. #define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
  150. /**
  151. * INTF interrupt status bit definitions
  152. */
  153. #define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
  154. #define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
  155. #define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
  156. #define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
  157. #define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
  158. #define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
  159. #define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
  160. #define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
  161. #define SDE_INTR_PROG_LINE BIT(8)
  162. /**
  163. * AD4 interrupt status bit definitions
  164. */
  165. #define SDE_INTR_BRIGHTPR_UPDATED BIT(4)
  166. #define SDE_INTR_DARKENH_UPDATED BIT(3)
  167. #define SDE_INTR_STREN_OUTROI_UPDATED BIT(2)
  168. #define SDE_INTR_STREN_INROI_UPDATED BIT(1)
  169. #define SDE_INTR_BACKLIGHT_UPDATED BIT(0)
  170. /**
  171. * INTF Tear IRQ register bit definitions
  172. */
  173. #define SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE BIT(0)
  174. #define SDE_INTR_INTF_TEAR_WR_PTR BIT(1)
  175. #define SDE_INTR_INTF_TEAR_RD_PTR BIT(2)
  176. #define SDE_INTR_INTF_TEAR_TE_DETECTED BIT(3)
  177. #define SDE_INTR_INTF_TEAR_TEAR_DETECTED BIT(4)
  178. /**
  179. * LTM interrupt status bit definitions
  180. */
  181. #define SDE_INTR_LTM_STATS_DONE BIT(0)
  182. #define SDE_INTR_LTM_STATS_WB_PB BIT(5)
/**
 * struct sde_intr_reg - description of one interrupt register set
 * @clr_off:	offset to the CLEAR register
 * @en_off:	offset to the ENABLE register
 * @status_off:	offset to the STATUS register
 * @sde_irq_idx:	global index in the 'sde_irq_map' table,
 *		to know which interrupt type, instance, mask, etc. to use;
 *		negative means this register set is unused
 * @map_idx_start:	first offset in the sde_irq_map table
 * @map_idx_end:	last offset in the sde_irq_map table
 */
struct sde_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
	int sde_irq_idx;
	u32 map_idx_start;
	u32 map_idx_end;
};
/**
 * struct sde_irq_type - maps each irq with i/f
 * @intr_type:	type of interrupt listed in sde_intr_type
 * @instance_idx:	instance index of the associated HW block in SDE
 * @irq_mask:	corresponding bit in the interrupt status register
 * @reg_idx:	index in the 'sde_irq_tbl' table, to know which
 *		register offsets to use; -1 = invalid offset
 */
struct sde_irq_type {
	u32 intr_type;
	u32 instance_idx;
	u32 irq_mask;
	int reg_idx;
};
/**
 * IRQ mapping tables - used to look up an irq_idx in these tables that has
 * a matching interface type and instance index.
 * Each of these tables is copied to a dynamically allocated
 * table, that will be used to service each of the irqs.
 *
 * Entries for the primary (INTR) status/clear/enable register set.
 * reg_idx is a placeholder (-1) filled in at init time.
 */
static struct sde_irq_type sde_irq_intr_map[] = {
	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, -1},
	/* NOTE(review): reg_idx 0 below looks inconsistent with the -1
	 * placeholders used everywhere else - verify against init code.
	 */
	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, 0},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, -1},
	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
		SDE_INTR_PING_PONG_0_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
		SDE_INTR_PING_PONG_1_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
		SDE_INTR_PING_PONG_2_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
		SDE_INTR_PING_PONG_3_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
		SDE_INTR_PING_PONG_0_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
		SDE_INTR_PING_PONG_1_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
		SDE_INTR_PING_PONG_2_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
		SDE_INTR_PING_PONG_3_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
		SDE_INTR_PING_PONG_0_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
		SDE_INTR_PING_PONG_1_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
		SDE_INTR_PING_PONG_2_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
		SDE_INTR_PING_PONG_3_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
		SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
		SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
		SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
		SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, -1},
};
/* Entries for the secondary (INTR2) status/clear/enable register set */
static struct sde_irq_type sde_irq_intr2_map[] = {
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_WR_PTR, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_1, SDE_INTR_CWB_1_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_RD_PTR, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_0,
		SDE_INTR_CTL_0_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_1,
		SDE_INTR_CTL_1_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_2,
		SDE_INTR_CTL_2_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_3,
		SDE_INTR_CTL_3_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_4,
		SDE_INTR_CTL_4_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_5,
		SDE_INTR_CTL_5_START, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
		SDE_INTR_PING_PONG_0_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
		SDE_INTR_PING_PONG_1_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
		SDE_INTR_PING_PONG_2_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
		SDE_INTR_PING_PONG_3_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_4, SDE_INTR_CWB_4_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_5, SDE_INTR_CWB_5_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
		SDE_INTR_PING_PONG_0_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
		SDE_INTR_PING_PONG_1_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
		SDE_INTR_PING_PONG_2_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
		SDE_INTR_PING_PONG_3_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_4,
		SDE_INTR_PING_PONG_4_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_5,
		SDE_INTR_PING_PONG_5_DONE, -1},
};
/* Histogram done/reset-sequence interrupts for VIG pipes and DSPP blocks */
static struct sde_irq_type sde_irq_hist_map[] = {
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
		SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
		SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
		SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
		SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
		SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
		SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
		SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
		SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, -1},
};
/*
 * Per-interface static-frame-indication (SFI) and programmable-line maps,
 * one table per INTF instance, all sharing the same per-INTF bit layout.
 * NOTE(review): 'inf1'/'inf4' vs 'intf0/2/3' naming is inconsistent, but
 * renaming would break references elsewhere in this file - left as is.
 */
static struct sde_irq_type sde_irq_intf0_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_0, SDE_INTR_PROG_LINE, -1},
};

static struct sde_irq_type sde_irq_inf1_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_1, SDE_INTR_PROG_LINE, -1},
};

static struct sde_irq_type sde_irq_intf2_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_2, SDE_INTR_PROG_LINE, -1},
};

static struct sde_irq_type sde_irq_intf3_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_3, SDE_INTR_PROG_LINE, -1},
};

static struct sde_irq_type sde_irq_inf4_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_4, SDE_INTR_PROG_LINE, -1},
};
/* AD4 backlight-updated interrupts: one single-entry map per AD4 instance */
static struct sde_irq_type sde_irq_ad4_0_map[] = {
	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_0, SDE_INTR_BACKLIGHT_UPDATED, -1},
};

static struct sde_irq_type sde_irq_ad4_1_map[] = {
	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_1, SDE_INTR_BACKLIGHT_UPDATED, -1},
};
/* INTF tear-check interrupt maps for the INTF_1 and INTF_2 tear IRQ blocks */
static struct sde_irq_type sde_irq_intf1_te_map[] = {
	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_1,
		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_1,
		SDE_INTR_INTF_TEAR_WR_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_1,
		SDE_INTR_INTF_TEAR_RD_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_1,
		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
};

static struct sde_irq_type sde_irq_intf2_te_map[] = {
	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_2,
		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_2,
		SDE_INTR_INTF_TEAR_WR_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_2,
		SDE_INTR_INTF_TEAR_RD_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_2,
		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
};
/* LTM stats interrupts: one map per LTM instance (keyed by DSPP index) */
static struct sde_irq_type sde_irq_ltm_0_map[] = {
	{ SDE_IRQ_TYPE_LTM_STATS_DONE, DSPP_0, SDE_INTR_LTM_STATS_DONE, -1},
	{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, DSPP_0, SDE_INTR_LTM_STATS_WB_PB, -1},
};

static struct sde_irq_type sde_irq_ltm_1_map[] = {
	{ SDE_IRQ_TYPE_LTM_STATS_DONE, DSPP_1, SDE_INTR_LTM_STATS_DONE, -1},
	{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, DSPP_1, SDE_INTR_LTM_STATS_WB_PB, -1},
};
  475. static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
  476. enum sde_intr_type intr_type, u32 instance_idx)
  477. {
  478. int i;
  479. for (i = 0; i < intr->sde_irq_map_size; i++) {
  480. if (intr_type == intr->sde_irq_map[i].intr_type &&
  481. instance_idx == intr->sde_irq_map[i].instance_idx)
  482. return i;
  483. }
  484. pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
  485. intr_type, instance_idx);
  486. return -EINVAL;
  487. }
  488. static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off,
  489. uint32_t mask)
  490. {
  491. if (!intr)
  492. return;
  493. SDE_REG_WRITE(&intr->hw, reg_off, mask);
  494. /* ensure register writes go through */
  495. wmb();
  496. }
/**
 * sde_hw_intr_dispatch_irq - dispatch previously latched interrupt statuses
 * @intr:   interrupt handler context
 * @cbfunc: per-irq callback; if NULL, interrupts are cleared here instead
 * @arg:    opaque pointer forwarded to @cbfunc
 *
 * Walks intr->save_irq_status (populated earlier by the status-read path)
 * under irq_lock, matching every set status bit against its register's
 * slice of sde_irq_map and invoking @cbfunc once per matched irq index.
 */
static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
		void (*cbfunc)(void *, int),
		void *arg)
{
	int reg_idx;
	int irq_idx;
	int start_idx;
	int end_idx;
	u32 irq_status;
	unsigned long irq_flags;
	int sde_irq_idx;

	if (!intr)
		return;

	/*
	 * The dispatcher will save the IRQ status before calling here.
	 * Now need to go through each IRQ status and find matching
	 * irq lookup index.
	 */
	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < intr->sde_irq_size; reg_idx++) {
		irq_status = intr->save_irq_status[reg_idx];

		/* get the global offset in 'sde_irq_map'; negative means
		 * this register set was never wired up - skip it
		 */
		sde_irq_idx = intr->sde_irq_tbl[reg_idx].sde_irq_idx;
		if (sde_irq_idx < 0)
			continue;

		/*
		 * Each Interrupt register has dynamic range of indexes,
		 * initialized during hw_intr_init when sde_irq_tbl is created.
		 */
		start_idx = intr->sde_irq_tbl[reg_idx].map_idx_start;
		end_idx = intr->sde_irq_tbl[reg_idx].map_idx_end;

		/* defensively skip ranges that fall outside the map */
		if (start_idx >= intr->sde_irq_map_size ||
				end_idx > intr->sde_irq_map_size)
			continue;

		/*
		 * Search through matching intr status from irq map.
		 * start_idx and end_idx defined the search range in
		 * the sde_irq_map. The '&& irq_status' clause stops the
		 * scan early once every pending bit has been consumed.
		 */
		for (irq_idx = start_idx;
				(irq_idx < end_idx) && irq_status;
				irq_idx++)
			if ((irq_status &
				intr->sde_irq_map[irq_idx].irq_mask) &&
				(intr->sde_irq_map[irq_idx].reg_idx ==
				 reg_idx)) {
				/*
				 * Once a match on irq mask, perform a callback
				 * to the given cbfunc. cbfunc will take care
				 * the interrupt status clearing. If cbfunc is
				 * not provided, then the interrupt clearing
				 * is here.
				 */
				if (cbfunc)
					cbfunc(arg, irq_idx);
				else
					intr->ops.clear_intr_status_nolock(
							intr, irq_idx);

				/*
				 * When callback finish, clear the irq_status
				 * with the matching mask. Once irq_status
				 * is all cleared, the search can be stopped.
				 */
				irq_status &=
					~intr->sde_irq_map[irq_idx].irq_mask;
			}
	}
	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
  566. static int sde_hw_intr_enable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
  567. {
  568. int reg_idx;
  569. const struct sde_intr_reg *reg;
  570. const struct sde_irq_type *irq;
  571. const char *dbgstr = NULL;
  572. uint32_t cache_irq_mask;
  573. if (!intr)
  574. return -EINVAL;
  575. if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
  576. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  577. return -EINVAL;
  578. }
  579. irq = &intr->sde_irq_map[irq_idx];
  580. reg_idx = irq->reg_idx;
  581. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  582. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  583. return -EINVAL;
  584. }
  585. reg = &intr->sde_irq_tbl[reg_idx];
  586. cache_irq_mask = intr->cache_irq_mask[reg_idx];
  587. if (cache_irq_mask & irq->irq_mask) {
  588. dbgstr = "SDE IRQ already set:";
  589. } else {
  590. dbgstr = "SDE IRQ enabled:";
  591. cache_irq_mask |= irq->irq_mask;
  592. /* Cleaning any pending interrupt */
  593. SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
  594. /* Enabling interrupts with the new mask */
  595. SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
  596. /* ensure register write goes through */
  597. wmb();
  598. intr->cache_irq_mask[reg_idx] = cache_irq_mask;
  599. }
  600. pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
  601. irq->irq_mask, cache_irq_mask);
  602. return 0;
  603. }
  604. static int sde_hw_intr_disable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
  605. {
  606. int reg_idx;
  607. const struct sde_intr_reg *reg;
  608. const struct sde_irq_type *irq;
  609. const char *dbgstr = NULL;
  610. uint32_t cache_irq_mask;
  611. if (!intr)
  612. return -EINVAL;
  613. if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
  614. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  615. return -EINVAL;
  616. }
  617. irq = &intr->sde_irq_map[irq_idx];
  618. reg_idx = irq->reg_idx;
  619. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  620. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  621. return -EINVAL;
  622. }
  623. reg = &intr->sde_irq_tbl[reg_idx];
  624. cache_irq_mask = intr->cache_irq_mask[reg_idx];
  625. if ((cache_irq_mask & irq->irq_mask) == 0) {
  626. dbgstr = "SDE IRQ is already cleared:";
  627. } else {
  628. dbgstr = "SDE IRQ mask disable:";
  629. cache_irq_mask &= ~irq->irq_mask;
  630. /* Disable interrupts based on the new mask */
  631. SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
  632. /* Cleaning any pending interrupt */
  633. SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
  634. /* ensure register write goes through */
  635. wmb();
  636. intr->cache_irq_mask[reg_idx] = cache_irq_mask;
  637. }
  638. pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
  639. irq->irq_mask, cache_irq_mask);
  640. return 0;
  641. }
  642. static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
  643. {
  644. int i;
  645. if (!intr)
  646. return -EINVAL;
  647. for (i = 0; i < intr->sde_irq_size; i++)
  648. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
  649. 0xffffffff);
  650. /* ensure register writes go through */
  651. wmb();
  652. return 0;
  653. }
  654. static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
  655. {
  656. int i;
  657. if (!intr)
  658. return -EINVAL;
  659. for (i = 0; i < intr->sde_irq_size; i++)
  660. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].en_off,
  661. 0x00000000);
  662. /* ensure register writes go through */
  663. wmb();
  664. return 0;
  665. }
  666. static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr,
  667. uint32_t *mask)
  668. {
  669. if (!intr || !mask)
  670. return -EINVAL;
  671. *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
  672. | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
  673. return 0;
  674. }
  675. static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
  676. uint32_t *sources)
  677. {
  678. if (!intr || !sources)
  679. return -EINVAL;
  680. *sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);
  681. return 0;
  682. }
  683. static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr)
  684. {
  685. int i;
  686. u32 enable_mask;
  687. unsigned long irq_flags;
  688. if (!intr)
  689. return;
  690. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  691. for (i = 0; i < intr->sde_irq_size; i++) {
  692. /* Read interrupt status */
  693. intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
  694. intr->sde_irq_tbl[i].status_off);
  695. /* Read enable mask */
  696. enable_mask = SDE_REG_READ(&intr->hw,
  697. intr->sde_irq_tbl[i].en_off);
  698. /* and clear the interrupt */
  699. if (intr->save_irq_status[i])
  700. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
  701. intr->save_irq_status[i]);
  702. /* Finally update IRQ status based on enable mask */
  703. intr->save_irq_status[i] &= enable_mask;
  704. }
  705. /* ensure register writes go through */
  706. wmb();
  707. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  708. }
  709. static void sde_hw_intr_clear_intr_status_force_mask(struct sde_hw_intr *intr,
  710. int irq_idx, u32 irq_mask)
  711. {
  712. int reg_idx;
  713. if (!intr)
  714. return;
  715. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  716. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  717. return;
  718. }
  719. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  720. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  721. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  722. return;
  723. }
  724. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  725. irq_mask);
  726. /* ensure register writes go through */
  727. wmb();
  728. }
  729. static void sde_hw_intr_clear_intr_status_nolock(struct sde_hw_intr *intr,
  730. int irq_idx)
  731. {
  732. int reg_idx;
  733. if (!intr)
  734. return;
  735. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  736. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  737. return;
  738. }
  739. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  740. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  741. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  742. return;
  743. }
  744. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  745. intr->sde_irq_map[irq_idx].irq_mask);
  746. /* ensure register writes go through */
  747. wmb();
  748. }
/*
 * Locked wrapper around sde_hw_intr_clear_intr_status_nolock(): acks the
 * status bits for @irq_idx while holding the interrupt spinlock.  Index
 * validation is delegated to the nolock helper.
 */
static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
		int irq_idx)
{
	unsigned long irq_flags;

	if (!intr)
		return;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	sde_hw_intr_clear_intr_status_nolock(intr, irq_idx);
	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
  759. static u32 sde_hw_intr_get_intr_status_nolock(struct sde_hw_intr *intr,
  760. int irq_idx, bool clear)
  761. {
  762. int reg_idx;
  763. u32 intr_status;
  764. if (!intr)
  765. return 0;
  766. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  767. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  768. return 0;
  769. }
  770. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  771. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  772. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  773. return 0;
  774. }
  775. intr_status = SDE_REG_READ(&intr->hw,
  776. intr->sde_irq_tbl[reg_idx].status_off) &
  777. intr->sde_irq_map[irq_idx].irq_mask;
  778. if (intr_status && clear)
  779. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  780. intr_status);
  781. /* ensure register writes go through */
  782. wmb();
  783. return intr_status;
  784. }
  785. static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
  786. int irq_idx, bool clear)
  787. {
  788. int reg_idx;
  789. unsigned long irq_flags;
  790. u32 intr_status;
  791. if (!intr)
  792. return 0;
  793. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  794. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  795. return 0;
  796. }
  797. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  798. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  799. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  800. return 0;
  801. }
  802. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  803. intr_status = SDE_REG_READ(&intr->hw,
  804. intr->sde_irq_tbl[reg_idx].status_off) &
  805. intr->sde_irq_map[irq_idx].irq_mask;
  806. if (intr_status && clear)
  807. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  808. intr_status);
  809. /* ensure register writes go through */
  810. wmb();
  811. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  812. return intr_status;
  813. }
  814. static u32 sde_hw_intr_get_intr_status_nomask(struct sde_hw_intr *intr,
  815. int irq_idx, bool clear)
  816. {
  817. int reg_idx;
  818. unsigned long irq_flags;
  819. u32 intr_status = 0;
  820. if (!intr)
  821. return 0;
  822. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  823. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  824. return 0;
  825. }
  826. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  827. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  828. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  829. return 0;
  830. }
  831. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  832. intr_status = SDE_REG_READ(&intr->hw,
  833. intr->sde_irq_tbl[reg_idx].status_off);
  834. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  835. return intr_status;
  836. }
  837. static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
  838. {
  839. ops->set_mask = sde_hw_intr_set_mask;
  840. ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
  841. ops->enable_irq_nolock = sde_hw_intr_enable_irq_nolock;
  842. ops->disable_irq_nolock = sde_hw_intr_disable_irq_nolock;
  843. ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
  844. ops->clear_all_irqs = sde_hw_intr_clear_irqs;
  845. ops->disable_all_irqs = sde_hw_intr_disable_irqs;
  846. ops->get_valid_interrupts = sde_hw_intr_get_valid_interrupts;
  847. ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
  848. ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
  849. ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
  850. ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock;
  851. ops->clear_intr_status_force_mask =
  852. sde_hw_intr_clear_intr_status_force_mask;
  853. ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
  854. ops->get_intr_status_nolock = sde_hw_intr_get_intr_status_nolock;
  855. ops->get_intr_status_nomask = sde_hw_intr_get_intr_status_nomask;
  856. }
  857. static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
  858. void __iomem *addr, struct sde_hw_blk_reg_map *hw)
  859. {
  860. if (!m || !addr || !hw || m->mdp_count == 0)
  861. return NULL;
  862. hw->base_off = addr;
  863. hw->blk_off = m->mdss[0].base;
  864. hw->hwversion = m->hwversion;
  865. return &m->mdss[0];
  866. }
/**
 * _sde_hw_intr_init_sde_irq_tbl - fill in register offsets for each irq entry
 * @irq_tbl_size: number of entries in @sde_irq_tbl
 * @sde_irq_tbl:  table whose sde_irq_idx fields are already initialized
 *
 * For every entry, translates the logical MDSS_INTR_* index into the
 * hardware CLEAR/EN/STATUS register offsets for that interrupt block.
 *
 * Return: 0 on success, -EINVAL if any entry holds an unknown irq index.
 */
static inline int _sde_hw_intr_init_sde_irq_tbl(u32 irq_tbl_size,
	struct sde_intr_reg *sde_irq_tbl)
{
	int idx;
	struct sde_intr_reg *sde_irq;

	for (idx = 0; idx < irq_tbl_size; idx++) {
		sde_irq = &sde_irq_tbl[idx];

		/* map logical irq index -> per-block register offsets */
		switch (sde_irq->sde_irq_idx) {
		case MDSS_INTR_SSPP_TOP0_INTR:
			sde_irq->clr_off =
				MDP_SSPP_TOP0_OFF+INTR_CLEAR;
			sde_irq->en_off =
				MDP_SSPP_TOP0_OFF+INTR_EN;
			sde_irq->status_off =
				MDP_SSPP_TOP0_OFF+INTR_STATUS;
			break;
		case MDSS_INTR_SSPP_TOP0_INTR2:
			sde_irq->clr_off =
				MDP_SSPP_TOP0_OFF+INTR2_CLEAR;
			sde_irq->en_off =
				MDP_SSPP_TOP0_OFF+INTR2_EN;
			sde_irq->status_off =
				MDP_SSPP_TOP0_OFF+INTR2_STATUS;
			break;
		case MDSS_INTR_SSPP_TOP0_HIST_INTR:
			sde_irq->clr_off =
				MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR;
			sde_irq->en_off =
				MDP_SSPP_TOP0_OFF+HIST_INTR_EN;
			sde_irq->status_off =
				MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS;
			break;
		case MDSS_INTR_INTF_0_INTR:
			sde_irq->clr_off =
				MDP_INTF_0_OFF+INTF_INTR_CLEAR;
			sde_irq->en_off =
				MDP_INTF_0_OFF+INTF_INTR_EN;
			sde_irq->status_off =
				MDP_INTF_0_OFF+INTF_INTR_STATUS;
			break;
		case MDSS_INTR_INTF_1_INTR:
			sde_irq->clr_off =
				MDP_INTF_1_OFF+INTF_INTR_CLEAR;
			sde_irq->en_off =
				MDP_INTF_1_OFF+INTF_INTR_EN;
			sde_irq->status_off =
				MDP_INTF_1_OFF+INTF_INTR_STATUS;
			break;
		case MDSS_INTR_INTF_2_INTR:
			sde_irq->clr_off =
				MDP_INTF_2_OFF+INTF_INTR_CLEAR;
			sde_irq->en_off =
				MDP_INTF_2_OFF+INTF_INTR_EN;
			sde_irq->status_off =
				MDP_INTF_2_OFF+INTF_INTR_STATUS;
			break;
		case MDSS_INTR_INTF_3_INTR:
			sde_irq->clr_off =
				MDP_INTF_3_OFF+INTF_INTR_CLEAR;
			sde_irq->en_off =
				MDP_INTF_3_OFF+INTF_INTR_EN;
			sde_irq->status_off =
				MDP_INTF_3_OFF+INTF_INTR_STATUS;
			break;
		case MDSS_INTR_INTF_4_INTR:
			sde_irq->clr_off =
				MDP_INTF_4_OFF+INTF_INTR_CLEAR;
			sde_irq->en_off =
				MDP_INTF_4_OFF+INTF_INTR_EN;
			sde_irq->status_off =
				MDP_INTF_4_OFF+INTF_INTR_STATUS;
			break;
		case MDSS_INTR_AD4_0_INTR:
			sde_irq->clr_off =
				MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF;
			sde_irq->en_off =
				MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF;
			sde_irq->status_off =
				MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF;
			break;
		case MDSS_INTR_AD4_1_INTR:
			sde_irq->clr_off =
				MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF;
			sde_irq->en_off =
				MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF;
			sde_irq->status_off =
				MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF;
			break;
		case MDSS_INTF_TEAR_1_INTR:
			sde_irq->clr_off = MDP_INTF_TEAR_INTF_1_IRQ_OFF +
				MDP_INTF_TEAR_INTR_CLEAR_OFF;
			sde_irq->en_off =
				MDP_INTF_TEAR_INTF_1_IRQ_OFF +
				MDP_INTF_TEAR_INTR_EN_OFF;
			sde_irq->status_off = MDP_INTF_TEAR_INTF_1_IRQ_OFF +
				MDP_INTF_TEAR_INTR_STATUS_OFF;
			break;
		case MDSS_INTF_TEAR_2_INTR:
			sde_irq->clr_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
				MDP_INTF_TEAR_INTR_CLEAR_OFF;
			sde_irq->en_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
				MDP_INTF_TEAR_INTR_EN_OFF;
			sde_irq->status_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
				MDP_INTF_TEAR_INTR_STATUS_OFF;
			break;
		case MDSS_INTR_LTM_0_INTR:
			sde_irq->clr_off =
				MDP_LTM_0_OFF + MDP_LTM_INTR_CLEAR_OFF;
			sde_irq->en_off =
				MDP_LTM_0_OFF + MDP_LTM_INTR_EN_OFF;
			sde_irq->status_off =
				MDP_LTM_0_OFF + MDP_LTM_INTR_STATUS_OFF;
			break;
		case MDSS_INTR_LTM_1_INTR:
			sde_irq->clr_off =
				MDP_LTM_1_OFF + MDP_LTM_INTR_CLEAR_OFF;
			sde_irq->en_off =
				MDP_LTM_1_OFF + MDP_LTM_INTR_EN_OFF;
			sde_irq->status_off =
				MDP_LTM_1_OFF + MDP_LTM_INTR_STATUS_OFF;
			break;
		default:
			pr_err("wrong irq idx %d\n",
				sde_irq->sde_irq_idx);
			return -EINVAL;
		}

		pr_debug("idx:%d irq_idx:%d clr:0x%x en:0x%x status:0x%x\n",
			idx, sde_irq->sde_irq_idx, sde_irq->clr_off,
			sde_irq->en_off, sde_irq->status_off);
	}

	return 0;
}
  999. void sde_hw_intr_destroy(struct sde_hw_intr *intr)
  1000. {
  1001. if (intr) {
  1002. kfree(intr->sde_irq_tbl);
  1003. kfree(intr->sde_irq_map);
  1004. kfree(intr->cache_irq_mask);
  1005. kfree(intr->save_irq_status);
  1006. kfree(intr);
  1007. }
  1008. }
/**
 * _get_irq_map_size - number of irq map entries for a logical irq index
 * @idx: one of the MDSS_INTR_* / MDSS_INTF_TEAR_* enum values
 *
 * Each logical interrupt block has its own static sde_irq_*_map[] table;
 * this returns that table's length.
 *
 * Return: table entry count, or 0 for an unknown index.
 */
static inline u32 _get_irq_map_size(int idx)
{
	u32 ret = 0;

	switch (idx) {
	case MDSS_INTR_SSPP_TOP0_INTR:
		ret = ARRAY_SIZE(sde_irq_intr_map);
		break;
	case MDSS_INTR_SSPP_TOP0_INTR2:
		ret = ARRAY_SIZE(sde_irq_intr2_map);
		break;
	case MDSS_INTR_SSPP_TOP0_HIST_INTR:
		ret = ARRAY_SIZE(sde_irq_hist_map);
		break;
	case MDSS_INTR_INTF_0_INTR:
		ret = ARRAY_SIZE(sde_irq_intf0_map);
		break;
	case MDSS_INTR_INTF_1_INTR:
		ret = ARRAY_SIZE(sde_irq_inf1_map);
		break;
	case MDSS_INTR_INTF_2_INTR:
		ret = ARRAY_SIZE(sde_irq_intf2_map);
		break;
	case MDSS_INTR_INTF_3_INTR:
		ret = ARRAY_SIZE(sde_irq_intf3_map);
		break;
	case MDSS_INTR_INTF_4_INTR:
		ret = ARRAY_SIZE(sde_irq_inf4_map);
		break;
	case MDSS_INTR_AD4_0_INTR:
		ret = ARRAY_SIZE(sde_irq_ad4_0_map);
		break;
	case MDSS_INTR_AD4_1_INTR:
		ret = ARRAY_SIZE(sde_irq_ad4_1_map);
		break;
	case MDSS_INTF_TEAR_1_INTR:
		ret = ARRAY_SIZE(sde_irq_intf1_te_map);
		break;
	case MDSS_INTF_TEAR_2_INTR:
		ret = ARRAY_SIZE(sde_irq_intf2_te_map);
		break;
	case MDSS_INTR_LTM_0_INTR:
		ret = ARRAY_SIZE(sde_irq_ltm_0_map);
		break;
	case MDSS_INTR_LTM_1_INTR:
		ret = ARRAY_SIZE(sde_irq_ltm_1_map);
		break;
	default:
		pr_err("invalid idx:%d\n", idx);
	}

	return ret;
}
/**
 * _get_irq_map_addr - static irq map table for a logical irq index
 * @idx: one of the MDSS_INTR_* / MDSS_INTF_TEAR_* enum values
 *
 * Companion to _get_irq_map_size(); must stay in sync with it so that the
 * returned table has exactly that many entries.
 *
 * Return: pointer to the matching sde_irq_*_map[] table, or NULL for an
 * unknown index.
 */
static inline struct sde_irq_type *_get_irq_map_addr(int idx)
{
	struct sde_irq_type *ret = NULL;

	switch (idx) {
	case MDSS_INTR_SSPP_TOP0_INTR:
		ret = sde_irq_intr_map;
		break;
	case MDSS_INTR_SSPP_TOP0_INTR2:
		ret = sde_irq_intr2_map;
		break;
	case MDSS_INTR_SSPP_TOP0_HIST_INTR:
		ret = sde_irq_hist_map;
		break;
	case MDSS_INTR_INTF_0_INTR:
		ret = sde_irq_intf0_map;
		break;
	case MDSS_INTR_INTF_1_INTR:
		ret = sde_irq_inf1_map;
		break;
	case MDSS_INTR_INTF_2_INTR:
		ret = sde_irq_intf2_map;
		break;
	case MDSS_INTR_INTF_3_INTR:
		ret = sde_irq_intf3_map;
		break;
	case MDSS_INTR_INTF_4_INTR:
		ret = sde_irq_inf4_map;
		break;
	case MDSS_INTR_AD4_0_INTR:
		ret = sde_irq_ad4_0_map;
		break;
	case MDSS_INTR_AD4_1_INTR:
		ret = sde_irq_ad4_1_map;
		break;
	case MDSS_INTF_TEAR_1_INTR:
		ret = sde_irq_intf1_te_map;
		break;
	case MDSS_INTF_TEAR_2_INTR:
		ret = sde_irq_intf2_te_map;
		break;
	case MDSS_INTR_LTM_0_INTR:
		ret = sde_irq_ltm_0_map;
		break;
	case MDSS_INTR_LTM_1_INTR:
		ret = sde_irq_ltm_1_map;
		break;
	default:
		pr_err("invalid idx:%d\n", idx);
	}

	return ret;
}
  1111. static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
  1112. u32 irq_idx, u32 low_idx, u32 high_idx)
  1113. {
  1114. int i, j = 0;
  1115. struct sde_irq_type *src = _get_irq_map_addr(irq_idx);
  1116. u32 src_size = _get_irq_map_size(irq_idx);
  1117. if (!src)
  1118. return -EINVAL;
  1119. if (low_idx >= size || high_idx > size ||
  1120. (high_idx - low_idx > src_size)) {
  1121. pr_err("invalid size l:%d h:%d dst:%d src:%d\n",
  1122. low_idx, high_idx, size, src_size);
  1123. return -EINVAL;
  1124. }
  1125. for (i = low_idx; i < high_idx; i++)
  1126. sde_irq_map[i] = src[j++];
  1127. return 0;
  1128. }
/**
 * _sde_hw_intr_init_irq_tables - build the irq register and map tables
 * @intr: interrupt context with sde_irq_tbl/sde_irq_map already allocated
 * @m:    catalog; m->mdss_irqs selects which logical irqs are supported
 *
 * Walks every supported logical irq, copies its static map entries into a
 * contiguous window of intr->sde_irq_map, records each entry's owning
 * register index, and notes the window [map_idx_start, map_idx_end) in
 * intr->sde_irq_tbl for fast dispatch.  Finally resolves register offsets
 * via _sde_hw_intr_init_sde_irq_tbl().
 *
 * Return: 0 on success, -EINVAL on table overflow or copy failure.
 */
static int _sde_hw_intr_init_irq_tables(struct sde_hw_intr *intr,
	struct sde_mdss_cfg *m)
{
	int i, idx, sde_irq_tbl_idx = 0, ret = 0;
	u32 low_idx, high_idx;
	u32 sde_irq_map_idx = 0;

	/* Initialize the offset of the irq's in the sde_irq_map table */
	for (idx = 0; idx < MDSS_INTR_MAX; idx++) {
		if (test_bit(idx, m->mdss_irqs)) {
			/* map window for this irq: [low_idx, high_idx) */
			low_idx = sde_irq_map_idx;
			high_idx = low_idx + _get_irq_map_size(idx);

			pr_debug("init[%d]=%d low:%d high:%d\n",
				sde_irq_tbl_idx, idx, low_idx, high_idx);

			/* guard against writing past sde_irq_tbl[] */
			if (sde_irq_tbl_idx >= intr->sde_irq_size ||
				sde_irq_tbl_idx < 0) {
				ret = -EINVAL;
				goto exit;
			}

			/* init sde_irq_map with the global irq mapping table */
			if (_sde_copy_regs(intr->sde_irq_map,
					intr->sde_irq_map_size,
					idx, low_idx, high_idx)) {
				ret = -EINVAL;
				goto exit;
			}

			/* init irq map with its reg idx within the irq tbl */
			for (i = low_idx; i < high_idx; i++) {
				intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
				pr_debug("sde_irq_map[%d].reg_idx=%d\n",
						i, sde_irq_tbl_idx);
			}

			/* track the idx of the mapping table for this irq in
			 * sde_irq_map, this to only access the indexes of this
			 * irq during the irq dispatch
			 */
			intr->sde_irq_tbl[sde_irq_tbl_idx].sde_irq_idx = idx;
			intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start =
				low_idx;
			intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end =
				high_idx;

			/* increment idx for both tables accordingly */
			sde_irq_tbl_idx++;
			sde_irq_map_idx = high_idx;
		}
	}

	/* do this after 'sde_irq_idx is initialized in sde_irq_tbl */
	ret = _sde_hw_intr_init_sde_irq_tbl(intr->sde_irq_size,
			intr->sde_irq_tbl);

exit:
	return ret;
}
/**
 * sde_hw_intr_init - allocate and initialize the SDE interrupt context
 * @addr: mapped register I/O base address
 * @m:    hardware catalog; m->mdss_irqs selects the supported irq blocks
 *
 * Counts the supported irq registers and map entries, allocates all
 * tables, then fills them in.  On any failure everything allocated so far
 * is released via sde_hw_intr_destroy().
 *
 * Return: new context, or ERR_PTR() on failure.
 */
struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
		struct sde_mdss_cfg *m)
{
	struct sde_hw_intr *intr = NULL;
	struct sde_mdss_base_cfg *cfg;
	u32 irq_regs_count = 0;
	u32 irq_map_count = 0;
	u32 size;
	int idx;
	int ret = 0;

	if (!addr || !m) {
		ret = -EINVAL;
		goto exit;
	}

	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
	if (!intr) {
		ret = -ENOMEM;
		goto exit;
	}

	cfg = __intr_offset(m, addr, &intr->hw);
	if (!cfg) {
		ret = -EINVAL;
		goto exit;
	}
	__setup_intr_ops(&intr->ops);

	/* NOTE(review): MDSS_INTR_MAX is an enum constant, so on common
	 * builds this comparison is always false; it appears to be a
	 * defensive sanity check only — confirm before removing.
	 */
	if (MDSS_INTR_MAX >= UINT_MAX) {
		pr_err("max intr exceeded:%d\n", MDSS_INTR_MAX);
		ret = -EINVAL;
		goto exit;
	}

	/* check how many irq's this target supports */
	for (idx = 0; idx < MDSS_INTR_MAX; idx++) {
		if (test_bit(idx, m->mdss_irqs)) {
			irq_regs_count++;

			size = _get_irq_map_size(idx);
			/* reject zero-size maps and u32 overflow of the sum */
			if (!size || irq_map_count >= UINT_MAX - size) {
				pr_err("wrong map cnt idx:%d sz:%d cnt:%d\n",
					idx, size, irq_map_count);
				ret = -EINVAL;
				goto exit;
			}

			irq_map_count += size;
		}
	}

	if (irq_regs_count == 0 || irq_regs_count > MDSS_INTR_MAX ||
		irq_map_count == 0) {
		pr_err("wrong mapping of supported irqs 0x%lx\n",
			m->mdss_irqs[0]);
		ret = -EINVAL;
		goto exit;
	}

	/* Allocate table for the irq registers */
	intr->sde_irq_size = irq_regs_count;
	intr->sde_irq_tbl = kcalloc(irq_regs_count, sizeof(*intr->sde_irq_tbl),
		GFP_KERNEL);
	if (intr->sde_irq_tbl == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Allocate table with the valid interrupts bits */
	intr->sde_irq_map_size = irq_map_count;
	intr->sde_irq_map = kcalloc(irq_map_count, sizeof(*intr->sde_irq_map),
		GFP_KERNEL);
	if (intr->sde_irq_map == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Initialize IRQs tables */
	ret = _sde_hw_intr_init_irq_tables(intr, m);
	if (ret)
		goto exit;

	/* per-register cached enable mask, indexed by reg_idx */
	intr->cache_irq_mask = kcalloc(intr->sde_irq_size,
			sizeof(*intr->cache_irq_mask), GFP_KERNEL);
	if (intr->cache_irq_mask == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* per-register status snapshot filled by get_interrupt_statuses */
	intr->save_irq_status = kcalloc(intr->sde_irq_size,
			sizeof(*intr->save_irq_status), GFP_KERNEL);
	if (intr->save_irq_status == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	spin_lock_init(&intr->irq_lock);

exit:
	if (ret) {
		/* destroy handles NULL and partially-built contexts */
		sde_hw_intr_destroy(intr);
		return ERR_PTR(ret);
	}

	return intr;
}