// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/slab.h>

#include "sde_kms.h"
#include "sde_hw_interrupts.h"
#include "sde_hw_util.h"
#include "sde_hw_mdss.h"

/**
 * Register offsets in the MDSS register file for the interrupt registers
 * w.r.t. the base for that block. Base offsets for IRQs should come from the
 * device tree and get stored in the catalog (irq_offset_list) until they
 * are added to the sde_irq_tbl during table initialization.
 */
#define HW_INTR_STATUS 0x0010
#define MDP_AD4_INTR_EN_OFF 0x41c
#define MDP_AD4_INTR_CLEAR_OFF 0x424
#define MDP_AD4_INTR_STATUS_OFF 0x420
#define MDP_INTF_TEAR_INTR_EN_OFF 0x0
#define MDP_INTF_TEAR_INTR_STATUS_OFF 0x4
#define MDP_INTF_TEAR_INTR_CLEAR_OFF 0x8
#define MDP_LTM_INTR_EN_OFF 0x50
#define MDP_LTM_INTR_STATUS_OFF 0x54
#define MDP_LTM_INTR_CLEAR_OFF 0x58
#define MDP_WB_INTR_EN_OFF 0x18C
#define MDP_WB_INTR_STATUS_OFF 0x190
#define MDP_WB_INTR_CLEAR_OFF 0x194

/**
 * WB interrupt status bit definitions
 */
#define SDE_INTR_WB_0_DONE BIT(0)
#define SDE_INTR_WB_1_DONE BIT(1)
#define SDE_INTR_WB_2_DONE BIT(4)

/**
 * WDOG timer interrupt status bit definitions
 */
#define SDE_INTR_WD_TIMER_0_DONE BIT(2)
#define SDE_INTR_WD_TIMER_1_DONE BIT(3)
#define SDE_INTR_WD_TIMER_2_DONE BIT(5)
#define SDE_INTR_WD_TIMER_3_DONE BIT(6)
#define SDE_INTR_WD_TIMER_4_DONE BIT(7)

/**
 * Pingpong interrupt status bit definitions
 */
#define SDE_INTR_PING_PONG_0_DONE BIT(8)
#define SDE_INTR_PING_PONG_1_DONE BIT(9)
#define SDE_INTR_PING_PONG_2_DONE BIT(10)
#define SDE_INTR_PING_PONG_3_DONE BIT(11)
#define SDE_INTR_PING_PONG_4_DONE BIT(30)
#define SDE_INTR_PING_PONG_5_DONE BIT(31)
#define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
#define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
#define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
#define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
#define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
#define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
#define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
#define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
#define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
#define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
#define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
#define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)

/**
 * Interface interrupt status bit definitions
 */
#define SDE_INTR_INTF_0_UNDERRUN BIT(24)
#define SDE_INTR_INTF_1_UNDERRUN BIT(26)
#define SDE_INTR_INTF_2_UNDERRUN BIT(28)
#define SDE_INTR_INTF_3_UNDERRUN BIT(30)
#define SDE_INTR_INTF_0_VSYNC BIT(25)
#define SDE_INTR_INTF_1_VSYNC BIT(27)
#define SDE_INTR_INTF_2_VSYNC BIT(29)
#define SDE_INTR_INTF_3_VSYNC BIT(31)

/**
 * Ctl start interrupt status bit definitions
 */
#define SDE_INTR_CTL_0_START BIT(9)
#define SDE_INTR_CTL_1_START BIT(10)
#define SDE_INTR_CTL_2_START BIT(11)
#define SDE_INTR_CTL_3_START BIT(12)
#define SDE_INTR_CTL_4_START BIT(13)
#define SDE_INTR_CTL_5_START BIT(23)

/**
 * Ctl done interrupt status bit definitions
 */
#define SDE_INTR_CTL_0_DONE BIT(0)
#define SDE_INTR_CTL_1_DONE BIT(1)
#define SDE_INTR_CTL_2_DONE BIT(2)
#define SDE_INTR_CTL_3_DONE BIT(3)
#define SDE_INTR_CTL_4_DONE BIT(4)
#define SDE_INTR_CTL_5_DONE BIT(5)

/**
 * Concurrent WB overflow interrupt status bit definitions
 */
#define SDE_INTR_CWB_OVERFLOW BIT(29)
#define SDE_INTR_CWB_2_OVERFLOW BIT(28)

/**
 * Histogram VIG done interrupt status bit definitions
 */
#define SDE_INTR_HIST_VIG_0_DONE BIT(0)
#define SDE_INTR_HIST_VIG_1_DONE BIT(4)
#define SDE_INTR_HIST_VIG_2_DONE BIT(8)
#define SDE_INTR_HIST_VIG_3_DONE BIT(10)

/**
 * Histogram VIG reset sequence done interrupt status bit definitions
 */
#define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
#define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
#define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
#define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)

/**
 * Histogram DSPP done interrupt status bit definitions
 */
#define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
#define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
#define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
#define SDE_INTR_HIST_DSPP_3_DONE BIT(22)

/**
 * Histogram DSPP reset sequence done interrupt status bit definitions
 */
#define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
#define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
#define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
#define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)

/**
 * INTF interrupt status bit definitions
 */
#define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
#define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
#define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
#define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
#define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
#define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
#define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
#define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
#define SDE_INTR_PROG_LINE BIT(8)
#define SDE_INTR_INTF_WD_TIMER_0_DONE BIT(13)

/**
 * AD4 interrupt status bit definitions
 */
#define SDE_INTR_BRIGHTPR_UPDATED BIT(4)
#define SDE_INTR_DARKENH_UPDATED BIT(3)
#define SDE_INTR_STREN_OUTROI_UPDATED BIT(2)
#define SDE_INTR_STREN_INROI_UPDATED BIT(1)
#define SDE_INTR_BACKLIGHT_UPDATED BIT(0)

/**
 * INTF tear IRQ register bit definitions
 */
#define SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE BIT(0)
#define SDE_INTR_INTF_TEAR_WR_PTR BIT(1)
#define SDE_INTR_INTF_TEAR_RD_PTR BIT(2)
#define SDE_INTR_INTF_TEAR_TE_DETECTED BIT(3)
#define SDE_INTR_INTF_TEAR_TEAR_DETECTED BIT(4)
#define SDE_INTR_INTF_TEAR_TE_DEASSERT_DETECTED BIT(6)

/**
 * LTM interrupt status bit definitions
 */
#define SDE_INTR_LTM_STATS_DONE BIT(0)
#define SDE_INTR_LTM_STATS_WB_PB BIT(5)

/**
 * WB block interrupt status bit definitions
 */
#define SDE_INTR_WB_PROG_LINE BIT(0)

/**
 * struct sde_intr_reg - array of SDE register sets
 * @clr_off:		offset to CLEAR reg
 * @en_off:		offset to ENABLE reg
 * @status_off:		offset to STATUS reg
 * @map_idx_start:	first offset in the sde_irq_map table
 * @map_idx_end:	last offset in the sde_irq_map table
 */
struct sde_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
	u32 map_idx_start;
	u32 map_idx_end;
};

/**
 * struct sde_irq_type - maps each irq with i/f
 * @intr_type:		type of interrupt listed in sde_intr_type
 * @instance_idx:	instance index of the associated HW block in SDE
 * @irq_mask:		corresponding bit in the interrupt status reg
 * @reg_idx:		index in the 'sde_irq_tbl' table, to know which
 *			register offsets to use.
 */
struct sde_irq_type {
	u32 intr_type;
	u32 instance_idx;
	u32 irq_mask;
	int reg_idx;
};

/**
 * IRQ mapping tables - used to look up the irq_idx in these tables that has
 * a matching interrupt type and instance index.
 * Each of these tables is copied to a dynamically allocated
 * table that is used to service each of the irqs.
 * -1 indicates an uninitialized value which should be set when copying
 * these tables to the sde_irq_map.
 */
static struct sde_irq_type sde_irq_intr_map[] = {
	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_0, SDE_INTR_WB_0_DONE, -1},
	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_1, SDE_INTR_WB_1_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, -1},
	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
		SDE_INTR_PING_PONG_0_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
		SDE_INTR_PING_PONG_1_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
		SDE_INTR_PING_PONG_2_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
		SDE_INTR_PING_PONG_3_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
		SDE_INTR_PING_PONG_0_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
		SDE_INTR_PING_PONG_1_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
		SDE_INTR_PING_PONG_2_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
		SDE_INTR_PING_PONG_3_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
		SDE_INTR_PING_PONG_0_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
		SDE_INTR_PING_PONG_1_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
		SDE_INTR_PING_PONG_2_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
		SDE_INTR_PING_PONG_3_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
		SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
		SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
		SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
		SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, -1},
};

static struct sde_irq_type sde_irq_intr2_map[] = {
	{ SDE_IRQ_TYPE_CTL_START, CTL_0,
		SDE_INTR_CTL_0_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_1,
		SDE_INTR_CTL_1_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_2,
		SDE_INTR_CTL_2_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_3,
		SDE_INTR_CTL_3_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_4,
		SDE_INTR_CTL_4_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_5,
		SDE_INTR_CTL_5_START, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_0,
		SDE_INTR_CTL_0_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_1,
		SDE_INTR_CTL_1_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_2,
		SDE_INTR_CTL_2_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_3,
		SDE_INTR_CTL_3_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_4,
		SDE_INTR_CTL_4_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_5,
		SDE_INTR_CTL_5_DONE, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, PINGPONG_CWB_0, SDE_INTR_CWB_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, PINGPONG_CWB_2, SDE_INTR_CWB_2_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_4,
		SDE_INTR_PING_PONG_4_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_5,
		SDE_INTR_PING_PONG_5_DONE, -1},
};

static struct sde_irq_type sde_irq_hist_map[] = {
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
		SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
		SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
		SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
		SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
		SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
		SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
		SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
		SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, -1},
};

static struct sde_irq_type sde_irq_intf_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, -1,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, -1,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, -1,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, -1,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, -1,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, -1,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, -1,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, -1,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, -1, SDE_INTR_PROG_LINE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, -1, SDE_INTR_WD_TIMER_0_DONE, -1},
};

static struct sde_irq_type sde_irq_ad4_map[] = {
	{ SDE_IRQ_TYPE_AD4_BL_DONE, -1, SDE_INTR_BACKLIGHT_UPDATED, -1},
};

static struct sde_irq_type sde_irq_intf_te_map[] = {
	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, -1,
		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, -1,
		SDE_INTR_INTF_TEAR_WR_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, -1,
		SDE_INTR_INTF_TEAR_RD_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_DETECT, -1,
		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_TE_ASSERT, -1,
		SDE_INTR_INTF_TEAR_TE_DETECTED, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_TE_DEASSERT, -1,
		SDE_INTR_INTF_TEAR_TE_DEASSERT_DETECTED, -1},
};

static struct sde_irq_type sde_irq_ltm_map[] = {
	{ SDE_IRQ_TYPE_LTM_STATS_DONE, -1, SDE_INTR_LTM_STATS_DONE, -1},
	{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, -1, SDE_INTR_LTM_STATS_WB_PB, -1},
};

static struct sde_irq_type sde_irq_wb_map[] = {
	{ SDE_IRQ_TYPE_WB_PROG_LINE, -1, SDE_INTR_WB_PROG_LINE, -1},
};

static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
		enum sde_intr_type intr_type, u32 instance_idx)
{
	int i;

	for (i = 0; i < intr->sde_irq_map_size; i++) {
		if (intr_type == intr->sde_irq_map[i].intr_type &&
			instance_idx == intr->sde_irq_map[i].instance_idx)
			return i;
	}

	pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
			intr_type, instance_idx);

	return -EINVAL;
}
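
/*
 * Usage sketch (illustrative, not taken from this file): a caller would
 * typically resolve an irq_idx once through the ops table and cache it:
 *
 *	irq_idx = intr->ops.irq_idx_lookup(intr, SDE_IRQ_TYPE_INTF_VSYNC,
 *			INTF_1);
 *	if (irq_idx < 0)
 *		return irq_idx;
 *
 * The index is only meaningful for the sde_irq_map built for this target,
 * since the map layout depends on the catalog's irq_offset_list.
 */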

static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
		void (*cbfunc)(void *, int),
		void *arg)
{
	int reg_idx;
	int irq_idx;
	int start_idx;
	int end_idx;
	u32 irq_status;
	u32 enable_mask;
	unsigned long irq_flags;

	if (!intr)
		return;

	/*
	 * The dispatcher will save the IRQ status before calling here.
	 * Now go through each IRQ status and find the matching
	 * irq lookup index.
	 */
	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < intr->sde_irq_size; reg_idx++) {
		/*
		 * Each interrupt register has a dynamic range of indexes,
		 * initialized during hw_intr_init when sde_irq_tbl is created.
		 */
		start_idx = intr->sde_irq_tbl[reg_idx].map_idx_start;
		end_idx = intr->sde_irq_tbl[reg_idx].map_idx_end;

		if (start_idx >= intr->sde_irq_map_size ||
				end_idx > intr->sde_irq_map_size)
			continue;

		/* Read interrupt status */
		irq_status = SDE_REG_READ(&intr->hw,
				intr->sde_irq_tbl[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = SDE_REG_READ(&intr->hw,
				intr->sde_irq_tbl[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			SDE_REG_WRITE(&intr->hw,
					intr->sde_irq_tbl[reg_idx].clr_off,
					irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		/*
		 * Search for the matching intr status in the irq map.
		 * start_idx and end_idx define the search range in
		 * the sde_irq_map.
		 */
		for (irq_idx = start_idx;
				(irq_idx < end_idx) && irq_status;
				irq_idx++)
			if ((irq_status &
				intr->sde_irq_map[irq_idx].irq_mask) &&
				(intr->sde_irq_map[irq_idx].reg_idx ==
				 reg_idx)) {
				/*
				 * On an irq-mask match, call the given
				 * cbfunc; the callback takes care of
				 * clearing the interrupt status. If no
				 * cbfunc is provided, the interrupt is
				 * cleared here.
				 */
				if (cbfunc)
					cbfunc(arg, irq_idx);
				else
					intr->ops.clear_intr_status_nolock(
							intr, irq_idx);

				/*
				 * Once the callback finishes, clear the
				 * matching mask from irq_status. When
				 * irq_status reaches zero, the search can
				 * stop.
				 */
				irq_status &=
					~intr->sde_irq_map[irq_idx].irq_mask;
			}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
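
/*
 * Flow sketch (illustrative): the top-level MDSS interrupt handler is
 * expected to call dispatch_irqs with a callback that fans out to the
 * per-client handlers, roughly:
 *
 *	static void irq_cb(void *arg, int irq_idx)
 *	{
 *		struct sde_kms *sde_kms = arg;
 *		...route irq_idx to its registered handler...
 *	}
 *
 *	intr->ops.dispatch_irqs(intr, irq_cb, sde_kms);
 *
 * The actual caller lives in the core irq layer (sde_core_irq.c) and may
 * differ; the contract shown here is only that the callback runs with
 * intr->irq_lock held, so it must not sleep.
 */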

static int sde_hw_intr_enable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct sde_intr_reg *reg;
	const struct sde_irq_type *irq;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	irq = &intr->sde_irq_map[irq_idx];
	reg_idx = irq->reg_idx;
	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
		return -EINVAL;
	}

	reg = &intr->sde_irq_tbl[reg_idx];
	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & irq->irq_mask) {
		dbgstr = "SDE IRQ already set:";
	} else {
		dbgstr = "SDE IRQ enabled:";

		cache_irq_mask |= irq->irq_mask;
		/* Clear any pending interrupt */
		SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
		/* Enable interrupts with the new mask */
		SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
			irq->irq_mask, cache_irq_mask);

	return 0;
}
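
/*
 * Note: the *_nolock enable/disable paths deliberately do not take
 * intr->irq_lock; the caller is expected to hold it. A minimal sketch of
 * the expected usage (hypothetical caller code, not from this file):
 *
 *	spin_lock_irqsave(&intr->irq_lock, irq_flags);
 *	intr->ops.enable_irq_nolock(intr, irq_idx);
 *	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
 *
 * Enabling also clears any pending instance of the interrupt first, so a
 * stale status bit cannot fire immediately on unmask.
 */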

static int sde_hw_intr_disable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct sde_intr_reg *reg;
	const struct sde_irq_type *irq;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	irq = &intr->sde_irq_map[irq_idx];
	reg_idx = irq->reg_idx;
	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
		return -EINVAL;
	}

	reg = &intr->sde_irq_tbl[reg_idx];
	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & irq->irq_mask) == 0) {
		dbgstr = "SDE IRQ is already cleared:";
	} else {
		dbgstr = "SDE IRQ mask disable:";

		cache_irq_mask &= ~irq->irq_mask;
		/* Disable interrupts based on the new mask */
		SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupt */
		SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
			irq->irq_mask, cache_irq_mask);

	return 0;
}
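
/*
 * Design note: cache_irq_mask[] mirrors the ENABLE register for each entry
 * of sde_irq_tbl, so enable/disable can read-modify-write the mask without
 * an extra register read. For example (bit values hypothetical), enabling
 * two irqs that share a register simply ORs their masks into the cache:
 *
 *	cache_irq_mask = 0x00000000
 *	enable irq A (mask BIT(12)) -> EN written with 0x00001000
 *	enable irq B (mask BIT(16)) -> EN written with 0x00011000
 */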

static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
{
	int i;

	if (!intr)
		return -EINVAL;

	for (i = 0; i < intr->sde_irq_size; i++)
		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
				0xffffffff);

	/* ensure register writes go through */
	wmb();

	return 0;
}

static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
{
	int i;

	if (!intr)
		return -EINVAL;

	for (i = 0; i < intr->sde_irq_size; i++)
		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].en_off,
				0x00000000);

	/* ensure register writes go through */
	wmb();

	return 0;
}
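
/*
 * Usage sketch (illustrative): during interrupt setup or teardown the core
 * layer would typically quiesce the hardware with these two ops in pairs:
 *
 *	intr->ops.disable_all_irqs(intr);
 *	intr->ops.clear_all_irqs(intr);
 *
 * i.e. mask every source, then drop any stale status bits, so nothing can
 * assert between probe and the first explicit enable.
 */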

static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
		uint32_t *sources)
{
	if (!intr || !sources)
		return -EINVAL;

	*sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);

	return 0;
}

static void sde_hw_intr_clear_intr_status_nolock(struct sde_hw_intr *intr,
		int irq_idx)
{
	int reg_idx;

	if (!intr)
		return;

	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return;
	}

	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
		return;
	}

	SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
			intr->sde_irq_map[irq_idx].irq_mask);

	/* ensure register writes go through */
	wmb();
}

static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
		int irq_idx)
{
	unsigned long irq_flags;

	if (!intr)
		return;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	sde_hw_intr_clear_intr_status_nolock(intr, irq_idx);
	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}

static u32 sde_hw_intr_get_intr_status_nolock(struct sde_hw_intr *intr,
		int irq_idx, bool clear)
{
	int reg_idx;
	u32 intr_status;

	if (!intr)
		return 0;

	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return 0;
	}

	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
		return 0;
	}

	intr_status = SDE_REG_READ(&intr->hw,
			intr->sde_irq_tbl[reg_idx].status_off) &
			intr->sde_irq_map[irq_idx].irq_mask;
	if (intr_status && clear)
		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	return intr_status;
}

static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
		int irq_idx, bool clear)
{
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return 0;
	}

	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	intr_status = SDE_REG_READ(&intr->hw,
			intr->sde_irq_tbl[reg_idx].status_off) &
			intr->sde_irq_map[irq_idx].irq_mask;
	if (intr_status && clear)
		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}
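
/*
 * Note on the two status variants: get_intr_status_nolock() is intended for
 * contexts that already hold intr->irq_lock (e.g. from within the dispatch
 * callback), while get_interrupt_status() takes the lock itself. A caller
 * polling a status bit from process context would use the locked form, for
 * instance (sketch):
 *
 *	status = intr->ops.get_interrupt_status(intr, irq_idx, true);
 *
 * with clear=true so the bit is consumed atomically with the read.
 */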

static int _set_sde_irq_tbl_offset_top(struct sde_intr_reg *sde_irq,
		struct sde_intr_irq_offsets *item)
{
	u32 base_offset;

	if (!sde_irq || !item)
		return -EINVAL;

	base_offset = item->base_offset;
	switch (item->instance_idx) {
	case SDE_INTR_TOP_INTR:
		sde_irq->clr_off = base_offset + INTR_CLEAR;
		sde_irq->en_off = base_offset + INTR_EN;
		sde_irq->status_off = base_offset + INTR_STATUS;
		break;
	case SDE_INTR_TOP_INTR2:
		sde_irq->clr_off = base_offset + INTR2_CLEAR;
		sde_irq->en_off = base_offset + INTR2_EN;
		sde_irq->status_off = base_offset + INTR2_STATUS;
		break;
	case SDE_INTR_TOP_HIST_INTR:
		sde_irq->clr_off = base_offset + HIST_INTR_CLEAR;
		sde_irq->en_off = base_offset + HIST_INTR_EN;
		sde_irq->status_off = base_offset + HIST_INTR_STATUS;
		break;
	default:
		pr_err("invalid TOP intr for instance %d\n",
				item->instance_idx);
		return -EINVAL;
	}

	return 0;
}

static int _set_sde_irq_tbl_offset(struct sde_intr_reg *sde_irq,
		struct sde_intr_irq_offsets *item)
{
	u32 base_offset;
	int rc = 0;

	if (!sde_irq || !item)
		return -EINVAL;

	base_offset = item->base_offset;
	switch (item->type) {
	case SDE_INTR_HWBLK_TOP:
		rc = _set_sde_irq_tbl_offset_top(sde_irq, item);
		break;
	case SDE_INTR_HWBLK_INTF:
		sde_irq->clr_off = base_offset + INTF_INTR_CLEAR;
		sde_irq->en_off = base_offset + INTF_INTR_EN;
		sde_irq->status_off = base_offset + INTF_INTR_STATUS;
		break;
	case SDE_INTR_HWBLK_AD4:
		sde_irq->clr_off = base_offset + MDP_AD4_INTR_CLEAR_OFF;
		sde_irq->en_off = base_offset + MDP_AD4_INTR_EN_OFF;
		sde_irq->status_off = base_offset + MDP_AD4_INTR_STATUS_OFF;
		break;
	case SDE_INTR_HWBLK_INTF_TEAR:
		sde_irq->clr_off = base_offset + MDP_INTF_TEAR_INTR_CLEAR_OFF;
		sde_irq->en_off = base_offset + MDP_INTF_TEAR_INTR_EN_OFF;
		sde_irq->status_off = base_offset +
			MDP_INTF_TEAR_INTR_STATUS_OFF;
		break;
	case SDE_INTR_HWBLK_LTM:
		sde_irq->clr_off = base_offset + MDP_LTM_INTR_CLEAR_OFF;
		sde_irq->en_off = base_offset + MDP_LTM_INTR_EN_OFF;
		sde_irq->status_off = base_offset + MDP_LTM_INTR_STATUS_OFF;
		break;
	case SDE_INTR_HWBLK_WB:
		sde_irq->clr_off = base_offset + MDP_WB_INTR_CLEAR_OFF;
		sde_irq->en_off = base_offset + MDP_WB_INTR_EN_OFF;
		sde_irq->status_off = base_offset + MDP_WB_INTR_STATUS_OFF;
		break;
	default:
		pr_err("unrecognized intr blk type %d\n",
				item->type);
		rc = -EINVAL;
	}

	return rc;
}
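
/*
 * Worked example (hypothetical base, real offsets from the defines above):
 * for an SDE_INTR_HWBLK_INTF_TEAR entry whose catalog base_offset were
 * 0x6e800, the resulting table entry would be
 *
 *	en_off     = 0x6e800 + MDP_INTF_TEAR_INTR_EN_OFF     = 0x6e800
 *	status_off = 0x6e800 + MDP_INTF_TEAR_INTR_STATUS_OFF = 0x6e804
 *	clr_off    = 0x6e800 + MDP_INTF_TEAR_INTR_CLEAR_OFF  = 0x6e808
 *
 * The base itself comes from the device tree via the catalog's
 * irq_offset_list; 0x6e800 is only an illustration.
 */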

static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
{
	ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
	ops->enable_irq_nolock = sde_hw_intr_enable_irq_nolock;
	ops->disable_irq_nolock = sde_hw_intr_disable_irq_nolock;
	ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
	ops->clear_all_irqs = sde_hw_intr_clear_irqs;
	ops->disable_all_irqs = sde_hw_intr_disable_irqs;
	ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
	ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
	ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock;
	ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
	ops->get_intr_status_nolock = sde_hw_intr_get_intr_status_nolock;
}

static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
		void __iomem *addr, struct sde_hw_blk_reg_map *hw)
{
	if (!m || !addr || !hw || m->mdp_count == 0)
		return NULL;

	hw->base_off = addr;
	hw->blk_off = m->mdss[0].base;
	hw->hw_rev = m->hw_rev;

	return &m->mdss[0];
}

void sde_hw_intr_destroy(struct sde_hw_intr *intr)
{
	if (intr) {
		kfree(intr->sde_irq_tbl);
		kfree(intr->sde_irq_map);
		kfree(intr->cache_irq_mask);
		kfree(intr);
	}
}

static inline u32 _get_irq_map_size_top(enum sde_intr_top_intr inst)
{
	u32 ret = 0;

	switch (inst) {
	case SDE_INTR_TOP_INTR:
		ret = ARRAY_SIZE(sde_irq_intr_map);
		break;
	case SDE_INTR_TOP_INTR2:
		ret = ARRAY_SIZE(sde_irq_intr2_map);
		break;
	case SDE_INTR_TOP_HIST_INTR:
		ret = ARRAY_SIZE(sde_irq_hist_map);
		break;
	default:
		pr_err("invalid top inst:%d\n", inst);
	}

	return ret;
}

static inline u32 _get_irq_map_size(struct sde_intr_irq_offsets *item)
{
	u32 ret = 0;

	switch (item->type) {
	case SDE_INTR_HWBLK_TOP:
		ret = _get_irq_map_size_top(item->instance_idx);
		break;
	case SDE_INTR_HWBLK_INTF:
		ret = ARRAY_SIZE(sde_irq_intf_map);
		break;
	case SDE_INTR_HWBLK_AD4:
		ret = ARRAY_SIZE(sde_irq_ad4_map);
		break;
	case SDE_INTR_HWBLK_INTF_TEAR:
		ret = ARRAY_SIZE(sde_irq_intf_te_map);
		break;
	case SDE_INTR_HWBLK_LTM:
		ret = ARRAY_SIZE(sde_irq_ltm_map);
		break;
	case SDE_INTR_HWBLK_WB:
		ret = ARRAY_SIZE(sde_irq_wb_map);
		break;
	default:
		pr_err("invalid type: %d\n", item->type);
	}

	return ret;
}

static inline struct sde_irq_type *_get_irq_map_addr_top(
		enum sde_intr_top_intr inst)
{
	struct sde_irq_type *ret = NULL;

	switch (inst) {
	case SDE_INTR_TOP_INTR:
		ret = sde_irq_intr_map;
		break;
	case SDE_INTR_TOP_INTR2:
		ret = sde_irq_intr2_map;
		break;
	case SDE_INTR_TOP_HIST_INTR:
		ret = sde_irq_hist_map;
		break;
	default:
		pr_err("invalid top inst:%d\n", inst);
	}

	return ret;
}

static inline struct sde_irq_type *_get_irq_map_addr(
		struct sde_intr_irq_offsets *item)
{
	struct sde_irq_type *ret = NULL;

	switch (item->type) {
	case SDE_INTR_HWBLK_TOP:
		ret = _get_irq_map_addr_top(item->instance_idx);
		break;
	case SDE_INTR_HWBLK_INTF:
		ret = sde_irq_intf_map;
		break;
	case SDE_INTR_HWBLK_AD4:
		ret = sde_irq_ad4_map;
		break;
	case SDE_INTR_HWBLK_INTF_TEAR:
		ret = sde_irq_intf_te_map;
		break;
	case SDE_INTR_HWBLK_LTM:
		ret = sde_irq_ltm_map;
		break;
	case SDE_INTR_HWBLK_WB:
		ret = sde_irq_wb_map;
		break;
	default:
		pr_err("invalid type: %d\n", item->type);
	}

	return ret;
}

static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
		struct sde_intr_irq_offsets *item, u32 low_idx, u32 high_idx)
{
	int i, j = 0;
	struct sde_irq_type *src = _get_irq_map_addr(item);
	u32 src_size = _get_irq_map_size(item);

	if (!src)
		return -EINVAL;

	if (low_idx >= size || high_idx > size ||
			(high_idx - low_idx > src_size)) {
		pr_err("invalid size l:%d h:%d dst:%d src:%d\n",
				low_idx, high_idx, size, src_size);
		return -EINVAL;
	}

	for (i = low_idx; i < high_idx; i++)
		sde_irq_map[i] = src[j++];

	return 0;
}
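
/*
 * Example (illustrative indexes): with low_idx=8 and high_idx=12,
 * _sde_copy_regs() copies entries 0..3 of the block's static map into
 * sde_irq_map[8..11], i.e. each register's rows land in one contiguous
 * window of the global map.
 */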

static int _sde_hw_intr_init_irq_tables(struct sde_hw_intr *intr,
		struct sde_mdss_cfg *m)
{
	struct sde_intr_irq_offsets *item;
	int i, sde_irq_tbl_idx = 0, ret = 0;
	u32 low_idx, high_idx;
	u32 sde_irq_map_idx = 0;

	/* Initialize the offsets in the sde_irq_map & sde_irq_tbl tables */
	list_for_each_entry(item, &m->irq_offset_list, list) {
		low_idx = sde_irq_map_idx;
		high_idx = low_idx + _get_irq_map_size(item);

		if (sde_irq_tbl_idx >= intr->sde_irq_size ||
				sde_irq_tbl_idx < 0) {
			ret = -EINVAL;
			goto exit;
		}

		/* init sde_irq_map with the global irq mapping table */
		if (_sde_copy_regs(intr->sde_irq_map, intr->sde_irq_map_size,
				item, low_idx, high_idx)) {
			ret = -EINVAL;
			goto exit;
		}

		/* init the irq map with its reg & instance idxs in the irq tbl */
		for (i = low_idx; i < high_idx; i++) {
			intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
			if (item->type != SDE_INTR_HWBLK_TOP)
				intr->sde_irq_map[i].instance_idx =
						item->instance_idx;
			pr_debug("sde_irq_map[%d].reg_idx=%d .inst_idx = %d\n",
					i, sde_irq_tbl_idx, item->instance_idx);
		}

		/* track the idx range of this irq's mapping table in
		 * sde_irq_map, so that only the indexes belonging to this
		 * irq are accessed during irq dispatch
		 */
		intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start = low_idx;
		intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end = high_idx;

		ret = _set_sde_irq_tbl_offset(
				&intr->sde_irq_tbl[sde_irq_tbl_idx], item);
		if (ret)
			goto exit;

		/* increment the idx for both tables accordingly */
		sde_irq_tbl_idx++;
		sde_irq_map_idx = high_idx;
	}

exit:
	sde_hw_catalog_irq_offset_list_delete(&m->irq_offset_list);
	return ret;
}
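
/*
 * Resulting layout (schematic; actual sizes depend on the target catalog):
 *
 *	sde_irq_tbl[i]                     sde_irq_map
 *	+------------------------+         +--------------------+
 *	| clr/en/status offsets  |  ---->  | rows for reg i     |
 *	| map_idx_start/end      |         | (start .. end-1)   |
 *	+------------------------+         +--------------------+
 *
 * Every row in the window also records reg_idx = i, so a dispatch hit can
 * be validated in both directions.
 */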

struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
		struct sde_mdss_cfg *m)
{
	struct sde_hw_intr *intr = NULL;
	struct sde_mdss_base_cfg *cfg;
	struct sde_intr_irq_offsets *item;
	u32 irq_regs_count = 0;
	u32 irq_map_count = 0;
	u32 size;
	int ret = 0;

	if (!addr || !m) {
		ret = -EINVAL;
		goto exit;
	}

	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
	if (!intr) {
		ret = -ENOMEM;
		goto exit;
	}

	cfg = __intr_offset(m, addr, &intr->hw);
	if (!cfg) {
		ret = -EINVAL;
		goto exit;
	}
	__setup_intr_ops(&intr->ops);

	/* check how many irqs this target supports */
	list_for_each_entry(item, &m->irq_offset_list, list) {
		size = _get_irq_map_size(item);
		if (!size || irq_map_count >= UINT_MAX - size) {
			pr_err("wrong map cnt idx:%d blk:%d/%d sz:%d cnt:%d\n",
					irq_regs_count, item->type,
					item->instance_idx, size,
					irq_map_count);
			ret = -EINVAL;
			goto exit;
		}

		irq_regs_count++;
		irq_map_count += size;
	}

	if (irq_regs_count == 0 || irq_map_count == 0) {
		pr_err("invalid irq map: %d %d\n",
				irq_regs_count, irq_map_count);
		ret = -EINVAL;
		goto exit;
	}

	/* Allocate the table for the irq registers */
	intr->sde_irq_size = irq_regs_count;
	intr->sde_irq_tbl = kcalloc(irq_regs_count, sizeof(*intr->sde_irq_tbl),
			GFP_KERNEL);
	if (intr->sde_irq_tbl == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Allocate the table with the valid interrupt bits */
	intr->sde_irq_map_size = irq_map_count;
	intr->sde_irq_map = kcalloc(irq_map_count, sizeof(*intr->sde_irq_map),
			GFP_KERNEL);
	if (intr->sde_irq_map == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Initialize the IRQ tables */
	ret = _sde_hw_intr_init_irq_tables(intr, m);
	if (ret)
		goto exit;

	intr->cache_irq_mask = kcalloc(intr->sde_irq_size,
			sizeof(*intr->cache_irq_mask), GFP_KERNEL);
	if (intr->cache_irq_mask == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	spin_lock_init(&intr->irq_lock);

exit:
	if (ret) {
		sde_hw_intr_destroy(intr);
		return ERR_PTR(ret);
	}

	return intr;
}
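
/*
 * Usage sketch (illustrative): callers pair init with destroy, e.g.
 *
 *	intr = sde_hw_intr_init(mmio, catalog);
 *	if (IS_ERR_OR_NULL(intr))
 *		return PTR_ERR(intr);
 *	...
 *	sde_hw_intr_destroy(intr);
 *
 * Note that sde_hw_intr_init() consumes m->irq_offset_list (the list is
 * deleted inside _sde_hw_intr_init_irq_tables() on both success and
 * failure), so it should not be called twice with the same catalog.
 */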