/* sde_hw_interrupts.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/bitops.h>
  6. #include <linux/slab.h>
  7. #include "sde_kms.h"
  8. #include "sde_hw_interrupts.h"
  9. #include "sde_hw_util.h"
  10. #include "sde_hw_mdss.h"
  11. /**
  12. * Register offsets in MDSS register file for the interrupt registers
  13. * w.r.t. base for that block. Base offsets for IRQs should come from the
  14. * device tree and get stored in the catalog(irq_offset_list) until they
  15. * are added to the sde_irq_tbl during the table initialization.
  16. */
  17. #define HW_INTR_STATUS 0x0010
  18. #define MDP_AD4_INTR_EN_OFF 0x41c
  19. #define MDP_AD4_INTR_CLEAR_OFF 0x424
  20. #define MDP_AD4_INTR_STATUS_OFF 0x420
  21. #define MDP_INTF_TEAR_INTR_EN_OFF 0x0
  22. #define MDP_INTF_TEAR_INTR_STATUS_OFF 0x4
  23. #define MDP_INTF_TEAR_INTR_CLEAR_OFF 0x8
  24. #define MDP_LTM_INTR_EN_OFF 0x50
  25. #define MDP_LTM_INTR_STATUS_OFF 0x54
  26. #define MDP_LTM_INTR_CLEAR_OFF 0x58
  27. #define MDP_WB_INTR_EN_OFF 0x18C
  28. #define MDP_WB_INTR_STATUS_OFF 0x190
  29. #define MDP_WB_INTR_CLEAR_OFF 0x194
  30. /**
  31. * WB interrupt status bit definitions
  32. */
  33. #define SDE_INTR_WB_0_DONE BIT(0)
  34. #define SDE_INTR_WB_1_DONE BIT(1)
  35. #define SDE_INTR_WB_2_DONE BIT(4)
  36. /**
  37. * WDOG timer interrupt status bit definitions
  38. */
  39. #define SDE_INTR_WD_TIMER_0_DONE BIT(2)
  40. #define SDE_INTR_WD_TIMER_1_DONE BIT(3)
  41. #define SDE_INTR_WD_TIMER_2_DONE BIT(5)
  42. #define SDE_INTR_WD_TIMER_3_DONE BIT(6)
  43. #define SDE_INTR_WD_TIMER_4_DONE BIT(7)
  44. /**
  45. * Pingpong interrupt status bit definitions
  46. */
  47. #define SDE_INTR_PING_PONG_0_DONE BIT(8)
  48. #define SDE_INTR_PING_PONG_1_DONE BIT(9)
  49. #define SDE_INTR_PING_PONG_2_DONE BIT(10)
  50. #define SDE_INTR_PING_PONG_3_DONE BIT(11)
  51. #define SDE_INTR_PING_PONG_4_DONE BIT(30)
  52. #define SDE_INTR_PING_PONG_5_DONE BIT(31)
  53. #define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
  54. #define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
  55. #define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
  56. #define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
  57. #define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
  58. #define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
  59. #define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
  60. #define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
  61. #define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
  62. #define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
  63. #define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
  64. #define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
  65. /**
  66. * Interface interrupt status bit definitions
  67. */
  68. #define SDE_INTR_INTF_0_UNDERRUN BIT(24)
  69. #define SDE_INTR_INTF_1_UNDERRUN BIT(26)
  70. #define SDE_INTR_INTF_2_UNDERRUN BIT(28)
  71. #define SDE_INTR_INTF_3_UNDERRUN BIT(30)
  72. #define SDE_INTR_INTF_0_VSYNC BIT(25)
  73. #define SDE_INTR_INTF_1_VSYNC BIT(27)
  74. #define SDE_INTR_INTF_2_VSYNC BIT(29)
  75. #define SDE_INTR_INTF_3_VSYNC BIT(31)
  76. /**
  77. * Pingpong Secondary interrupt status bit definitions
  78. */
  79. #define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
  80. #define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
  81. #define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8)
  82. #define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
  83. #define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
  84. /**
  85. * Pingpong TEAR detection interrupt status bit definitions
  86. */
  87. #define SDE_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
  88. #define SDE_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
  89. #define SDE_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
  90. #define SDE_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
  91. /**
  92. * Pingpong TE detection interrupt status bit definitions
  93. */
  94. #define SDE_INTR_PING_PONG_0_TE_DETECTED BIT(24)
  95. #define SDE_INTR_PING_PONG_1_TE_DETECTED BIT(25)
  96. #define SDE_INTR_PING_PONG_2_TE_DETECTED BIT(26)
  97. #define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)
  98. /**
  99. * Ctl start interrupt status bit definitions
  100. */
  101. #define SDE_INTR_CTL_0_START BIT(9)
  102. #define SDE_INTR_CTL_1_START BIT(10)
  103. #define SDE_INTR_CTL_2_START BIT(11)
  104. #define SDE_INTR_CTL_3_START BIT(12)
  105. #define SDE_INTR_CTL_4_START BIT(13)
  106. #define SDE_INTR_CTL_5_START BIT(23)
  107. /**
  108. * Concurrent WB overflow interrupt status bit definitions
  109. */
  110. #define SDE_INTR_CWB_1_OVERFLOW BIT(8)
  111. #define SDE_INTR_CWB_2_OVERFLOW BIT(14)
  112. #define SDE_INTR_CWB_3_OVERFLOW BIT(15)
  113. #define SDE_INTR_CWB_4_OVERFLOW BIT(20)
  114. #define SDE_INTR_CWB_5_OVERFLOW BIT(21)
  115. #define SDE_INTR_CWB_OVERFLOW BIT(29)
  116. /**
  117. * Histogram VIG done interrupt status bit definitions
  118. */
  119. #define SDE_INTR_HIST_VIG_0_DONE BIT(0)
  120. #define SDE_INTR_HIST_VIG_1_DONE BIT(4)
  121. #define SDE_INTR_HIST_VIG_2_DONE BIT(8)
  122. #define SDE_INTR_HIST_VIG_3_DONE BIT(10)
  123. /**
  124. * Histogram VIG reset Sequence done interrupt status bit definitions
  125. */
  126. #define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
  127. #define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
  128. #define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
  129. #define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
  130. /**
  131. * Histogram DSPP done interrupt status bit definitions
  132. */
  133. #define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
  134. #define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
  135. #define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
  136. #define SDE_INTR_HIST_DSPP_3_DONE BIT(22)
  137. /**
  138. * Histogram DSPP reset Sequence done interrupt status bit definitions
  139. */
  140. #define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
  141. #define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
  142. #define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
  143. #define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
  144. /**
  145. * INTF interrupt status bit definitions
  146. */
  147. #define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
  148. #define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
  149. #define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
  150. #define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
  151. #define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
  152. #define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
  153. #define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
  154. #define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
  155. #define SDE_INTR_PROG_LINE BIT(8)
  156. #define SDE_INTR_INTF_WD_TIMER_0_DONE BIT(13)
  157. /**
  158. * AD4 interrupt status bit definitions
  159. */
  160. #define SDE_INTR_BRIGHTPR_UPDATED BIT(4)
  161. #define SDE_INTR_DARKENH_UPDATED BIT(3)
  162. #define SDE_INTR_STREN_OUTROI_UPDATED BIT(2)
  163. #define SDE_INTR_STREN_INROI_UPDATED BIT(1)
  164. #define SDE_INTR_BACKLIGHT_UPDATED BIT(0)
  165. /**
  166. * INTF Tear IRQ register bit definitions
  167. */
  168. #define SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE BIT(0)
  169. #define SDE_INTR_INTF_TEAR_WR_PTR BIT(1)
  170. #define SDE_INTR_INTF_TEAR_RD_PTR BIT(2)
  171. #define SDE_INTR_INTF_TEAR_TE_DETECTED BIT(3)
  172. #define SDE_INTR_INTF_TEAR_TEAR_DETECTED BIT(4)
  173. /**
  174. * LTM interrupt status bit definitions
  175. */
  176. #define SDE_INTR_LTM_STATS_DONE BIT(0)
  177. #define SDE_INTR_LTM_STATS_WB_PB BIT(5)
  178. /**
  179. * WB interrupt status bit definitions
  180. */
  181. #define SDE_INTR_WB_PROG_LINE BIT(0)
  182. /**
  183. * struct sde_intr_reg - array of SDE register sets
  184. * @clr_off: offset to CLEAR reg
  185. * @en_off: offset to ENABLE reg
  186. * @status_off: offset to STATUS reg
  187. * @map_idx_start first offset in the sde_irq_map table
  188. * @map_idx_end last offset in the sde_irq_map table
  189. */
  190. struct sde_intr_reg {
  191. u32 clr_off;
  192. u32 en_off;
  193. u32 status_off;
  194. u32 map_idx_start;
  195. u32 map_idx_end;
  196. };
  197. /**
  198. * struct sde_irq_type - maps each irq with i/f
  199. * @intr_type: type of interrupt listed in sde_intr_type
  200. * @instance_idx: instance index of the associated HW block in SDE
  201. * @irq_mask: corresponding bit in the interrupt status reg
  202. * @reg_idx: index in the 'sde_irq_tbl' table, to know which
  203. * registers offsets to use.
  204. */
  205. struct sde_irq_type {
  206. u32 intr_type;
  207. u32 instance_idx;
  208. u32 irq_mask;
  209. int reg_idx;
  210. };
  211. /**
  212. * IRQ mapping tables - use for lookup an irq_idx in this table that have
  213. * a matching interface type and instance index.
  214. * Each of these tables are copied to a dynamically allocated
  215. * table, that will be used to service each of the irqs
  216. * -1 indicates an uninitialized value which should be set when copying
  217. * these tables to the sde_irq_map.
  218. */
  219. static struct sde_irq_type sde_irq_intr_map[] = {
  220. { SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, -1},
  221. { SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, -1},
  222. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, -1},
  223. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, -1},
  224. { SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, -1},
  225. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, -1},
  226. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, -1},
  227. { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, -1},
  228. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
  229. SDE_INTR_PING_PONG_0_DONE, -1},
  230. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
  231. SDE_INTR_PING_PONG_1_DONE, -1},
  232. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
  233. SDE_INTR_PING_PONG_2_DONE, -1},
  234. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
  235. SDE_INTR_PING_PONG_3_DONE, -1},
  236. { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
  237. SDE_INTR_PING_PONG_0_RD_PTR, -1},
  238. { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
  239. SDE_INTR_PING_PONG_1_RD_PTR, -1},
  240. { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
  241. SDE_INTR_PING_PONG_2_RD_PTR, -1},
  242. { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
  243. SDE_INTR_PING_PONG_3_RD_PTR, -1},
  244. { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
  245. SDE_INTR_PING_PONG_0_WR_PTR, -1},
  246. { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
  247. SDE_INTR_PING_PONG_1_WR_PTR, -1},
  248. { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
  249. SDE_INTR_PING_PONG_2_WR_PTR, -1},
  250. { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
  251. SDE_INTR_PING_PONG_3_WR_PTR, -1},
  252. { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
  253. SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, -1},
  254. { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
  255. SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, -1},
  256. { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
  257. SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, -1},
  258. { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
  259. SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, -1},
  260. { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, -1},
  261. { SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, -1},
  262. { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, -1},
  263. { SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, -1},
  264. { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, -1},
  265. { SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, -1},
  266. { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, -1},
  267. { SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, -1},
  268. };
  269. static struct sde_irq_type sde_irq_intr2_map[] = {
  270. { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
  271. SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, -1},
  272. { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
  273. SDE_INTR_PING_PONG_S0_WR_PTR, -1},
  274. { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_1, SDE_INTR_CWB_1_OVERFLOW, -1},
  275. { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
  276. SDE_INTR_PING_PONG_S0_RD_PTR, -1},
  277. { SDE_IRQ_TYPE_CTL_START, CTL_0,
  278. SDE_INTR_CTL_0_START, -1},
  279. { SDE_IRQ_TYPE_CTL_START, CTL_1,
  280. SDE_INTR_CTL_1_START, -1},
  281. { SDE_IRQ_TYPE_CTL_START, CTL_2,
  282. SDE_INTR_CTL_2_START, -1},
  283. { SDE_IRQ_TYPE_CTL_START, CTL_3,
  284. SDE_INTR_CTL_3_START, -1},
  285. { SDE_IRQ_TYPE_CTL_START, CTL_4,
  286. SDE_INTR_CTL_4_START, -1},
  287. { SDE_IRQ_TYPE_CTL_START, CTL_5,
  288. SDE_INTR_CTL_5_START, -1},
  289. { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, -1},
  290. { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, -1},
  291. { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
  292. SDE_INTR_PING_PONG_0_TEAR_DETECTED, -1},
  293. { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
  294. SDE_INTR_PING_PONG_1_TEAR_DETECTED, -1},
  295. { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
  296. SDE_INTR_PING_PONG_2_TEAR_DETECTED, -1},
  297. { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
  298. SDE_INTR_PING_PONG_3_TEAR_DETECTED, -1},
  299. { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_4, SDE_INTR_CWB_4_OVERFLOW, -1},
  300. { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_5, SDE_INTR_CWB_5_OVERFLOW, -1},
  301. { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
  302. SDE_INTR_PING_PONG_S0_TEAR_DETECTED, -1},
  303. { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
  304. SDE_INTR_PING_PONG_0_TE_DETECTED, -1},
  305. { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
  306. SDE_INTR_PING_PONG_1_TE_DETECTED, -1},
  307. { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
  308. SDE_INTR_PING_PONG_2_TE_DETECTED, -1},
  309. { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
  310. SDE_INTR_PING_PONG_3_TE_DETECTED, -1},
  311. { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
  312. SDE_INTR_PING_PONG_S0_TE_DETECTED, -1},
  313. { SDE_IRQ_TYPE_CWB_OVERFLOW, PINGPONG_CWB_0, SDE_INTR_CWB_OVERFLOW, -1},
  314. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_4,
  315. SDE_INTR_PING_PONG_4_DONE, -1},
  316. { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_5,
  317. SDE_INTR_PING_PONG_5_DONE, -1},
  318. };
  319. static struct sde_irq_type sde_irq_hist_map[] = {
  320. { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, -1},
  321. { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
  322. SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, -1},
  323. { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, -1},
  324. { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
  325. SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, -1},
  326. { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, -1},
  327. { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
  328. SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, -1},
  329. { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, -1},
  330. { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
  331. SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, -1},
  332. { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, -1},
  333. { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
  334. SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, -1},
  335. { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, -1},
  336. { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
  337. SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, -1},
  338. { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, -1},
  339. { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
  340. SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, -1},
  341. { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, -1},
  342. { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
  343. SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, -1},
  344. };
  345. static struct sde_irq_type sde_irq_intf_map[] = {
  346. { SDE_IRQ_TYPE_SFI_VIDEO_IN, -1,
  347. SDE_INTR_VIDEO_INTO_STATIC, -1},
  348. { SDE_IRQ_TYPE_SFI_VIDEO_OUT, -1,
  349. SDE_INTR_VIDEO_OUTOF_STATIC, -1},
  350. { SDE_IRQ_TYPE_SFI_CMD_0_IN, -1,
  351. SDE_INTR_DSICMD_0_INTO_STATIC, -1},
  352. { SDE_IRQ_TYPE_SFI_CMD_0_OUT, -1,
  353. SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
  354. { SDE_IRQ_TYPE_SFI_CMD_1_IN, -1,
  355. SDE_INTR_DSICMD_1_INTO_STATIC, -1},
  356. { SDE_IRQ_TYPE_SFI_CMD_1_OUT, -1,
  357. SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
  358. { SDE_IRQ_TYPE_SFI_CMD_2_IN, -1,
  359. SDE_INTR_DSICMD_2_INTO_STATIC, -1},
  360. { SDE_IRQ_TYPE_SFI_CMD_2_OUT, -1,
  361. SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
  362. { SDE_IRQ_TYPE_PROG_LINE, -1, SDE_INTR_PROG_LINE, -1},
  363. { SDE_IRQ_TYPE_WD_TIMER, -1, SDE_INTR_WD_TIMER_0_DONE, -1},
  364. };
  365. static struct sde_irq_type sde_irq_ad4_map[] = {
  366. { SDE_IRQ_TYPE_AD4_BL_DONE, -1, SDE_INTR_BACKLIGHT_UPDATED, -1},
  367. };
  368. static struct sde_irq_type sde_irq_intf_te_map[] = {
  369. { SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, -1,
  370. SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
  371. { SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, -1,
  372. SDE_INTR_INTF_TEAR_WR_PTR, -1},
  373. { SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, -1,
  374. SDE_INTR_INTF_TEAR_RD_PTR, -1},
  375. { SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, -1,
  376. SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
  377. };
  378. static struct sde_irq_type sde_irq_ltm_map[] = {
  379. { SDE_IRQ_TYPE_LTM_STATS_DONE, -1, SDE_INTR_LTM_STATS_DONE, -1},
  380. { SDE_IRQ_TYPE_LTM_STATS_WB_PB, -1, SDE_INTR_LTM_STATS_WB_PB, -1},
  381. };
  382. static struct sde_irq_type sde_irq_wb_map[] = {
  383. { SDE_IRQ_TYPE_WB_PROG_LINE, -1, SDE_INTR_WB_PROG_LINE, -1},
  384. };
  385. static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
  386. enum sde_intr_type intr_type, u32 instance_idx)
  387. {
  388. int i;
  389. for (i = 0; i < intr->sde_irq_map_size; i++) {
  390. if (intr_type == intr->sde_irq_map[i].intr_type &&
  391. instance_idx == intr->sde_irq_map[i].instance_idx)
  392. return i;
  393. }
  394. pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
  395. intr_type, instance_idx);
  396. return -EINVAL;
  397. }
  398. static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
  399. void (*cbfunc)(void *, int),
  400. void *arg)
  401. {
  402. int reg_idx;
  403. int irq_idx;
  404. int start_idx;
  405. int end_idx;
  406. u32 irq_status;
  407. unsigned long irq_flags;
  408. if (!intr)
  409. return;
  410. /*
  411. * The dispatcher will save the IRQ status before calling here.
  412. * Now need to go through each IRQ status and find matching
  413. * irq lookup index.
  414. */
  415. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  416. for (reg_idx = 0; reg_idx < intr->sde_irq_size; reg_idx++) {
  417. irq_status = intr->save_irq_status[reg_idx];
  418. /*
  419. * Each Interrupt register has dynamic range of indexes,
  420. * initialized during hw_intr_init when sde_irq_tbl is created.
  421. */
  422. start_idx = intr->sde_irq_tbl[reg_idx].map_idx_start;
  423. end_idx = intr->sde_irq_tbl[reg_idx].map_idx_end;
  424. if (start_idx >= intr->sde_irq_map_size ||
  425. end_idx > intr->sde_irq_map_size)
  426. continue;
  427. /*
  428. * Search through matching intr status from irq map.
  429. * start_idx and end_idx defined the search range in
  430. * the sde_irq_map.
  431. */
  432. for (irq_idx = start_idx;
  433. (irq_idx < end_idx) && irq_status;
  434. irq_idx++)
  435. if ((irq_status &
  436. intr->sde_irq_map[irq_idx].irq_mask) &&
  437. (intr->sde_irq_map[irq_idx].reg_idx ==
  438. reg_idx)) {
  439. /*
  440. * Once a match on irq mask, perform a callback
  441. * to the given cbfunc. cbfunc will take care
  442. * the interrupt status clearing. If cbfunc is
  443. * not provided, then the interrupt clearing
  444. * is here.
  445. */
  446. if (cbfunc)
  447. cbfunc(arg, irq_idx);
  448. else
  449. intr->ops.clear_intr_status_nolock(
  450. intr, irq_idx);
  451. /*
  452. * When callback finish, clear the irq_status
  453. * with the matching mask. Once irq_status
  454. * is all cleared, the search can be stopped.
  455. */
  456. irq_status &=
  457. ~intr->sde_irq_map[irq_idx].irq_mask;
  458. }
  459. }
  460. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  461. }
  462. static int sde_hw_intr_enable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
  463. {
  464. int reg_idx;
  465. const struct sde_intr_reg *reg;
  466. const struct sde_irq_type *irq;
  467. const char *dbgstr = NULL;
  468. uint32_t cache_irq_mask;
  469. if (!intr)
  470. return -EINVAL;
  471. if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
  472. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  473. return -EINVAL;
  474. }
  475. irq = &intr->sde_irq_map[irq_idx];
  476. reg_idx = irq->reg_idx;
  477. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  478. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  479. return -EINVAL;
  480. }
  481. reg = &intr->sde_irq_tbl[reg_idx];
  482. cache_irq_mask = intr->cache_irq_mask[reg_idx];
  483. if (cache_irq_mask & irq->irq_mask) {
  484. dbgstr = "SDE IRQ already set:";
  485. } else {
  486. dbgstr = "SDE IRQ enabled:";
  487. cache_irq_mask |= irq->irq_mask;
  488. /* Cleaning any pending interrupt */
  489. SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
  490. /* Enabling interrupts with the new mask */
  491. SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
  492. /* ensure register write goes through */
  493. wmb();
  494. intr->cache_irq_mask[reg_idx] = cache_irq_mask;
  495. }
  496. pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
  497. irq->irq_mask, cache_irq_mask);
  498. return 0;
  499. }
  500. static int sde_hw_intr_disable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
  501. {
  502. int reg_idx;
  503. const struct sde_intr_reg *reg;
  504. const struct sde_irq_type *irq;
  505. const char *dbgstr = NULL;
  506. uint32_t cache_irq_mask;
  507. if (!intr)
  508. return -EINVAL;
  509. if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
  510. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  511. return -EINVAL;
  512. }
  513. irq = &intr->sde_irq_map[irq_idx];
  514. reg_idx = irq->reg_idx;
  515. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  516. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  517. return -EINVAL;
  518. }
  519. reg = &intr->sde_irq_tbl[reg_idx];
  520. cache_irq_mask = intr->cache_irq_mask[reg_idx];
  521. if ((cache_irq_mask & irq->irq_mask) == 0) {
  522. dbgstr = "SDE IRQ is already cleared:";
  523. } else {
  524. dbgstr = "SDE IRQ mask disable:";
  525. cache_irq_mask &= ~irq->irq_mask;
  526. /* Disable interrupts based on the new mask */
  527. SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
  528. /* Cleaning any pending interrupt */
  529. SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
  530. /* ensure register write goes through */
  531. wmb();
  532. intr->cache_irq_mask[reg_idx] = cache_irq_mask;
  533. }
  534. pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
  535. irq->irq_mask, cache_irq_mask);
  536. return 0;
  537. }
  538. static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
  539. {
  540. int i;
  541. if (!intr)
  542. return -EINVAL;
  543. for (i = 0; i < intr->sde_irq_size; i++)
  544. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
  545. 0xffffffff);
  546. /* ensure register writes go through */
  547. wmb();
  548. return 0;
  549. }
  550. static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
  551. {
  552. int i;
  553. if (!intr)
  554. return -EINVAL;
  555. for (i = 0; i < intr->sde_irq_size; i++)
  556. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].en_off,
  557. 0x00000000);
  558. /* ensure register writes go through */
  559. wmb();
  560. return 0;
  561. }
  562. static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
  563. uint32_t *sources)
  564. {
  565. if (!intr || !sources)
  566. return -EINVAL;
  567. *sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);
  568. return 0;
  569. }
  570. static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr)
  571. {
  572. int i;
  573. u32 enable_mask;
  574. unsigned long irq_flags;
  575. if (!intr)
  576. return;
  577. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  578. for (i = 0; i < intr->sde_irq_size; i++) {
  579. /* Read interrupt status */
  580. intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
  581. intr->sde_irq_tbl[i].status_off);
  582. /* Read enable mask */
  583. enable_mask = SDE_REG_READ(&intr->hw,
  584. intr->sde_irq_tbl[i].en_off);
  585. /* and clear the interrupt */
  586. if (intr->save_irq_status[i])
  587. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
  588. intr->save_irq_status[i]);
  589. /* Finally update IRQ status based on enable mask */
  590. intr->save_irq_status[i] &= enable_mask;
  591. }
  592. /* ensure register writes go through */
  593. wmb();
  594. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  595. }
  596. static void sde_hw_intr_clear_intr_status_nolock(struct sde_hw_intr *intr,
  597. int irq_idx)
  598. {
  599. int reg_idx;
  600. if (!intr)
  601. return;
  602. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  603. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  604. return;
  605. }
  606. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  607. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  608. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  609. return;
  610. }
  611. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  612. intr->sde_irq_map[irq_idx].irq_mask);
  613. /* ensure register writes go through */
  614. wmb();
  615. }
  616. static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
  617. int irq_idx)
  618. {
  619. unsigned long irq_flags;
  620. if (!intr)
  621. return;
  622. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  623. sde_hw_intr_clear_intr_status_nolock(intr, irq_idx);
  624. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  625. }
  626. static u32 sde_hw_intr_get_intr_status_nolock(struct sde_hw_intr *intr,
  627. int irq_idx, bool clear)
  628. {
  629. int reg_idx;
  630. u32 intr_status;
  631. if (!intr)
  632. return 0;
  633. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  634. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  635. return 0;
  636. }
  637. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  638. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  639. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  640. return 0;
  641. }
  642. intr_status = SDE_REG_READ(&intr->hw,
  643. intr->sde_irq_tbl[reg_idx].status_off) &
  644. intr->sde_irq_map[irq_idx].irq_mask;
  645. if (intr_status && clear)
  646. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  647. intr_status);
  648. /* ensure register writes go through */
  649. wmb();
  650. return intr_status;
  651. }
  652. static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
  653. int irq_idx, bool clear)
  654. {
  655. int reg_idx;
  656. unsigned long irq_flags;
  657. u32 intr_status;
  658. if (!intr)
  659. return 0;
  660. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  661. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  662. return 0;
  663. }
  664. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  665. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  666. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  667. return 0;
  668. }
  669. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  670. intr_status = SDE_REG_READ(&intr->hw,
  671. intr->sde_irq_tbl[reg_idx].status_off) &
  672. intr->sde_irq_map[irq_idx].irq_mask;
  673. if (intr_status && clear)
  674. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  675. intr_status);
  676. /* ensure register writes go through */
  677. wmb();
  678. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  679. return intr_status;
  680. }
  681. static int _set_sde_irq_tbl_offset_top(struct sde_intr_reg *sde_irq,
  682. struct sde_intr_irq_offsets *item)
  683. {
  684. u32 base_offset;
  685. if (!sde_irq || !item)
  686. return -EINVAL;
  687. base_offset = item->base_offset;
  688. switch (item->instance_idx) {
  689. case SDE_INTR_TOP_INTR:
  690. sde_irq->clr_off = base_offset + INTR_CLEAR;
  691. sde_irq->en_off = base_offset + INTR_EN;
  692. sde_irq->status_off = base_offset + INTR_STATUS;
  693. break;
  694. case SDE_INTR_TOP_INTR2:
  695. sde_irq->clr_off = base_offset + INTR2_CLEAR;
  696. sde_irq->en_off = base_offset + INTR2_EN;
  697. sde_irq->status_off = base_offset + INTR2_STATUS;
  698. break;
  699. case SDE_INTR_TOP_HIST_INTR:
  700. sde_irq->clr_off = base_offset + HIST_INTR_CLEAR;
  701. sde_irq->en_off = base_offset + HIST_INTR_EN;
  702. sde_irq->status_off = base_offset + HIST_INTR_STATUS;
  703. break;
  704. default:
  705. pr_err("invalid TOP intr for instance %d\n",
  706. item->instance_idx);
  707. return -EINVAL;
  708. }
  709. return 0;
  710. }
  711. static int _set_sde_irq_tbl_offset(struct sde_intr_reg *sde_irq,
  712. struct sde_intr_irq_offsets *item)
  713. {
  714. u32 base_offset, rc = 0;
  715. if (!sde_irq || !item)
  716. return -EINVAL;
  717. base_offset = item->base_offset;
  718. switch (item->type) {
  719. case SDE_INTR_HWBLK_TOP:
  720. rc = _set_sde_irq_tbl_offset_top(sde_irq, item);
  721. break;
  722. case SDE_INTR_HWBLK_INTF:
  723. sde_irq->clr_off = base_offset + INTF_INTR_CLEAR;
  724. sde_irq->en_off = base_offset + INTF_INTR_EN;
  725. sde_irq->status_off = base_offset + INTF_INTR_STATUS;
  726. break;
  727. case SDE_INTR_HWBLK_AD4:
  728. sde_irq->clr_off = base_offset + MDP_AD4_INTR_CLEAR_OFF;
  729. sde_irq->en_off = base_offset + MDP_AD4_INTR_EN_OFF;
  730. sde_irq->status_off = base_offset + MDP_AD4_INTR_STATUS_OFF;
  731. break;
  732. case SDE_INTR_HWBLK_INTF_TEAR:
  733. sde_irq->clr_off = base_offset + MDP_INTF_TEAR_INTR_CLEAR_OFF;
  734. sde_irq->en_off = base_offset + MDP_INTF_TEAR_INTR_EN_OFF;
  735. sde_irq->status_off = base_offset +
  736. MDP_INTF_TEAR_INTR_STATUS_OFF;
  737. break;
  738. case SDE_INTR_HWBLK_LTM:
  739. sde_irq->clr_off = base_offset + MDP_LTM_INTR_CLEAR_OFF;
  740. sde_irq->en_off = base_offset + MDP_LTM_INTR_EN_OFF;
  741. sde_irq->status_off = base_offset + MDP_LTM_INTR_STATUS_OFF;
  742. break;
  743. case SDE_INTR_HWBLK_WB:
  744. sde_irq->clr_off = base_offset + MDP_WB_INTR_CLEAR_OFF;
  745. sde_irq->en_off = base_offset + MDP_WB_INTR_EN_OFF;
  746. sde_irq->status_off = base_offset + MDP_WB_INTR_STATUS_OFF;
  747. break;
  748. default:
  749. pr_err("unrecognized intr blk type %d\n",
  750. item->type);
  751. rc = -EINVAL;
  752. }
  753. return rc;
  754. }
  755. static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
  756. {
  757. ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
  758. ops->enable_irq_nolock = sde_hw_intr_enable_irq_nolock;
  759. ops->disable_irq_nolock = sde_hw_intr_disable_irq_nolock;
  760. ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
  761. ops->clear_all_irqs = sde_hw_intr_clear_irqs;
  762. ops->disable_all_irqs = sde_hw_intr_disable_irqs;
  763. ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
  764. ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
  765. ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
  766. ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock;
  767. ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
  768. ops->get_intr_status_nolock = sde_hw_intr_get_intr_status_nolock;
  769. }
  770. static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
  771. void __iomem *addr, struct sde_hw_blk_reg_map *hw)
  772. {
  773. if (!m || !addr || !hw || m->mdp_count == 0)
  774. return NULL;
  775. hw->base_off = addr;
  776. hw->blk_off = m->mdss[0].base;
  777. hw->hw_rev = m->hw_rev;
  778. return &m->mdss[0];
  779. }
  780. void sde_hw_intr_destroy(struct sde_hw_intr *intr)
  781. {
  782. if (intr) {
  783. kfree(intr->sde_irq_tbl);
  784. kfree(intr->sde_irq_map);
  785. kfree(intr->cache_irq_mask);
  786. kfree(intr->save_irq_status);
  787. kfree(intr);
  788. }
  789. }
  790. static inline u32 _get_irq_map_size_top(enum sde_intr_top_intr inst)
  791. {
  792. u32 ret = 0;
  793. switch (inst) {
  794. case SDE_INTR_TOP_INTR:
  795. ret = ARRAY_SIZE(sde_irq_intr_map);
  796. break;
  797. case SDE_INTR_TOP_INTR2:
  798. ret = ARRAY_SIZE(sde_irq_intr2_map);
  799. break;
  800. case SDE_INTR_TOP_HIST_INTR:
  801. ret = ARRAY_SIZE(sde_irq_hist_map);
  802. break;
  803. default:
  804. pr_err("invalid top inst:%d\n", inst);
  805. }
  806. return ret;
  807. }
  808. static inline u32 _get_irq_map_size(struct sde_intr_irq_offsets *item)
  809. {
  810. u32 ret = 0;
  811. switch (item->type) {
  812. case SDE_INTR_HWBLK_TOP:
  813. ret = _get_irq_map_size_top(item->instance_idx);
  814. break;
  815. case SDE_INTR_HWBLK_INTF:
  816. ret = ARRAY_SIZE(sde_irq_intf_map);
  817. break;
  818. case SDE_INTR_HWBLK_AD4:
  819. ret = ARRAY_SIZE(sde_irq_ad4_map);
  820. break;
  821. case SDE_INTR_HWBLK_INTF_TEAR:
  822. ret = ARRAY_SIZE(sde_irq_intf_te_map);
  823. break;
  824. case SDE_INTR_HWBLK_LTM:
  825. ret = ARRAY_SIZE(sde_irq_ltm_map);
  826. break;
  827. case SDE_INTR_HWBLK_WB:
  828. ret = ARRAY_SIZE(sde_irq_wb_map);
  829. break;
  830. default:
  831. pr_err("invalid type: %d\n", item->type);
  832. }
  833. return ret;
  834. }
  835. static inline struct sde_irq_type *_get_irq_map_addr_top(
  836. enum sde_intr_top_intr inst)
  837. {
  838. struct sde_irq_type *ret = NULL;
  839. switch (inst) {
  840. case SDE_INTR_TOP_INTR:
  841. ret = sde_irq_intr_map;
  842. break;
  843. case SDE_INTR_TOP_INTR2:
  844. ret = sde_irq_intr2_map;
  845. break;
  846. case SDE_INTR_TOP_HIST_INTR:
  847. ret = sde_irq_hist_map;
  848. break;
  849. default:
  850. pr_err("invalid top inst:%d\n", inst);
  851. }
  852. return ret;
  853. }
  854. static inline struct sde_irq_type *_get_irq_map_addr(
  855. struct sde_intr_irq_offsets *item)
  856. {
  857. struct sde_irq_type *ret = NULL;
  858. switch (item->type) {
  859. case SDE_INTR_HWBLK_TOP:
  860. ret = _get_irq_map_addr_top(item->instance_idx);
  861. break;
  862. case SDE_INTR_HWBLK_INTF:
  863. ret = sde_irq_intf_map;
  864. break;
  865. case SDE_INTR_HWBLK_AD4:
  866. ret = sde_irq_ad4_map;
  867. break;
  868. case SDE_INTR_HWBLK_INTF_TEAR:
  869. ret = sde_irq_intf_te_map;
  870. break;
  871. case SDE_INTR_HWBLK_LTM:
  872. ret = sde_irq_ltm_map;
  873. break;
  874. case SDE_INTR_HWBLK_WB:
  875. ret = sde_irq_wb_map;
  876. break;
  877. default:
  878. pr_err("invalid type: %d\n", item->type);
  879. }
  880. return ret;
  881. }
  882. static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
  883. struct sde_intr_irq_offsets *item, u32 low_idx, u32 high_idx)
  884. {
  885. int i, j = 0;
  886. struct sde_irq_type *src = _get_irq_map_addr(item);
  887. u32 src_size = _get_irq_map_size(item);
  888. if (!src)
  889. return -EINVAL;
  890. if (low_idx >= size || high_idx > size ||
  891. (high_idx - low_idx > src_size)) {
  892. pr_err("invalid size l:%d h:%d dst:%d src:%d\n",
  893. low_idx, high_idx, size, src_size);
  894. return -EINVAL;
  895. }
  896. for (i = low_idx; i < high_idx; i++)
  897. sde_irq_map[i] = src[j++];
  898. return 0;
  899. }
/*
 * _sde_hw_intr_init_irq_tables - populate sde_irq_tbl and sde_irq_map
 *	from the catalog's irq offset list.
 * @intr: interrupt handler context; both tables must already be allocated
 * @m: mdss catalog; its irq_offset_list is consumed and always deleted
 *	before returning, even on error
 *
 * Each list entry describes one interrupt register block: its per-bit
 * mapping table is copied into the flat sde_irq_map, each copied entry
 * is back-pointed at its register via reg_idx (and, for non-TOP blocks,
 * stamped with the block's instance_idx), and the register offsets plus
 * the [start, end) window into sde_irq_map are recorded in sde_irq_tbl.
 *
 * Return: 0 on success, negative errno on table overrun or offset error.
 */
static int _sde_hw_intr_init_irq_tables(struct sde_hw_intr *intr,
	struct sde_mdss_cfg *m)
{
	struct sde_intr_irq_offsets *item;
	int i, sde_irq_tbl_idx = 0, ret = 0;
	u32 low_idx, high_idx;
	u32 sde_irq_map_idx = 0;

	/* Initialize offsets in the sde_irq_map & sde_irq_tbl tables */
	list_for_each_entry(item, &m->irq_offset_list, list) {
		/* this item's window in the flat map: [low_idx, high_idx) */
		low_idx = sde_irq_map_idx;
		high_idx = low_idx + _get_irq_map_size(item);

		/* one sde_irq_tbl entry per list item; bail on overrun */
		if (sde_irq_tbl_idx >= intr->sde_irq_size ||
			sde_irq_tbl_idx < 0) {
			ret = -EINVAL;
			goto exit;
		}

		/* init sde_irq_map with the global irq mapping table */
		if (_sde_copy_regs(intr->sde_irq_map, intr->sde_irq_map_size,
				item, low_idx, high_idx)) {
			ret = -EINVAL;
			goto exit;
		}

		/* init irq map with its reg & instance idxs in the irq tbl */
		for (i = low_idx; i < high_idx; i++) {
			intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
			/* TOP blocks keep the instance_idx already present
			 * in the copied static map
			 */
			if (item->type != SDE_INTR_HWBLK_TOP)
				intr->sde_irq_map[i].instance_idx =
					item->instance_idx;
			pr_debug("sde_irq_map[%d].reg_idx=%d .inst_idx = %d\n",
				i, sde_irq_tbl_idx, item->instance_idx);
		}

		/* track the idx of the mapping table for this irq in
		 * sde_irq_map, this to only access the indexes of this
		 * irq during the irq dispatch
		 */
		intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start = low_idx;
		intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end = high_idx;
		ret = _set_sde_irq_tbl_offset(
				&intr->sde_irq_tbl[sde_irq_tbl_idx], item);
		if (ret)
			goto exit;

		/* increment idx for both tables accordingly */
		sde_irq_tbl_idx++;
		sde_irq_map_idx = high_idx;
	}

exit:
	/* offset list is owned here: free it on both success and failure */
	sde_hw_catalog_irq_offset_list_delete(&m->irq_offset_list);
	return ret;
}
/**
 * sde_hw_intr_init - allocate and initialize the interrupt handler for
 *	this target.
 * @addr: mapped register base for the device
 * @m: mdss catalog describing the target's interrupt blocks; its
 *	irq_offset_list is consumed by _sde_hw_intr_init_irq_tables()
 *
 * Sizes the register and per-bit mapping tables by walking the catalog's
 * irq offset list, allocates them plus the cached-mask and saved-status
 * arrays, and fills in the ops table.
 *
 * Return: handler pointer on success, ERR_PTR(-errno) on failure; all
 * partial allocations are freed via sde_hw_intr_destroy() on error.
 */
struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
		struct sde_mdss_cfg *m)
{
	struct sde_hw_intr *intr = NULL;
	struct sde_mdss_base_cfg *cfg;
	struct sde_intr_irq_offsets *item;
	u32 irq_regs_count = 0;
	u32 irq_map_count = 0;
	u32 size;
	int ret = 0;

	if (!addr || !m) {
		ret = -EINVAL;
		goto exit;
	}

	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
	if (!intr) {
		ret = -ENOMEM;
		goto exit;
	}

	cfg = __intr_offset(m, addr, &intr->hw);
	if (!cfg) {
		ret = -EINVAL;
		goto exit;
	}
	__setup_intr_ops(&intr->ops);

	/* check how many irq's this target supports */
	list_for_each_entry(item, &m->irq_offset_list, list) {
		size = _get_irq_map_size(item);
		/* reject unknown blocks and guard the running sum against
		 * u32 overflow before adding
		 */
		if (!size || irq_map_count >= UINT_MAX - size) {
			pr_err("wrong map cnt idx:%d blk:%d/%d sz:%d cnt:%d\n",
				irq_regs_count, item->type, item->instance_idx,
				size, irq_map_count);
			ret = -EINVAL;
			goto exit;
		}

		irq_regs_count++;
		irq_map_count += size;
	}

	if (irq_regs_count == 0 || irq_map_count == 0) {
		pr_err("invalid irq map: %d %d\n",
			irq_regs_count, irq_map_count);
		ret = -EINVAL;
		goto exit;
	}

	/* Allocate table for the irq registers */
	intr->sde_irq_size = irq_regs_count;
	intr->sde_irq_tbl = kcalloc(irq_regs_count, sizeof(*intr->sde_irq_tbl),
		GFP_KERNEL);
	if (intr->sde_irq_tbl == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Allocate table with the valid interrupts bits */
	intr->sde_irq_map_size = irq_map_count;
	intr->sde_irq_map = kcalloc(irq_map_count, sizeof(*intr->sde_irq_map),
		GFP_KERNEL);
	if (intr->sde_irq_map == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Initialize IRQs tables */
	ret = _sde_hw_intr_init_irq_tables(intr, m);
	if (ret)
		goto exit;

	/* per-register cached enable mask, one slot per irq register */
	intr->cache_irq_mask = kcalloc(intr->sde_irq_size,
			sizeof(*intr->cache_irq_mask), GFP_KERNEL);
	if (intr->cache_irq_mask == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* per-register saved status, one slot per irq register */
	intr->save_irq_status = kcalloc(intr->sde_irq_size,
			sizeof(*intr->save_irq_status), GFP_KERNEL);
	if (intr->save_irq_status == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	spin_lock_init(&intr->irq_lock);

exit:
	if (ret) {
		/* destroy tolerates partially-initialized handlers */
		sde_hw_intr_destroy(intr);
		return ERR_PTR(ret);
	}

	return intr;
}