sde_hw_interrupts.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/slab.h>

#include "sde_kms.h"
#include "sde_hw_interrupts.h"
#include "sde_hw_util.h"
#include "sde_hw_mdss.h"

/**
 * Register offsets in the MDSS register file for the interrupt registers,
 * w.r.t. the base for that block. Base offsets for IRQs should come from the
 * device tree and get stored in the catalog (irq_offset_list) until they
 * are added to the sde_irq_tbl during table initialization.
 */
#define HW_INTR_STATUS 0x0010
#define MDP_AD4_INTR_EN_OFF 0x41c
#define MDP_AD4_INTR_CLEAR_OFF 0x424
#define MDP_AD4_INTR_STATUS_OFF 0x420
#define MDP_INTF_TEAR_INTR_EN_OFF 0x0
#define MDP_INTF_TEAR_INTR_STATUS_OFF 0x4
#define MDP_INTF_TEAR_INTR_CLEAR_OFF 0x8
#define MDP_LTM_INTR_EN_OFF 0x50
#define MDP_LTM_INTR_STATUS_OFF 0x54
#define MDP_LTM_INTR_CLEAR_OFF 0x58

/**
 * WB interrupt status bit definitions
 */
#define SDE_INTR_WB_0_DONE BIT(0)
#define SDE_INTR_WB_1_DONE BIT(1)
#define SDE_INTR_WB_2_DONE BIT(4)

/**
 * WDOG timer interrupt status bit definitions
 */
#define SDE_INTR_WD_TIMER_0_DONE BIT(2)
#define SDE_INTR_WD_TIMER_1_DONE BIT(3)
#define SDE_INTR_WD_TIMER_2_DONE BIT(5)
#define SDE_INTR_WD_TIMER_3_DONE BIT(6)
#define SDE_INTR_WD_TIMER_4_DONE BIT(7)

/**
 * Pingpong interrupt status bit definitions
 */
#define SDE_INTR_PING_PONG_0_DONE BIT(8)
#define SDE_INTR_PING_PONG_1_DONE BIT(9)
#define SDE_INTR_PING_PONG_2_DONE BIT(10)
#define SDE_INTR_PING_PONG_3_DONE BIT(11)
#define SDE_INTR_PING_PONG_4_DONE BIT(30)
#define SDE_INTR_PING_PONG_5_DONE BIT(31)
#define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
#define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
#define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
#define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
#define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
#define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
#define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
#define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
#define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
#define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
#define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
#define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)

/**
 * Interface interrupt status bit definitions
 */
#define SDE_INTR_INTF_0_UNDERRUN BIT(24)
#define SDE_INTR_INTF_1_UNDERRUN BIT(26)
#define SDE_INTR_INTF_2_UNDERRUN BIT(28)
#define SDE_INTR_INTF_3_UNDERRUN BIT(30)
#define SDE_INTR_INTF_0_VSYNC BIT(25)
#define SDE_INTR_INTF_1_VSYNC BIT(27)
#define SDE_INTR_INTF_2_VSYNC BIT(29)
#define SDE_INTR_INTF_3_VSYNC BIT(31)

/**
 * Pingpong Secondary interrupt status bit definitions
 */
#define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
#define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
#define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8)
#define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
#define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28)

/**
 * Pingpong TEAR detection interrupt status bit definitions
 */
#define SDE_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
#define SDE_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
#define SDE_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
#define SDE_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)

/**
 * Pingpong TE detection interrupt status bit definitions
 */
#define SDE_INTR_PING_PONG_0_TE_DETECTED BIT(24)
#define SDE_INTR_PING_PONG_1_TE_DETECTED BIT(25)
#define SDE_INTR_PING_PONG_2_TE_DETECTED BIT(26)
#define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)

/**
 * Ctl start interrupt status bit definitions
 */
#define SDE_INTR_CTL_0_START BIT(9)
#define SDE_INTR_CTL_1_START BIT(10)
#define SDE_INTR_CTL_2_START BIT(11)
#define SDE_INTR_CTL_3_START BIT(12)
#define SDE_INTR_CTL_4_START BIT(13)
#define SDE_INTR_CTL_5_START BIT(23)

/**
 * Concurrent WB overflow interrupt status bit definitions
 */
#define SDE_INTR_CWB_1_OVERFLOW BIT(8)
#define SDE_INTR_CWB_2_OVERFLOW BIT(14)
#define SDE_INTR_CWB_3_OVERFLOW BIT(15)
#define SDE_INTR_CWB_4_OVERFLOW BIT(20)
#define SDE_INTR_CWB_5_OVERFLOW BIT(21)
#define SDE_INTR_CWB_OVERFLOW BIT(29)

/**
 * Histogram VIG done interrupt status bit definitions
 */
#define SDE_INTR_HIST_VIG_0_DONE BIT(0)
#define SDE_INTR_HIST_VIG_1_DONE BIT(4)
#define SDE_INTR_HIST_VIG_2_DONE BIT(8)
#define SDE_INTR_HIST_VIG_3_DONE BIT(10)

/**
 * Histogram VIG reset sequence done interrupt status bit definitions
 */
#define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
#define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
#define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
#define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)

/**
 * Histogram DSPP done interrupt status bit definitions
 */
#define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
#define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
#define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
#define SDE_INTR_HIST_DSPP_3_DONE BIT(22)

/**
 * Histogram DSPP reset sequence done interrupt status bit definitions
 */
#define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
#define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
#define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
#define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)

/**
 * INTF interrupt status bit definitions
 */
#define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
#define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
#define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
#define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
#define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
#define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
#define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
#define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
#define SDE_INTR_PROG_LINE BIT(8)
#define SDE_INTR_INTF_WD_TIMER_0_DONE BIT(13)

/**
 * AD4 interrupt status bit definitions
 */
#define SDE_INTR_BRIGHTPR_UPDATED BIT(4)
#define SDE_INTR_DARKENH_UPDATED BIT(3)
#define SDE_INTR_STREN_OUTROI_UPDATED BIT(2)
#define SDE_INTR_STREN_INROI_UPDATED BIT(1)
#define SDE_INTR_BACKLIGHT_UPDATED BIT(0)

/**
 * INTF Tear IRQ register bit definitions
 */
#define SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE BIT(0)
#define SDE_INTR_INTF_TEAR_WR_PTR BIT(1)
#define SDE_INTR_INTF_TEAR_RD_PTR BIT(2)
#define SDE_INTR_INTF_TEAR_TE_DETECTED BIT(3)
#define SDE_INTR_INTF_TEAR_TEAR_DETECTED BIT(4)

/**
 * LTM interrupt status bit definitions
 */
#define SDE_INTR_LTM_STATS_DONE BIT(0)
#define SDE_INTR_LTM_STATS_WB_PB BIT(5)

/**
 * struct sde_intr_reg - array of SDE register sets
 * @clr_off:       offset to CLEAR reg
 * @en_off:        offset to ENABLE reg
 * @status_off:    offset to STATUS reg
 * @map_idx_start: first index for this register in the sde_irq_map table
 * @map_idx_end:   index one past this register's last sde_irq_map entry
 */
struct sde_intr_reg {
        u32 clr_off;
        u32 en_off;
        u32 status_off;
        u32 map_idx_start;
        u32 map_idx_end;
};

/**
 * struct sde_irq_type - maps each irq with i/f
 * @intr_type:    type of interrupt listed in sde_intr_type
 * @instance_idx: instance index of the associated HW block in SDE
 * @irq_mask:     corresponding bit in the interrupt status reg
 * @reg_idx:      index in the 'sde_irq_tbl' table, used to select which
 *                register offsets to use.
 */
struct sde_irq_type {
        u32 intr_type;
        u32 instance_idx;
        u32 irq_mask;
        int reg_idx;
};

/**
 * IRQ mapping tables - used to look up the irq_idx in these tables that has
 * a matching interrupt type and instance index.
 * Each of these tables is copied to a dynamically allocated table, which is
 * then used to service each of the irqs.
 * -1 indicates an uninitialized value that should be set when copying these
 * tables into the sde_irq_map.
 */
static struct sde_irq_type sde_irq_intr_map[] = {
        { SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, -1},
        { SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, -1},
        { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, -1},
        { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, -1},
        { SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, -1},
        { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, -1},
        { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, -1},
        { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, -1},
        { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
                SDE_INTR_PING_PONG_0_DONE, -1},
        { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
                SDE_INTR_PING_PONG_1_DONE, -1},
        { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
                SDE_INTR_PING_PONG_2_DONE, -1},
        { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
                SDE_INTR_PING_PONG_3_DONE, -1},
        { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
                SDE_INTR_PING_PONG_0_RD_PTR, -1},
        { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
                SDE_INTR_PING_PONG_1_RD_PTR, -1},
        { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
                SDE_INTR_PING_PONG_2_RD_PTR, -1},
        { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
                SDE_INTR_PING_PONG_3_RD_PTR, -1},
        { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
                SDE_INTR_PING_PONG_0_WR_PTR, -1},
        { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
                SDE_INTR_PING_PONG_1_WR_PTR, -1},
        { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
                SDE_INTR_PING_PONG_2_WR_PTR, -1},
        { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
                SDE_INTR_PING_PONG_3_WR_PTR, -1},
        { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
                SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, -1},
        { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
                SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, -1},
        { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
                SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, -1},
        { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
                SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, -1},
        { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, -1},
        { SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, -1},
        { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, -1},
        { SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, -1},
        { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, -1},
        { SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, -1},
        { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, -1},
        { SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, -1},
};

static struct sde_irq_type sde_irq_intr2_map[] = {
        { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
                SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, -1},
        { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
                SDE_INTR_PING_PONG_S0_WR_PTR, -1},
        { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_1, SDE_INTR_CWB_1_OVERFLOW, -1},
        { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
                SDE_INTR_PING_PONG_S0_RD_PTR, -1},
        { SDE_IRQ_TYPE_CTL_START, CTL_0,
                SDE_INTR_CTL_0_START, -1},
        { SDE_IRQ_TYPE_CTL_START, CTL_1,
                SDE_INTR_CTL_1_START, -1},
        { SDE_IRQ_TYPE_CTL_START, CTL_2,
                SDE_INTR_CTL_2_START, -1},
        { SDE_IRQ_TYPE_CTL_START, CTL_3,
                SDE_INTR_CTL_3_START, -1},
        { SDE_IRQ_TYPE_CTL_START, CTL_4,
                SDE_INTR_CTL_4_START, -1},
        { SDE_IRQ_TYPE_CTL_START, CTL_5,
                SDE_INTR_CTL_5_START, -1},
        { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, -1},
        { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, -1},
        { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
                SDE_INTR_PING_PONG_0_TEAR_DETECTED, -1},
        { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
                SDE_INTR_PING_PONG_1_TEAR_DETECTED, -1},
        { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
                SDE_INTR_PING_PONG_2_TEAR_DETECTED, -1},
        { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
                SDE_INTR_PING_PONG_3_TEAR_DETECTED, -1},
        { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_4, SDE_INTR_CWB_4_OVERFLOW, -1},
        { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_5, SDE_INTR_CWB_5_OVERFLOW, -1},
        { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
                SDE_INTR_PING_PONG_S0_TEAR_DETECTED, -1},
        { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
                SDE_INTR_PING_PONG_0_TE_DETECTED, -1},
        { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
                SDE_INTR_PING_PONG_1_TE_DETECTED, -1},
        { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
                SDE_INTR_PING_PONG_2_TE_DETECTED, -1},
        { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
                SDE_INTR_PING_PONG_3_TE_DETECTED, -1},
        { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
                SDE_INTR_PING_PONG_S0_TE_DETECTED, -1},
        { SDE_IRQ_TYPE_CWB_OVERFLOW, PINGPONG_CWB_0, SDE_INTR_CWB_OVERFLOW, -1},
        { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_4,
                SDE_INTR_PING_PONG_4_DONE, -1},
        { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_5,
                SDE_INTR_PING_PONG_5_DONE, -1},
};

static struct sde_irq_type sde_irq_hist_map[] = {
        { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, -1},
        { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
                SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, -1},
        { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, -1},
        { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
                SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, -1},
        { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, -1},
        { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
                SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, -1},
        { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, -1},
        { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
                SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, -1},
        { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, -1},
        { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
                SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, -1},
        { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, -1},
        { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
                SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, -1},
        { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, -1},
        { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
                SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, -1},
        { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, -1},
        { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
                SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, -1},
};

static struct sde_irq_type sde_irq_intf_map[] = {
        { SDE_IRQ_TYPE_SFI_VIDEO_IN, -1,
                SDE_INTR_VIDEO_INTO_STATIC, -1},
        { SDE_IRQ_TYPE_SFI_VIDEO_OUT, -1,
                SDE_INTR_VIDEO_OUTOF_STATIC, -1},
        { SDE_IRQ_TYPE_SFI_CMD_0_IN, -1,
                SDE_INTR_DSICMD_0_INTO_STATIC, -1},
        { SDE_IRQ_TYPE_SFI_CMD_0_OUT, -1,
                SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
        { SDE_IRQ_TYPE_SFI_CMD_1_IN, -1,
                SDE_INTR_DSICMD_1_INTO_STATIC, -1},
        { SDE_IRQ_TYPE_SFI_CMD_1_OUT, -1,
                SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
        { SDE_IRQ_TYPE_SFI_CMD_2_IN, -1,
                SDE_INTR_DSICMD_2_INTO_STATIC, -1},
        { SDE_IRQ_TYPE_SFI_CMD_2_OUT, -1,
                SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
        { SDE_IRQ_TYPE_PROG_LINE, -1, SDE_INTR_PROG_LINE, -1},
        { SDE_IRQ_TYPE_WD_TIMER, -1, SDE_INTR_WD_TIMER_0_DONE, -1},
};

static struct sde_irq_type sde_irq_ad4_map[] = {
        { SDE_IRQ_TYPE_AD4_BL_DONE, -1, SDE_INTR_BACKLIGHT_UPDATED, -1},
};

static struct sde_irq_type sde_irq_intf_te_map[] = {
        { SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, -1,
                SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
        { SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, -1,
                SDE_INTR_INTF_TEAR_WR_PTR, -1},
        { SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, -1,
                SDE_INTR_INTF_TEAR_RD_PTR, -1},
        { SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, -1,
                SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
};

static struct sde_irq_type sde_irq_ltm_map[] = {
        { SDE_IRQ_TYPE_LTM_STATS_DONE, -1, SDE_INTR_LTM_STATS_DONE, -1},
        { SDE_IRQ_TYPE_LTM_STATS_WB_PB, -1, SDE_INTR_LTM_STATS_WB_PB, -1},
};

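/*
 * Editorial note: the -1 placeholders in the tables above are resolved at
 * init time. _sde_hw_intr_init_irq_tables() copies each static map into the
 * dynamically allocated intr->sde_irq_map, then fills in .reg_idx (and, for
 * non-TOP blocks, .instance_idx) from the catalog's irq_offset_list. A
 * hypothetical INTF_TEAR entry could therefore end up as:
 *
 *      { SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_1, SDE_INTR_INTF_TEAR_RD_PTR, 7 }
 *
 * where INTF_1 and the reg_idx of 7 are illustrative values only.
 */
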
static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
                enum sde_intr_type intr_type, u32 instance_idx)
{
        int i;

        for (i = 0; i < intr->sde_irq_map_size; i++) {
                if (intr_type == intr->sde_irq_map[i].intr_type &&
                        instance_idx == intr->sde_irq_map[i].instance_idx)
                        return i;
        }

        pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
                        intr_type, instance_idx);

        return -EINVAL;
}

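/*
 * Usage sketch (illustrative; mirrors how the sde core irq layer is expected
 * to use these ops): a caller resolves an irq_idx once via the lookup op and
 * reuses it with the *_nolock ops, which assume intr->irq_lock is held:
 *
 *      irq_idx = intr->ops.irq_idx_lookup(intr, SDE_IRQ_TYPE_INTF_VSYNC,
 *                      INTF_1);
 *      if (irq_idx >= 0) {
 *              spin_lock_irqsave(&intr->irq_lock, irq_flags);
 *              intr->ops.enable_irq_nolock(intr, irq_idx);
 *              spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
 *      }
 */
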
static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off,
                uint32_t mask)
{
        if (!intr)
                return;

        SDE_REG_WRITE(&intr->hw, reg_off, mask);

        /* ensure register writes go through */
        wmb();
}

static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
                void (*cbfunc)(void *, int),
                void *arg)
{
        int reg_idx;
        int irq_idx;
        int start_idx;
        int end_idx;
        u32 irq_status;
        unsigned long irq_flags;

        if (!intr)
                return;

        /*
         * The dispatcher will save the IRQ status before calling here.
         * Now go through each saved IRQ status and find the matching
         * irq lookup index.
         */
        spin_lock_irqsave(&intr->irq_lock, irq_flags);
        for (reg_idx = 0; reg_idx < intr->sde_irq_size; reg_idx++) {
                irq_status = intr->save_irq_status[reg_idx];

                /*
                 * Each interrupt register has a dynamic range of indexes,
                 * initialized during hw_intr_init when sde_irq_tbl is created.
                 */
                start_idx = intr->sde_irq_tbl[reg_idx].map_idx_start;
                end_idx = intr->sde_irq_tbl[reg_idx].map_idx_end;

                if (start_idx >= intr->sde_irq_map_size ||
                                end_idx > intr->sde_irq_map_size)
                        continue;

                /*
                 * Search for the matching intr status in the irq map.
                 * start_idx and end_idx define the search range in
                 * the sde_irq_map.
                 */
                for (irq_idx = start_idx;
                                (irq_idx < end_idx) && irq_status;
                                irq_idx++)
                        if ((irq_status &
                                intr->sde_irq_map[irq_idx].irq_mask) &&
                                (intr->sde_irq_map[irq_idx].reg_idx ==
                                 reg_idx)) {
                                /*
                                 * Once the irq mask matches, invoke the given
                                 * cbfunc. cbfunc will take care of clearing
                                 * the interrupt status. If cbfunc is not
                                 * provided, the interrupt is cleared here.
                                 */
                                if (cbfunc)
                                        cbfunc(arg, irq_idx);
                                else
                                        intr->ops.clear_intr_status_nolock(
                                                        intr, irq_idx);

                                /*
                                 * When the callback finishes, clear the
                                 * irq_status with the matching mask. Once
                                 * irq_status is all cleared, the search can
                                 * be stopped.
                                 */
                                irq_status &=
                                        ~intr->sde_irq_map[irq_idx].irq_mask;
                        }
        }
        spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}

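/*
 * Dispatch sequence sketch (illustrative; assumes a top-level ISR along the
 * lines of the sde core irq handler): status registers are latched and
 * cleared first, then matching callbacks are dispatched:
 *
 *      intr->ops.get_interrupt_statuses(intr);
 *      intr->ops.dispatch_irqs(intr, irq_cb_fn, cb_arg);
 *
 * where irq_cb_fn(cb_arg, irq_idx) is a hypothetical callback; per the
 * comment above, a provided callback owns clearing the interrupt status.
 */
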
static int sde_hw_intr_enable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
{
        int reg_idx;
        const struct sde_intr_reg *reg;
        const struct sde_irq_type *irq;
        const char *dbgstr = NULL;
        uint32_t cache_irq_mask;

        if (!intr)
                return -EINVAL;

        if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
                pr_err("invalid IRQ index: [%d]\n", irq_idx);
                return -EINVAL;
        }

        irq = &intr->sde_irq_map[irq_idx];
        reg_idx = irq->reg_idx;
        if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
                pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
                return -EINVAL;
        }

        reg = &intr->sde_irq_tbl[reg_idx];
        cache_irq_mask = intr->cache_irq_mask[reg_idx];
        if (cache_irq_mask & irq->irq_mask) {
                dbgstr = "SDE IRQ already set:";
        } else {
                dbgstr = "SDE IRQ enabled:";

                cache_irq_mask |= irq->irq_mask;
                /* Clear any pending interrupt */
                SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
                /* Enable interrupts with the new mask */
                SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

                /* ensure register write goes through */
                wmb();

                intr->cache_irq_mask[reg_idx] = cache_irq_mask;
        }

        pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
                        irq->irq_mask, cache_irq_mask);

        return 0;
}

static int sde_hw_intr_disable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
{
        int reg_idx;
        const struct sde_intr_reg *reg;
        const struct sde_irq_type *irq;
        const char *dbgstr = NULL;
        uint32_t cache_irq_mask;

        if (!intr)
                return -EINVAL;

        if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
                pr_err("invalid IRQ index: [%d]\n", irq_idx);
                return -EINVAL;
        }

        irq = &intr->sde_irq_map[irq_idx];
        reg_idx = irq->reg_idx;
        if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
                pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
                return -EINVAL;
        }

        reg = &intr->sde_irq_tbl[reg_idx];
        cache_irq_mask = intr->cache_irq_mask[reg_idx];
        if ((cache_irq_mask & irq->irq_mask) == 0) {
                dbgstr = "SDE IRQ is already cleared:";
        } else {
                dbgstr = "SDE IRQ mask disable:";

                cache_irq_mask &= ~irq->irq_mask;
                /* Disable interrupts based on the new mask */
                SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
                /* Clear any pending interrupt */
                SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);

                /* ensure register write goes through */
                wmb();

                intr->cache_irq_mask[reg_idx] = cache_irq_mask;
        }

        pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
                        irq->irq_mask, cache_irq_mask);

        return 0;
}

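/*
 * Design note (editorial): the enable/disable paths above are mirror images
 * around cache_irq_mask, a software shadow of each ENABLE register. Writing
 * the full cached mask on every toggle keeps unrelated bits in the same
 * register intact without reading the hardware back, so a balanced pair of
 * calls leaves the register exactly as it was:
 *
 *      intr->ops.enable_irq_nolock(intr, irq_idx);
 *      ...
 *      intr->ops.disable_irq_nolock(intr, irq_idx);
 */
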
static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
{
        int i;

        if (!intr)
                return -EINVAL;

        for (i = 0; i < intr->sde_irq_size; i++)
                SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
                                0xffffffff);

        /* ensure register writes go through */
        wmb();

        return 0;
}

static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
{
        int i;

        if (!intr)
                return -EINVAL;

        for (i = 0; i < intr->sde_irq_size; i++)
                SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].en_off,
                                0x00000000);

        /* ensure register writes go through */
        wmb();

        return 0;
}

static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr,
                uint32_t *mask)
{
        if (!intr || !mask)
                return -EINVAL;

        *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
                | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;

        return 0;
}

static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
                uint32_t *sources)
{
        if (!intr || !sources)
                return -EINVAL;

        *sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);

        return 0;
}

static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr)
{
        int i;
        u32 enable_mask;
        unsigned long irq_flags;

        if (!intr)
                return;

        spin_lock_irqsave(&intr->irq_lock, irq_flags);
        for (i = 0; i < intr->sde_irq_size; i++) {
                /* Read interrupt status */
                intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
                                intr->sde_irq_tbl[i].status_off);

                /* Read enable mask */
                enable_mask = SDE_REG_READ(&intr->hw,
                                intr->sde_irq_tbl[i].en_off);

                /* and clear the interrupt */
                if (intr->save_irq_status[i])
                        SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
                                        intr->save_irq_status[i]);

                /* Finally update IRQ status based on enable mask */
                intr->save_irq_status[i] &= enable_mask;
        }

        /* ensure register writes go through */
        wmb();

        spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}

static void sde_hw_intr_clear_intr_status_force_mask(struct sde_hw_intr *intr,
                int irq_idx, u32 irq_mask)
{
        int reg_idx;

        if (!intr)
                return;

        if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
                pr_err("invalid IRQ index: [%d]\n", irq_idx);
                return;
        }

        reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
        if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
                pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
                return;
        }

        SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
                        irq_mask);

        /* ensure register writes go through */
        wmb();
}

static void sde_hw_intr_clear_intr_status_nolock(struct sde_hw_intr *intr,
                int irq_idx)
{
        int reg_idx;

        if (!intr)
                return;

        if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
                pr_err("invalid IRQ index: [%d]\n", irq_idx);
                return;
        }

        reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
        if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
                pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
                return;
        }

        SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
                        intr->sde_irq_map[irq_idx].irq_mask);

        /* ensure register writes go through */
        wmb();
}

static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
                int irq_idx)
{
        unsigned long irq_flags;

        if (!intr)
                return;

        spin_lock_irqsave(&intr->irq_lock, irq_flags);
        sde_hw_intr_clear_intr_status_nolock(intr, irq_idx);
        spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}

static u32 sde_hw_intr_get_intr_status_nolock(struct sde_hw_intr *intr,
                int irq_idx, bool clear)
{
        int reg_idx;
        u32 intr_status;

        if (!intr)
                return 0;

        if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
                pr_err("invalid IRQ index: [%d]\n", irq_idx);
                return 0;
        }

        reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
        if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
                pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
                return 0;
        }

        intr_status = SDE_REG_READ(&intr->hw,
                        intr->sde_irq_tbl[reg_idx].status_off) &
                        intr->sde_irq_map[irq_idx].irq_mask;
        if (intr_status && clear)
                SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
                                intr_status);

        /* ensure register writes go through */
        wmb();

        return intr_status;
}

static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
                int irq_idx, bool clear)
{
        int reg_idx;
        unsigned long irq_flags;
        u32 intr_status;

        if (!intr)
                return 0;

        if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
                pr_err("invalid IRQ index: [%d]\n", irq_idx);
                return 0;
        }

        reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
        if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
                pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
                return 0;
        }

        spin_lock_irqsave(&intr->irq_lock, irq_flags);

        intr_status = SDE_REG_READ(&intr->hw,
                        intr->sde_irq_tbl[reg_idx].status_off) &
                        intr->sde_irq_map[irq_idx].irq_mask;
        if (intr_status && clear)
                SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
                                intr_status);

        /* ensure register writes go through */
        wmb();

        spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

        return intr_status;
}

static u32 sde_hw_intr_get_intr_status_nomask(struct sde_hw_intr *intr,
                int irq_idx, bool clear)
{
        int reg_idx;
        unsigned long irq_flags;
        u32 intr_status = 0;

        if (!intr)
                return 0;

        if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
                pr_err("invalid IRQ index: [%d]\n", irq_idx);
                return 0;
        }

        reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
        if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
                pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
                return 0;
        }

        spin_lock_irqsave(&intr->irq_lock, irq_flags);
        intr_status = SDE_REG_READ(&intr->hw,
                        intr->sde_irq_tbl[reg_idx].status_off);
        spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

        return intr_status;
}

static int _set_sde_irq_tbl_offset_top(struct sde_intr_reg *sde_irq,
                struct sde_intr_irq_offsets *item)
{
        u32 base_offset;

        if (!sde_irq || !item)
                return -EINVAL;

        base_offset = item->base_offset;
        switch (item->instance_idx) {
        case SDE_INTR_TOP_INTR:
                sde_irq->clr_off = base_offset + INTR_CLEAR;
                sde_irq->en_off = base_offset + INTR_EN;
                sde_irq->status_off = base_offset + INTR_STATUS;
                break;
        case SDE_INTR_TOP_INTR2:
                sde_irq->clr_off = base_offset + INTR2_CLEAR;
                sde_irq->en_off = base_offset + INTR2_EN;
                sde_irq->status_off = base_offset + INTR2_STATUS;
                break;
        case SDE_INTR_TOP_HIST_INTR:
                sde_irq->clr_off = base_offset + HIST_INTR_CLEAR;
                sde_irq->en_off = base_offset + HIST_INTR_EN;
                sde_irq->status_off = base_offset + HIST_INTR_STATUS;
                break;
        default:
                pr_err("invalid TOP intr for instance %d\n",
                                item->instance_idx);
                return -EINVAL;
        }

        return 0;
}

static int _set_sde_irq_tbl_offset(struct sde_intr_reg *sde_irq,
                struct sde_intr_irq_offsets *item)
{
        u32 base_offset;
        int rc = 0;

        if (!sde_irq || !item)
                return -EINVAL;

        base_offset = item->base_offset;
        switch (item->type) {
        case SDE_INTR_HWBLK_TOP:
                rc = _set_sde_irq_tbl_offset_top(sde_irq, item);
                break;
        case SDE_INTR_HWBLK_INTF:
                sde_irq->clr_off = base_offset + INTF_INTR_CLEAR;
                sde_irq->en_off = base_offset + INTF_INTR_EN;
                sde_irq->status_off = base_offset + INTF_INTR_STATUS;
                break;
        case SDE_INTR_HWBLK_AD4:
                sde_irq->clr_off = base_offset + MDP_AD4_INTR_CLEAR_OFF;
                sde_irq->en_off = base_offset + MDP_AD4_INTR_EN_OFF;
                sde_irq->status_off = base_offset + MDP_AD4_INTR_STATUS_OFF;
                break;
        case SDE_INTR_HWBLK_INTF_TEAR:
                sde_irq->clr_off = base_offset + MDP_INTF_TEAR_INTR_CLEAR_OFF;
                sde_irq->en_off = base_offset + MDP_INTF_TEAR_INTR_EN_OFF;
                sde_irq->status_off = base_offset +
                                MDP_INTF_TEAR_INTR_STATUS_OFF;
                break;
        case SDE_INTR_HWBLK_LTM:
                sde_irq->clr_off = base_offset + MDP_LTM_INTR_CLEAR_OFF;
                sde_irq->en_off = base_offset + MDP_LTM_INTR_EN_OFF;
                sde_irq->status_off = base_offset + MDP_LTM_INTR_STATUS_OFF;
                break;
        default:
                pr_err("unrecognized intr blk type %d\n",
                                item->type);
                rc = -EINVAL;
        }

        return rc;
}

static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
{
        ops->set_mask = sde_hw_intr_set_mask;
        ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
        ops->enable_irq_nolock = sde_hw_intr_enable_irq_nolock;
        ops->disable_irq_nolock = sde_hw_intr_disable_irq_nolock;
        ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
        ops->clear_all_irqs = sde_hw_intr_clear_irqs;
        ops->disable_all_irqs = sde_hw_intr_disable_irqs;
        ops->get_valid_interrupts = sde_hw_intr_get_valid_interrupts;
        ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
        ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
        ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
        ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock;
        ops->clear_intr_status_force_mask =
                        sde_hw_intr_clear_intr_status_force_mask;
        ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
        ops->get_intr_status_nolock = sde_hw_intr_get_intr_status_nolock;
        ops->get_intr_status_nomask = sde_hw_intr_get_intr_status_nomask;
}

static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
                void __iomem *addr, struct sde_hw_blk_reg_map *hw)
{
        if (!m || !addr || !hw || m->mdp_count == 0)
                return NULL;

        hw->base_off = addr;
        hw->blk_off = m->mdss[0].base;
        hw->hwversion = m->hwversion;

        return &m->mdss[0];
}

void sde_hw_intr_destroy(struct sde_hw_intr *intr)
{
        if (intr) {
                kfree(intr->sde_irq_tbl);
                kfree(intr->sde_irq_map);
                kfree(intr->cache_irq_mask);
                kfree(intr->save_irq_status);
                kfree(intr);
        }
}

static inline u32 _get_irq_map_size_top(enum sde_intr_top_intr inst)
{
        u32 ret = 0;

        switch (inst) {
        case SDE_INTR_TOP_INTR:
                ret = ARRAY_SIZE(sde_irq_intr_map);
                break;
        case SDE_INTR_TOP_INTR2:
                ret = ARRAY_SIZE(sde_irq_intr2_map);
                break;
        case SDE_INTR_TOP_HIST_INTR:
                ret = ARRAY_SIZE(sde_irq_hist_map);
                break;
        default:
                pr_err("invalid top inst:%d\n", inst);
        }

        return ret;
}

static inline u32 _get_irq_map_size(struct sde_intr_irq_offsets *item)
{
        u32 ret = 0;

        switch (item->type) {
        case SDE_INTR_HWBLK_TOP:
                ret = _get_irq_map_size_top(item->instance_idx);
                break;
        case SDE_INTR_HWBLK_INTF:
                ret = ARRAY_SIZE(sde_irq_intf_map);
                break;
        case SDE_INTR_HWBLK_AD4:
                ret = ARRAY_SIZE(sde_irq_ad4_map);
                break;
        case SDE_INTR_HWBLK_INTF_TEAR:
                ret = ARRAY_SIZE(sde_irq_intf_te_map);
                break;
        case SDE_INTR_HWBLK_LTM:
                ret = ARRAY_SIZE(sde_irq_ltm_map);
                break;
        default:
                pr_err("invalid type: %d\n", item->type);
        }

        return ret;
}

static inline struct sde_irq_type *_get_irq_map_addr_top(
                enum sde_intr_top_intr inst)
{
        struct sde_irq_type *ret = NULL;

        switch (inst) {
        case SDE_INTR_TOP_INTR:
                ret = sde_irq_intr_map;
                break;
        case SDE_INTR_TOP_INTR2:
                ret = sde_irq_intr2_map;
                break;
        case SDE_INTR_TOP_HIST_INTR:
                ret = sde_irq_hist_map;
                break;
        default:
                pr_err("invalid top inst:%d\n", inst);
        }

        return ret;
}

static inline struct sde_irq_type *_get_irq_map_addr(
                struct sde_intr_irq_offsets *item)
{
        struct sde_irq_type *ret = NULL;

        switch (item->type) {
        case SDE_INTR_HWBLK_TOP:
                ret = _get_irq_map_addr_top(item->instance_idx);
                break;
        case SDE_INTR_HWBLK_INTF:
                ret = sde_irq_intf_map;
                break;
        case SDE_INTR_HWBLK_AD4:
                ret = sde_irq_ad4_map;
                break;
        case SDE_INTR_HWBLK_INTF_TEAR:
                ret = sde_irq_intf_te_map;
                break;
        case SDE_INTR_HWBLK_LTM:
                ret = sde_irq_ltm_map;
                break;
        default:
                pr_err("invalid type: %d\n", item->type);
        }

        return ret;
}

static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
                struct sde_intr_irq_offsets *item, u32 low_idx, u32 high_idx)
{
        int i, j = 0;
        struct sde_irq_type *src = _get_irq_map_addr(item);
        u32 src_size = _get_irq_map_size(item);

        if (!src)
                return -EINVAL;

        if (low_idx >= size || high_idx > size ||
                        (high_idx - low_idx > src_size)) {
                pr_err("invalid size l:%d h:%d dst:%d src:%d\n",
                                low_idx, high_idx, size, src_size);
                return -EINVAL;
        }

        for (i = low_idx; i < high_idx; i++)
                sde_irq_map[i] = src[j++];

        return 0;
}

static int _sde_hw_intr_init_irq_tables(struct sde_hw_intr *intr,
                struct sde_mdss_cfg *m)
{
        struct sde_intr_irq_offsets *item;
        int i, sde_irq_tbl_idx = 0, ret = 0;
        u32 low_idx, high_idx;
        u32 sde_irq_map_idx = 0;

        /* Initialize the offsets in the sde_irq_map & sde_irq_tbl tables */
        list_for_each_entry(item, &m->irq_offset_list, list) {
                low_idx = sde_irq_map_idx;
                high_idx = low_idx + _get_irq_map_size(item);

                if (sde_irq_tbl_idx >= intr->sde_irq_size ||
                                sde_irq_tbl_idx < 0) {
                        ret = -EINVAL;
                        goto exit;
                }

                /* init sde_irq_map with the global irq mapping table */
                if (_sde_copy_regs(intr->sde_irq_map, intr->sde_irq_map_size,
                                item, low_idx, high_idx)) {
                        ret = -EINVAL;
                        goto exit;
                }

                /* init the irq map with its reg & instance idxs in the irq tbl */
                for (i = low_idx; i < high_idx; i++) {
                        intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
                        if (item->type != SDE_INTR_HWBLK_TOP)
                                intr->sde_irq_map[i].instance_idx =
                                                item->instance_idx;
                        pr_debug("sde_irq_map[%d].reg_idx=%d .inst_idx = %d\n",
                                        i, sde_irq_tbl_idx, item->instance_idx);
                }

                /*
                 * Track the index range of this irq's mapping table within
                 * sde_irq_map, so that only this irq's indexes are walked
                 * during irq dispatch.
                 */
                intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start = low_idx;
                intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end = high_idx;

                ret = _set_sde_irq_tbl_offset(
                                &intr->sde_irq_tbl[sde_irq_tbl_idx], item);
                if (ret)
                        goto exit;

                /* increment the idx for both tables accordingly */
                sde_irq_tbl_idx++;
                sde_irq_map_idx = high_idx;
        }

exit:
        sde_hw_catalog_irq_offset_list_delete(&m->irq_offset_list);
        return ret;
}

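/*
 * Worked example (hypothetical): with an irq_offset_list holding a TOP INTR
 * block (the 32-entry sde_irq_intr_map) followed by one INTF_TEAR block (the
 * 4-entry sde_irq_intf_te_map), the loop above would yield:
 *
 *      sde_irq_tbl[0].map_idx_start = 0;  sde_irq_tbl[0].map_idx_end = 32;
 *      sde_irq_tbl[1].map_idx_start = 32; sde_irq_tbl[1].map_idx_end = 36;
 *
 * with every sde_irq_map entry in [0, 32) assigned reg_idx 0 and entries in
 * [32, 36) assigned reg_idx 1, so dispatch only scans the slice belonging
 * to the register whose status bits are set.
 */
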
struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
                struct sde_mdss_cfg *m)
{
        struct sde_hw_intr *intr = NULL;
        struct sde_mdss_base_cfg *cfg;
        struct sde_intr_irq_offsets *item;
        u32 irq_regs_count = 0;
        u32 irq_map_count = 0;
        u32 size;
        int ret = 0;

        if (!addr || !m) {
                ret = -EINVAL;
                goto exit;
        }

        intr = kzalloc(sizeof(*intr), GFP_KERNEL);
        if (!intr) {
                ret = -ENOMEM;
                goto exit;
        }

        cfg = __intr_offset(m, addr, &intr->hw);
        if (!cfg) {
                ret = -EINVAL;
                goto exit;
        }
        __setup_intr_ops(&intr->ops);

        /* check how many irqs this target supports */
        list_for_each_entry(item, &m->irq_offset_list, list) {
                size = _get_irq_map_size(item);
                if (!size || irq_map_count >= UINT_MAX - size) {
                        pr_err("wrong map cnt idx:%d blk:%d/%d sz:%d cnt:%d\n",
                                        irq_regs_count, item->type,
                                        item->instance_idx, size,
                                        irq_map_count);
                        ret = -EINVAL;
                        goto exit;
                }

                irq_regs_count++;
                irq_map_count += size;
        }

        if (irq_regs_count == 0 || irq_map_count == 0) {
                pr_err("invalid irq map: %d %d\n",
                                irq_regs_count, irq_map_count);
                ret = -EINVAL;
                goto exit;
        }

        /* Allocate the table for the irq registers */
        intr->sde_irq_size = irq_regs_count;
        intr->sde_irq_tbl = kcalloc(irq_regs_count, sizeof(*intr->sde_irq_tbl),
                        GFP_KERNEL);
        if (intr->sde_irq_tbl == NULL) {
                ret = -ENOMEM;
                goto exit;
        }

        /* Allocate the table with the valid interrupt bits */
        intr->sde_irq_map_size = irq_map_count;
        intr->sde_irq_map = kcalloc(irq_map_count, sizeof(*intr->sde_irq_map),
                        GFP_KERNEL);
        if (intr->sde_irq_map == NULL) {
                ret = -ENOMEM;
                goto exit;
        }

        /* Initialize the IRQ tables */
        ret = _sde_hw_intr_init_irq_tables(intr, m);
        if (ret)
                goto exit;

        intr->cache_irq_mask = kcalloc(intr->sde_irq_size,
                        sizeof(*intr->cache_irq_mask), GFP_KERNEL);
        if (intr->cache_irq_mask == NULL) {
                ret = -ENOMEM;
                goto exit;
        }

        intr->save_irq_status = kcalloc(intr->sde_irq_size,
                        sizeof(*intr->save_irq_status), GFP_KERNEL);
        if (intr->save_irq_status == NULL) {
                ret = -ENOMEM;
                goto exit;
        }

        spin_lock_init(&intr->irq_lock);

exit:
        if (ret) {
                sde_hw_intr_destroy(intr);
                return ERR_PTR(ret);
        }

        return intr;
}
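
/*
 * Lifecycle sketch (illustrative; mmio and catalog are assumed to be the
 * caller's mapped register base and parsed sde_mdss_cfg):
 *
 *      struct sde_hw_intr *intr = sde_hw_intr_init(mmio, catalog);
 *
 *      if (!IS_ERR_OR_NULL(intr)) {
 *              intr->ops.clear_all_irqs(intr);
 *              intr->ops.disable_all_irqs(intr);
 *              ...
 *              sde_hw_intr_destroy(intr);
 *      }
 */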