/* sde_hw_interrupts.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #include <linux/bitops.h>
  7. #include <linux/slab.h>
  8. #include "sde_kms.h"
  9. #include "sde_hw_interrupts.h"
  10. #include "sde_hw_util.h"
  11. #include "sde_hw_mdss.h"
  12. /**
  13. * Register offsets in MDSS register file for the interrupt registers
  14. * w.r.t. base for that block. Base offsets for IRQs should come from the
  15. * device tree and get stored in the catalog(irq_offset_list) until they
  16. * are added to the sde_irq_tbl during the table initialization.
  17. */
  18. #define HW_INTR_STATUS 0x0010
  19. #define MDP_AD4_INTR_EN_OFF 0x41c
  20. #define MDP_AD4_INTR_CLEAR_OFF 0x424
  21. #define MDP_AD4_INTR_STATUS_OFF 0x420
  22. #define MDP_INTF_TEAR_INTR_EN_OFF 0x0
  23. #define MDP_INTF_TEAR_INTR_STATUS_OFF 0x4
  24. #define MDP_INTF_TEAR_INTR_CLEAR_OFF 0x8
  25. #define MDP_LTM_INTR_EN_OFF 0x50
  26. #define MDP_LTM_INTR_STATUS_OFF 0x54
  27. #define MDP_LTM_INTR_CLEAR_OFF 0x58
  28. #define MDP_WB_INTR_EN_OFF 0x18C
  29. #define MDP_WB_INTR_STATUS_OFF 0x190
  30. #define MDP_WB_INTR_CLEAR_OFF 0x194
  31. /**
  32. * WB interrupt status bit definitions
  33. */
  34. #define SDE_INTR_WB_0_DONE BIT(0)
  35. #define SDE_INTR_WB_1_DONE BIT(1)
  36. #define SDE_INTR_WB_2_DONE BIT(4)
  37. /**
  38. * WDOG timer interrupt status bit definitions
  39. */
  40. #define SDE_INTR_WD_TIMER_0_DONE BIT(2)
  41. #define SDE_INTR_WD_TIMER_1_DONE BIT(3)
  42. #define SDE_INTR_WD_TIMER_2_DONE BIT(5)
  43. #define SDE_INTR_WD_TIMER_3_DONE BIT(6)
  44. #define SDE_INTR_WD_TIMER_4_DONE BIT(7)
  45. /**
  46. * Pingpong interrupt status bit definitions
  47. */
  48. #define SDE_INTR_PING_PONG_0_DONE BIT(8)
  49. #define SDE_INTR_PING_PONG_1_DONE BIT(9)
  50. #define SDE_INTR_PING_PONG_2_DONE BIT(10)
  51. #define SDE_INTR_PING_PONG_3_DONE BIT(11)
  52. #define SDE_INTR_PING_PONG_4_DONE BIT(30)
  53. #define SDE_INTR_PING_PONG_5_DONE BIT(31)
  54. #define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
  55. #define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
  56. #define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
  57. #define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
  58. #define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
  59. #define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
  60. #define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
  61. #define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
  62. #define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
  63. #define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
  64. #define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
  65. #define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
  66. /**
  67. * Interface interrupt status bit definitions
  68. */
  69. #define SDE_INTR_INTF_0_UNDERRUN BIT(24)
  70. #define SDE_INTR_INTF_1_UNDERRUN BIT(26)
  71. #define SDE_INTR_INTF_2_UNDERRUN BIT(28)
  72. #define SDE_INTR_INTF_3_UNDERRUN BIT(30)
  73. #define SDE_INTR_INTF_0_VSYNC BIT(25)
  74. #define SDE_INTR_INTF_1_VSYNC BIT(27)
  75. #define SDE_INTR_INTF_2_VSYNC BIT(29)
  76. #define SDE_INTR_INTF_3_VSYNC BIT(31)
  77. /**
  78. * Ctl start interrupt status bit definitions
  79. */
  80. #define SDE_INTR_CTL_0_START BIT(9)
  81. #define SDE_INTR_CTL_1_START BIT(10)
  82. #define SDE_INTR_CTL_2_START BIT(11)
  83. #define SDE_INTR_CTL_3_START BIT(12)
  84. #define SDE_INTR_CTL_4_START BIT(13)
  85. #define SDE_INTR_CTL_5_START BIT(23)
  86. /**
  87. * Ctl done interrupt status bit definitions
  88. */
  89. #define SDE_INTR_CTL_0_DONE BIT(0)
  90. #define SDE_INTR_CTL_1_DONE BIT(1)
  91. #define SDE_INTR_CTL_2_DONE BIT(2)
  92. #define SDE_INTR_CTL_3_DONE BIT(3)
  93. #define SDE_INTR_CTL_4_DONE BIT(4)
  94. #define SDE_INTR_CTL_5_DONE BIT(5)
  95. /**
  96. * Concurrent WB overflow interrupt status bit definitions
  97. */
  98. #define SDE_INTR_CWB_OVERFLOW BIT(29)
  99. /**
  100. * Histogram VIG done interrupt status bit definitions
  101. */
  102. #define SDE_INTR_HIST_VIG_0_DONE BIT(0)
  103. #define SDE_INTR_HIST_VIG_1_DONE BIT(4)
  104. #define SDE_INTR_HIST_VIG_2_DONE BIT(8)
  105. #define SDE_INTR_HIST_VIG_3_DONE BIT(10)
  106. /**
  107. * Histogram VIG reset Sequence done interrupt status bit definitions
  108. */
  109. #define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
  110. #define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
  111. #define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
  112. #define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
  113. /**
  114. * Histogram DSPP done interrupt status bit definitions
  115. */
  116. #define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
  117. #define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
  118. #define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
  119. #define SDE_INTR_HIST_DSPP_3_DONE BIT(22)
  120. /**
  121. * Histogram DSPP reset Sequence done interrupt status bit definitions
  122. */
  123. #define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
  124. #define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
  125. #define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
  126. #define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
  127. /**
  128. * INTF interrupt status bit definitions
  129. */
  130. #define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
  131. #define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
  132. #define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
  133. #define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
  134. #define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
  135. #define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
  136. #define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
  137. #define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
  138. #define SDE_INTR_PROG_LINE BIT(8)
  139. #define SDE_INTR_INTF_WD_TIMER_0_DONE BIT(13)
  140. /**
  141. * AD4 interrupt status bit definitions
  142. */
  143. #define SDE_INTR_BRIGHTPR_UPDATED BIT(4)
  144. #define SDE_INTR_DARKENH_UPDATED BIT(3)
  145. #define SDE_INTR_STREN_OUTROI_UPDATED BIT(2)
  146. #define SDE_INTR_STREN_INROI_UPDATED BIT(1)
  147. #define SDE_INTR_BACKLIGHT_UPDATED BIT(0)
  148. /**
  149. * INTF Tear IRQ register bit definitions
  150. */
  151. #define SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE BIT(0)
  152. #define SDE_INTR_INTF_TEAR_WR_PTR BIT(1)
  153. #define SDE_INTR_INTF_TEAR_RD_PTR BIT(2)
  154. #define SDE_INTR_INTF_TEAR_TE_DETECTED BIT(3)
  155. #define SDE_INTR_INTF_TEAR_TEAR_DETECTED BIT(4)
  156. /**
  157. * LTM interrupt status bit definitions
  158. */
  159. #define SDE_INTR_LTM_STATS_DONE BIT(0)
  160. #define SDE_INTR_LTM_STATS_WB_PB BIT(5)
  161. /**
  162. * WB interrupt status bit definitions
  163. */
  164. #define SDE_INTR_WB_PROG_LINE BIT(0)
  165. /**
  166. * struct sde_intr_reg - array of SDE register sets
  167. * @clr_off: offset to CLEAR reg
  168. * @en_off: offset to ENABLE reg
  169. * @status_off: offset to STATUS reg
  170. * @map_idx_start first offset in the sde_irq_map table
  171. * @map_idx_end last offset in the sde_irq_map table
  172. */
/* One hardware interrupt register set plus its slice of the irq map. */
struct sde_intr_reg {
	u32 clr_off;		/* offset to CLEAR register */
	u32 en_off;		/* offset to ENABLE register */
	u32 status_off;		/* offset to STATUS register */
	u32 map_idx_start;	/* first index of this block in sde_irq_map */
	u32 map_idx_end;	/* one past the last index in sde_irq_map */
};
  180. /**
  181. * struct sde_irq_type - maps each irq with i/f
  182. * @intr_type: type of interrupt listed in sde_intr_type
  183. * @instance_idx: instance index of the associated HW block in SDE
  184. * @irq_mask: corresponding bit in the interrupt status reg
  185. * @reg_idx: index in the 'sde_irq_tbl' table, to know which
  186. * registers offsets to use.
  187. */
/* Maps a single interrupt source to its register set and status bit. */
struct sde_irq_type {
	u32 intr_type;		/* type of interrupt, from sde_intr_type */
	u32 instance_idx;	/* instance index of the associated HW block */
	u32 irq_mask;		/* corresponding bit in the interrupt status reg */
	int reg_idx;		/* index into sde_irq_tbl; -1 until initialized */
};
  194. /**
  195. * IRQ mapping tables - use for lookup an irq_idx in this table that have
  196. * a matching interface type and instance index.
  197. * Each of these tables are copied to a dynamically allocated
  198. * table, that will be used to service each of the irqs
  199. * -1 indicates an uninitialized value which should be set when copying
  200. * these tables to the sde_irq_map.
  201. */
/*
 * Primary INTR register map: WB done, WD timer done, pingpong
 * done/rd-ptr/wr-ptr/autorefresh, and INTF underrun/vsync bits.
 * reg_idx is -1 here and is filled in when this template is copied
 * into the dynamically allocated sde_irq_map.
 */
static struct sde_irq_type sde_irq_intr_map[] = {
	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_0, SDE_INTR_WB_0_DONE, -1},
	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_1, SDE_INTR_WB_1_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, -1},
	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
		SDE_INTR_PING_PONG_0_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
		SDE_INTR_PING_PONG_1_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
		SDE_INTR_PING_PONG_2_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
		SDE_INTR_PING_PONG_3_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
		SDE_INTR_PING_PONG_0_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
		SDE_INTR_PING_PONG_1_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
		SDE_INTR_PING_PONG_2_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
		SDE_INTR_PING_PONG_3_RD_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
		SDE_INTR_PING_PONG_0_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
		SDE_INTR_PING_PONG_1_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
		SDE_INTR_PING_PONG_2_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
		SDE_INTR_PING_PONG_3_WR_PTR, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
		SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
		SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
		SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
		SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, -1},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, -1},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, -1},
};
/*
 * INTR2 register map: CTL start/done, concurrent-WB overflow, and the
 * extra pingpong 4/5 done bits. reg_idx is resolved at init time.
 */
static struct sde_irq_type sde_irq_intr2_map[] = {
	{ SDE_IRQ_TYPE_CTL_START, CTL_0,
		SDE_INTR_CTL_0_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_1,
		SDE_INTR_CTL_1_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_2,
		SDE_INTR_CTL_2_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_3,
		SDE_INTR_CTL_3_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_4,
		SDE_INTR_CTL_4_START, -1},
	{ SDE_IRQ_TYPE_CTL_START, CTL_5,
		SDE_INTR_CTL_5_START, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_0,
		SDE_INTR_CTL_0_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_1,
		SDE_INTR_CTL_1_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_2,
		SDE_INTR_CTL_2_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_3,
		SDE_INTR_CTL_3_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_4,
		SDE_INTR_CTL_4_DONE, -1},
	{ SDE_IRQ_TYPE_CTL_DONE, CTL_5,
		SDE_INTR_CTL_5_DONE, -1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, PINGPONG_CWB_0, SDE_INTR_CWB_OVERFLOW, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_4,
		SDE_INTR_PING_PONG_4_DONE, -1},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_5,
		SDE_INTR_PING_PONG_5_DONE, -1},
};
/*
 * Histogram interrupt map: VIG and DSPP histogram done plus reset-sequence
 * done bits. reg_idx is resolved at init time.
 */
static struct sde_irq_type sde_irq_hist_map[] = {
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
		SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
		SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
		SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
		SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
		SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
		SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
		SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, -1},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
		SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, -1},
};
/*
 * Per-INTF interrupt map: static-frame in/out, programmable line, and the
 * INTF WD timer done bit. instance_idx is -1 here because the owning INTF
 * instance is only known when the table is copied into sde_irq_map.
 */
static struct sde_irq_type sde_irq_intf_map[] = {
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, -1,
		SDE_INTR_VIDEO_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, -1,
		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, -1,
		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, -1,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, -1,
		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, -1,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, -1,
		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, -1,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
	{ SDE_IRQ_TYPE_PROG_LINE, -1, SDE_INTR_PROG_LINE, -1},
	{ SDE_IRQ_TYPE_WD_TIMER, -1, SDE_INTR_WD_TIMER_0_DONE, -1},
};
/* Per-AD4 interrupt map; only the backlight-updated bit is exposed. */
static struct sde_irq_type sde_irq_ad4_map[] = {
	{ SDE_IRQ_TYPE_AD4_BL_DONE, -1, SDE_INTR_BACKLIGHT_UPDATED, -1},
};
/*
 * Per-INTF tear-check interrupt map: autorefresh done, write/read pointer,
 * and tear detected. Note SDE_INTR_INTF_TEAR_TE_DETECTED is intentionally
 * not mapped here.
 */
static struct sde_irq_type sde_irq_intf_te_map[] = {
	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, -1,
		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, -1,
		SDE_INTR_INTF_TEAR_WR_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, -1,
		SDE_INTR_INTF_TEAR_RD_PTR, -1},
	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, -1,
		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
};
/* Per-LTM interrupt map: stats done and stats WB push-back. */
static struct sde_irq_type sde_irq_ltm_map[] = {
	{ SDE_IRQ_TYPE_LTM_STATS_DONE, -1, SDE_INTR_LTM_STATS_DONE, -1},
	{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, -1, SDE_INTR_LTM_STATS_WB_PB, -1},
};
/* Per-WB interrupt map; only the programmable-line bit is exposed. */
static struct sde_irq_type sde_irq_wb_map[] = {
	{ SDE_IRQ_TYPE_WB_PROG_LINE, -1, SDE_INTR_WB_PROG_LINE, -1},
};
  349. static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
  350. enum sde_intr_type intr_type, u32 instance_idx)
  351. {
  352. int i;
  353. for (i = 0; i < intr->sde_irq_map_size; i++) {
  354. if (intr_type == intr->sde_irq_map[i].intr_type &&
  355. instance_idx == intr->sde_irq_map[i].instance_idx)
  356. return i;
  357. }
  358. pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
  359. intr_type, instance_idx);
  360. return -EINVAL;
  361. }
/*
 * Walk every interrupt register set, read and clear its raw status, then
 * dispatch each enabled, asserted bit to @cbfunc (or clear it directly
 * when no callback is given). Runs entirely under intr->irq_lock; the
 * read-then-clear ordering of the status register is intentional and must
 * not be reordered.
 */
static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
		void (*cbfunc)(void *, int),
		void *arg)
{
	int reg_idx;
	int irq_idx;
	int start_idx;
	int end_idx;
	u32 irq_status;
	u32 enable_mask;
	unsigned long irq_flags;

	if (!intr)
		return;

	/*
	 * The dispatcher will save the IRQ status before calling here.
	 * Now need to go through each IRQ status and find matching
	 * irq lookup index.
	 */
	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < intr->sde_irq_size; reg_idx++) {
		/*
		 * Each interrupt register has a dynamic range of indexes,
		 * initialized during hw_intr_init when sde_irq_tbl is created.
		 */
		start_idx = intr->sde_irq_tbl[reg_idx].map_idx_start;
		end_idx = intr->sde_irq_tbl[reg_idx].map_idx_end;

		/* skip register sets whose map slice is out of range */
		if (start_idx >= intr->sde_irq_map_size ||
				end_idx > intr->sde_irq_map_size)
			continue;

		/* Read interrupt status */
		irq_status = SDE_REG_READ(&intr->hw,
				intr->sde_irq_tbl[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = SDE_REG_READ(&intr->hw,
				intr->sde_irq_tbl[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			SDE_REG_WRITE(&intr->hw,
					intr->sde_irq_tbl[reg_idx].clr_off,
					irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		/*
		 * Search through matching intr status from irq map.
		 * start_idx and end_idx defined the search range in
		 * the sde_irq_map.
		 */
		for (irq_idx = start_idx;
				(irq_idx < end_idx) && irq_status;
				irq_idx++)
			if ((irq_status &
				intr->sde_irq_map[irq_idx].irq_mask) &&
				(intr->sde_irq_map[irq_idx].reg_idx ==
				 reg_idx)) {
				/*
				 * Once a match on irq mask, perform a callback
				 * to the given cbfunc. cbfunc will take care
				 * the interrupt status clearing. If cbfunc is
				 * not provided, then the interrupt clearing
				 * is here.
				 */
				if (cbfunc)
					cbfunc(arg, irq_idx);
				else
					intr->ops.clear_intr_status_nolock(
							intr, irq_idx);

				/*
				 * When callback finish, clear the irq_status
				 * with the matching mask. Once irq_status
				 * is all cleared, the search can be stopped.
				 */
				irq_status &=
					~intr->sde_irq_map[irq_idx].irq_mask;
			}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
  438. static int sde_hw_intr_enable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
  439. {
  440. int reg_idx;
  441. const struct sde_intr_reg *reg;
  442. const struct sde_irq_type *irq;
  443. const char *dbgstr = NULL;
  444. uint32_t cache_irq_mask;
  445. if (!intr)
  446. return -EINVAL;
  447. if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
  448. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  449. return -EINVAL;
  450. }
  451. irq = &intr->sde_irq_map[irq_idx];
  452. reg_idx = irq->reg_idx;
  453. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  454. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  455. return -EINVAL;
  456. }
  457. reg = &intr->sde_irq_tbl[reg_idx];
  458. cache_irq_mask = intr->cache_irq_mask[reg_idx];
  459. if (cache_irq_mask & irq->irq_mask) {
  460. dbgstr = "SDE IRQ already set:";
  461. } else {
  462. dbgstr = "SDE IRQ enabled:";
  463. cache_irq_mask |= irq->irq_mask;
  464. /* Cleaning any pending interrupt */
  465. SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
  466. /* Enabling interrupts with the new mask */
  467. SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
  468. /* ensure register write goes through */
  469. wmb();
  470. intr->cache_irq_mask[reg_idx] = cache_irq_mask;
  471. }
  472. pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
  473. irq->irq_mask, cache_irq_mask);
  474. return 0;
  475. }
  476. static int sde_hw_intr_disable_irq_nolock(struct sde_hw_intr *intr, int irq_idx)
  477. {
  478. int reg_idx;
  479. const struct sde_intr_reg *reg;
  480. const struct sde_irq_type *irq;
  481. const char *dbgstr = NULL;
  482. uint32_t cache_irq_mask;
  483. if (!intr)
  484. return -EINVAL;
  485. if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
  486. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  487. return -EINVAL;
  488. }
  489. irq = &intr->sde_irq_map[irq_idx];
  490. reg_idx = irq->reg_idx;
  491. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  492. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  493. return -EINVAL;
  494. }
  495. reg = &intr->sde_irq_tbl[reg_idx];
  496. cache_irq_mask = intr->cache_irq_mask[reg_idx];
  497. if ((cache_irq_mask & irq->irq_mask) == 0) {
  498. dbgstr = "SDE IRQ is already cleared:";
  499. } else {
  500. dbgstr = "SDE IRQ mask disable:";
  501. cache_irq_mask &= ~irq->irq_mask;
  502. /* Disable interrupts based on the new mask */
  503. SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
  504. /* Cleaning any pending interrupt */
  505. SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
  506. /* ensure register write goes through */
  507. wmb();
  508. intr->cache_irq_mask[reg_idx] = cache_irq_mask;
  509. }
  510. pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
  511. irq->irq_mask, cache_irq_mask);
  512. return 0;
  513. }
  514. static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
  515. {
  516. int i;
  517. if (!intr)
  518. return -EINVAL;
  519. for (i = 0; i < intr->sde_irq_size; i++)
  520. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
  521. 0xffffffff);
  522. /* ensure register writes go through */
  523. wmb();
  524. return 0;
  525. }
  526. static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
  527. {
  528. int i;
  529. if (!intr)
  530. return -EINVAL;
  531. for (i = 0; i < intr->sde_irq_size; i++)
  532. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].en_off,
  533. 0x00000000);
  534. /* ensure register writes go through */
  535. wmb();
  536. return 0;
  537. }
  538. static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
  539. uint32_t *sources)
  540. {
  541. if (!intr || !sources)
  542. return -EINVAL;
  543. *sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);
  544. return 0;
  545. }
  546. static void sde_hw_intr_clear_intr_status_nolock(struct sde_hw_intr *intr,
  547. int irq_idx)
  548. {
  549. int reg_idx;
  550. if (!intr)
  551. return;
  552. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  553. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  554. return;
  555. }
  556. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  557. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  558. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  559. return;
  560. }
  561. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  562. intr->sde_irq_map[irq_idx].irq_mask);
  563. /* ensure register writes go through */
  564. wmb();
  565. }
  566. static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
  567. int irq_idx)
  568. {
  569. unsigned long irq_flags;
  570. if (!intr)
  571. return;
  572. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  573. sde_hw_intr_clear_intr_status_nolock(intr, irq_idx);
  574. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  575. }
  576. static u32 sde_hw_intr_get_intr_status_nolock(struct sde_hw_intr *intr,
  577. int irq_idx, bool clear)
  578. {
  579. int reg_idx;
  580. u32 intr_status;
  581. if (!intr)
  582. return 0;
  583. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  584. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  585. return 0;
  586. }
  587. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  588. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  589. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  590. return 0;
  591. }
  592. intr_status = SDE_REG_READ(&intr->hw,
  593. intr->sde_irq_tbl[reg_idx].status_off) &
  594. intr->sde_irq_map[irq_idx].irq_mask;
  595. if (intr_status && clear)
  596. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  597. intr_status);
  598. /* ensure register writes go through */
  599. wmb();
  600. return intr_status;
  601. }
  602. static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
  603. int irq_idx, bool clear)
  604. {
  605. int reg_idx;
  606. unsigned long irq_flags;
  607. u32 intr_status;
  608. if (!intr)
  609. return 0;
  610. if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
  611. pr_err("invalid IRQ index: [%d]\n", irq_idx);
  612. return 0;
  613. }
  614. reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
  615. if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
  616. pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
  617. return 0;
  618. }
  619. spin_lock_irqsave(&intr->irq_lock, irq_flags);
  620. intr_status = SDE_REG_READ(&intr->hw,
  621. intr->sde_irq_tbl[reg_idx].status_off) &
  622. intr->sde_irq_map[irq_idx].irq_mask;
  623. if (intr_status && clear)
  624. SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
  625. intr_status);
  626. /* ensure register writes go through */
  627. wmb();
  628. spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
  629. return intr_status;
  630. }
  631. static int _set_sde_irq_tbl_offset_top(struct sde_intr_reg *sde_irq,
  632. struct sde_intr_irq_offsets *item)
  633. {
  634. u32 base_offset;
  635. if (!sde_irq || !item)
  636. return -EINVAL;
  637. base_offset = item->base_offset;
  638. switch (item->instance_idx) {
  639. case SDE_INTR_TOP_INTR:
  640. sde_irq->clr_off = base_offset + INTR_CLEAR;
  641. sde_irq->en_off = base_offset + INTR_EN;
  642. sde_irq->status_off = base_offset + INTR_STATUS;
  643. break;
  644. case SDE_INTR_TOP_INTR2:
  645. sde_irq->clr_off = base_offset + INTR2_CLEAR;
  646. sde_irq->en_off = base_offset + INTR2_EN;
  647. sde_irq->status_off = base_offset + INTR2_STATUS;
  648. break;
  649. case SDE_INTR_TOP_HIST_INTR:
  650. sde_irq->clr_off = base_offset + HIST_INTR_CLEAR;
  651. sde_irq->en_off = base_offset + HIST_INTR_EN;
  652. sde_irq->status_off = base_offset + HIST_INTR_STATUS;
  653. break;
  654. default:
  655. pr_err("invalid TOP intr for instance %d\n",
  656. item->instance_idx);
  657. return -EINVAL;
  658. }
  659. return 0;
  660. }
/*
 * Fill in the clear/enable/status register offsets of @sde_irq for the
 * HW block type described by @item. TOP blocks are delegated to
 * _set_sde_irq_tbl_offset_top(); all other block types add fixed
 * per-block register offsets to item->base_offset.
 *
 * Return: 0 on success, -EINVAL for bad arguments or an unknown type.
 */
static int _set_sde_irq_tbl_offset(struct sde_intr_reg *sde_irq,
		struct sde_intr_irq_offsets *item)
{
	u32 base_offset, rc = 0;

	if (!sde_irq || !item)
		return -EINVAL;

	base_offset = item->base_offset;
	switch (item->type) {
	case SDE_INTR_HWBLK_TOP:
		rc = _set_sde_irq_tbl_offset_top(sde_irq, item);
		break;
	case SDE_INTR_HWBLK_INTF:
		sde_irq->clr_off = base_offset + INTF_INTR_CLEAR;
		sde_irq->en_off = base_offset + INTF_INTR_EN;
		sde_irq->status_off = base_offset + INTF_INTR_STATUS;
		break;
	case SDE_INTR_HWBLK_AD4:
		sde_irq->clr_off = base_offset + MDP_AD4_INTR_CLEAR_OFF;
		sde_irq->en_off = base_offset + MDP_AD4_INTR_EN_OFF;
		sde_irq->status_off = base_offset + MDP_AD4_INTR_STATUS_OFF;
		break;
	case SDE_INTR_HWBLK_INTF_TEAR:
		sde_irq->clr_off = base_offset + MDP_INTF_TEAR_INTR_CLEAR_OFF;
		sde_irq->en_off = base_offset + MDP_INTF_TEAR_INTR_EN_OFF;
		sde_irq->status_off = base_offset +
				MDP_INTF_TEAR_INTR_STATUS_OFF;
		break;
	case SDE_INTR_HWBLK_LTM:
		sde_irq->clr_off = base_offset + MDP_LTM_INTR_CLEAR_OFF;
		sde_irq->en_off = base_offset + MDP_LTM_INTR_EN_OFF;
		sde_irq->status_off = base_offset + MDP_LTM_INTR_STATUS_OFF;
		break;
	case SDE_INTR_HWBLK_WB:
		sde_irq->clr_off = base_offset + MDP_WB_INTR_CLEAR_OFF;
		sde_irq->en_off = base_offset + MDP_WB_INTR_EN_OFF;
		sde_irq->status_off = base_offset + MDP_WB_INTR_STATUS_OFF;
		break;
	default:
		pr_err("unrecognized intr blk type %d\n",
				item->type);
		rc = -EINVAL;
	}

	return rc;
}
/**
 * __setup_intr_ops - populate the interrupt ops function table
 * @ops: ops table to fill in
 *
 * Wires every sde_hw_intr_ops callback to the static implementation in
 * this file, so callers drive the interrupt hardware only through @ops.
 */
static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
{
	ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
	ops->enable_irq_nolock = sde_hw_intr_enable_irq_nolock;
	ops->disable_irq_nolock = sde_hw_intr_disable_irq_nolock;
	ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
	ops->clear_all_irqs = sde_hw_intr_clear_irqs;
	ops->disable_all_irqs = sde_hw_intr_disable_irqs;
	ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
	ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
	ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock;
	ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
	ops->get_intr_status_nolock = sde_hw_intr_get_intr_status_nolock;
}
  719. static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
  720. void __iomem *addr, struct sde_hw_blk_reg_map *hw)
  721. {
  722. if (!m || !addr || !hw || m->mdp_count == 0)
  723. return NULL;
  724. hw->base_off = addr;
  725. hw->blk_off = m->mdss[0].base;
  726. hw->hw_rev = m->hw_rev;
  727. return &m->mdss[0];
  728. }
  729. void sde_hw_intr_destroy(struct sde_hw_intr *intr)
  730. {
  731. if (intr) {
  732. kfree(intr->sde_irq_tbl);
  733. kfree(intr->sde_irq_map);
  734. kfree(intr->cache_irq_mask);
  735. kfree(intr);
  736. }
  737. }
  738. static inline u32 _get_irq_map_size_top(enum sde_intr_top_intr inst)
  739. {
  740. u32 ret = 0;
  741. switch (inst) {
  742. case SDE_INTR_TOP_INTR:
  743. ret = ARRAY_SIZE(sde_irq_intr_map);
  744. break;
  745. case SDE_INTR_TOP_INTR2:
  746. ret = ARRAY_SIZE(sde_irq_intr2_map);
  747. break;
  748. case SDE_INTR_TOP_HIST_INTR:
  749. ret = ARRAY_SIZE(sde_irq_hist_map);
  750. break;
  751. default:
  752. pr_err("invalid top inst:%d\n", inst);
  753. }
  754. return ret;
  755. }
  756. static inline u32 _get_irq_map_size(struct sde_intr_irq_offsets *item)
  757. {
  758. u32 ret = 0;
  759. switch (item->type) {
  760. case SDE_INTR_HWBLK_TOP:
  761. ret = _get_irq_map_size_top(item->instance_idx);
  762. break;
  763. case SDE_INTR_HWBLK_INTF:
  764. ret = ARRAY_SIZE(sde_irq_intf_map);
  765. break;
  766. case SDE_INTR_HWBLK_AD4:
  767. ret = ARRAY_SIZE(sde_irq_ad4_map);
  768. break;
  769. case SDE_INTR_HWBLK_INTF_TEAR:
  770. ret = ARRAY_SIZE(sde_irq_intf_te_map);
  771. break;
  772. case SDE_INTR_HWBLK_LTM:
  773. ret = ARRAY_SIZE(sde_irq_ltm_map);
  774. break;
  775. case SDE_INTR_HWBLK_WB:
  776. ret = ARRAY_SIZE(sde_irq_wb_map);
  777. break;
  778. default:
  779. pr_err("invalid type: %d\n", item->type);
  780. }
  781. return ret;
  782. }
  783. static inline struct sde_irq_type *_get_irq_map_addr_top(
  784. enum sde_intr_top_intr inst)
  785. {
  786. struct sde_irq_type *ret = NULL;
  787. switch (inst) {
  788. case SDE_INTR_TOP_INTR:
  789. ret = sde_irq_intr_map;
  790. break;
  791. case SDE_INTR_TOP_INTR2:
  792. ret = sde_irq_intr2_map;
  793. break;
  794. case SDE_INTR_TOP_HIST_INTR:
  795. ret = sde_irq_hist_map;
  796. break;
  797. default:
  798. pr_err("invalid top inst:%d\n", inst);
  799. }
  800. return ret;
  801. }
  802. static inline struct sde_irq_type *_get_irq_map_addr(
  803. struct sde_intr_irq_offsets *item)
  804. {
  805. struct sde_irq_type *ret = NULL;
  806. switch (item->type) {
  807. case SDE_INTR_HWBLK_TOP:
  808. ret = _get_irq_map_addr_top(item->instance_idx);
  809. break;
  810. case SDE_INTR_HWBLK_INTF:
  811. ret = sde_irq_intf_map;
  812. break;
  813. case SDE_INTR_HWBLK_AD4:
  814. ret = sde_irq_ad4_map;
  815. break;
  816. case SDE_INTR_HWBLK_INTF_TEAR:
  817. ret = sde_irq_intf_te_map;
  818. break;
  819. case SDE_INTR_HWBLK_LTM:
  820. ret = sde_irq_ltm_map;
  821. break;
  822. case SDE_INTR_HWBLK_WB:
  823. ret = sde_irq_wb_map;
  824. break;
  825. default:
  826. pr_err("invalid type: %d\n", item->type);
  827. }
  828. return ret;
  829. }
  830. static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
  831. struct sde_intr_irq_offsets *item, u32 low_idx, u32 high_idx)
  832. {
  833. int i, j = 0;
  834. struct sde_irq_type *src = _get_irq_map_addr(item);
  835. u32 src_size = _get_irq_map_size(item);
  836. if (!src)
  837. return -EINVAL;
  838. if (low_idx >= size || high_idx > size ||
  839. (high_idx - low_idx > src_size)) {
  840. pr_err("invalid size l:%d h:%d dst:%d src:%d\n",
  841. low_idx, high_idx, size, src_size);
  842. return -EINVAL;
  843. }
  844. for (i = low_idx; i < high_idx; i++)
  845. sde_irq_map[i] = src[j++];
  846. return 0;
  847. }
/**
 * _sde_hw_intr_init_irq_tables - populate the per-target irq tables
 * @intr: context with sde_irq_tbl / sde_irq_map already allocated
 * @m: catalog whose irq_offset_list describes this target's irq blocks
 *
 * Walks the catalog's irq offset list and, for each hw block, copies its
 * static irq map into intr->sde_irq_map, stamps each copied entry with the
 * owning register index (and instance for non-TOP blocks), and records the
 * block's register offsets and map-index window in intr->sde_irq_tbl.
 *
 * NOTE(review): the catalog's irq_offset_list is deleted on ALL paths,
 * including errors — the list is consumed by this call.
 *
 * Return: 0 on success, -EINVAL on table overflow or copy/offset failure.
 */
static int _sde_hw_intr_init_irq_tables(struct sde_hw_intr *intr,
	struct sde_mdss_cfg *m)
{
	struct sde_intr_irq_offsets *item;
	int i, sde_irq_tbl_idx = 0, ret = 0;
	u32 low_idx, high_idx;
	u32 sde_irq_map_idx = 0;

	/* Initialize offsets in the sde_irq_map & sde_irq_tbl tables */
	list_for_each_entry(item, &m->irq_offset_list, list) {
		/* this block's window [low_idx, high_idx) in sde_irq_map */
		low_idx = sde_irq_map_idx;
		high_idx = low_idx + _get_irq_map_size(item);

		/* one sde_irq_tbl slot per catalog entry; never overrun */
		if (sde_irq_tbl_idx >= intr->sde_irq_size ||
				sde_irq_tbl_idx < 0) {
			ret = -EINVAL;
			goto exit;
		}

		/* init sde_irq_map with the global irq mapping table */
		if (_sde_copy_regs(intr->sde_irq_map, intr->sde_irq_map_size,
				item, low_idx, high_idx)) {
			ret = -EINVAL;
			goto exit;
		}

		/* init irq map with its reg & instance idxs in the irq tbl */
		for (i = low_idx; i < high_idx; i++) {
			intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
			/* TOP maps already carry their own instance idx */
			if (item->type != SDE_INTR_HWBLK_TOP)
				intr->sde_irq_map[i].instance_idx =
						item->instance_idx;
			pr_debug("sde_irq_map[%d].reg_idx=%d .inst_idx = %d\n",
					i, sde_irq_tbl_idx, item->instance_idx);
		}

		/* track the idx of the mapping table for this irq in
		 * sde_irq_map, this to only access the indexes of this
		 * irq during the irq dispatch
		 */
		intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start = low_idx;
		intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end = high_idx;
		ret = _set_sde_irq_tbl_offset(
				&intr->sde_irq_tbl[sde_irq_tbl_idx], item);
		if (ret)
			goto exit;

		/* increment idx for both tables accordingly */
		sde_irq_tbl_idx++;
		sde_irq_map_idx = high_idx;
	}

exit:
	sde_hw_catalog_irq_offset_list_delete(&m->irq_offset_list);
	return ret;
}
/**
 * sde_hw_intr_init - allocate and initialize an interrupt context
 * @addr: ioremapped base address of the device
 * @m: catalog configuration for this target
 *
 * Sizes the irq tables from the catalog's irq_offset_list, allocates the
 * register table, irq map and cached-mask array, then populates the tables
 * via _sde_hw_intr_init_irq_tables().
 *
 * Return: initialized context, or ERR_PTR(-errno) on failure. Any partial
 * allocation is released through sde_hw_intr_destroy() before returning.
 */
struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
		struct sde_mdss_cfg *m)
{
	struct sde_hw_intr *intr = NULL;
	struct sde_mdss_base_cfg *cfg;
	struct sde_intr_irq_offsets *item;
	u32 irq_regs_count = 0;		/* one per catalog irq block */
	u32 irq_map_count = 0;		/* total irq map entries */
	u32 size;
	int ret = 0;

	if (!addr || !m) {
		ret = -EINVAL;
		goto exit;
	}

	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
	if (!intr) {
		ret = -ENOMEM;
		goto exit;
	}

	cfg = __intr_offset(m, addr, &intr->hw);
	if (!cfg) {
		ret = -EINVAL;
		goto exit;
	}
	__setup_intr_ops(&intr->ops);

	/* check how many irq's this target supports */
	list_for_each_entry(item, &m->irq_offset_list, list) {
		size = _get_irq_map_size(item);
		/* reject unknown blocks and guard against u32 overflow */
		if (!size || irq_map_count >= UINT_MAX - size) {
			pr_err("wrong map cnt idx:%d blk:%d/%d sz:%d cnt:%d\n",
				irq_regs_count, item->type, item->instance_idx,
				size, irq_map_count);
			ret = -EINVAL;
			goto exit;
		}
		irq_regs_count++;
		irq_map_count += size;
	}

	if (irq_regs_count == 0 || irq_map_count == 0) {
		pr_err("invalid irq map: %d %d\n",
				irq_regs_count, irq_map_count);
		ret = -EINVAL;
		goto exit;
	}

	/* Allocate table for the irq registers */
	intr->sde_irq_size = irq_regs_count;
	intr->sde_irq_tbl = kcalloc(irq_regs_count, sizeof(*intr->sde_irq_tbl),
			GFP_KERNEL);
	if (intr->sde_irq_tbl == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Allocate table with the valid interrupts bits */
	intr->sde_irq_map_size = irq_map_count;
	intr->sde_irq_map = kcalloc(irq_map_count, sizeof(*intr->sde_irq_map),
			GFP_KERNEL);
	if (intr->sde_irq_map == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	/* Initialize IRQs tables */
	ret = _sde_hw_intr_init_irq_tables(intr, m);
	if (ret)
		goto exit;

	/* one cached enable-mask word per irq register */
	intr->cache_irq_mask = kcalloc(intr->sde_irq_size,
			sizeof(*intr->cache_irq_mask), GFP_KERNEL);
	if (intr->cache_irq_mask == NULL) {
		ret = -ENOMEM;
		goto exit;
	}

	spin_lock_init(&intr->irq_lock);

exit:
	if (ret) {
		/* destroy handles NULL and partially built contexts */
		sde_hw_intr_destroy(intr);
		return ERR_PTR(ret);
	}

	return intr;
}