  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
  7. #include "sde_hwio.h"
  8. #include "sde_hw_catalog.h"
  9. #include "sde_hw_top.h"
  10. #include "sde_dbg.h"
  11. #include "sde_kms.h"
  12. #define SSPP_SPARE 0x28
  13. #define UBWC_DEC_HW_VERSION 0x058
  14. #define UBWC_STATIC 0x144
  15. #define UBWC_CTRL_2 0x150
  16. #define UBWC_PREDICTION_MODE 0x154
  17. #define FLD_SPLIT_DISPLAY_CMD BIT(1)
  18. #define FLD_SMART_PANEL_FREE_RUN BIT(2)
  19. #define FLD_INTF_1_SW_TRG_MUX BIT(4)
  20. #define FLD_INTF_2_SW_TRG_MUX BIT(8)
  21. #define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
  22. #define MDP_DSPP_DBGBUS_CTRL 0x348
  23. #define MDP_DSPP_DBGBUS_STATUS 0x34C
  24. #define DANGER_STATUS 0x360
  25. #define SAFE_STATUS 0x364
  26. #define TE_LINE_INTERVAL 0x3F4
  27. #define TRAFFIC_SHAPER_EN BIT(31)
  28. #define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
  29. #define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
  30. #define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
  31. #define MDP_WD_TIMER_0_CTL 0x380
  32. #define MDP_WD_TIMER_0_CTL2 0x384
  33. #define MDP_WD_TIMER_0_LOAD_VALUE 0x388
  34. #define MDP_WD_TIMER_1_CTL 0x390
  35. #define MDP_WD_TIMER_1_CTL2 0x394
  36. #define MDP_WD_TIMER_1_LOAD_VALUE 0x398
  37. #define MDP_PERIPH_DBGBUS_CTRL 0x418
  38. #define MDP_WD_TIMER_2_CTL 0x420
  39. #define MDP_WD_TIMER_2_CTL2 0x424
  40. #define MDP_WD_TIMER_2_LOAD_VALUE 0x428
  41. #define MDP_WD_TIMER_3_CTL 0x430
  42. #define MDP_WD_TIMER_3_CTL2 0x434
  43. #define MDP_WD_TIMER_3_LOAD_VALUE 0x438
  44. #define MDP_WD_TIMER_4_CTL 0x440
  45. #define MDP_WD_TIMER_4_CTL2 0x444
  46. #define MDP_WD_TIMER_4_LOAD_VALUE 0x448
  47. #define MDP_PERIPH_TOP0 0x380
  48. #define MDP_SSPP_TOP2 0x3A8
  49. #define AUTOREFRESH_TEST_POINT 0x2
  50. #define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
  51. #define DCE_SEL 0x450
  52. #define MDP_SID_V2_VIG0 0x000
  53. #define MDP_SID_V2_DMA0 0x040
  54. #define MDP_SID_V2_CTL_0 0x100
  55. #define MDP_SID_V2_LTM0 0x400
  56. #define MDP_SID_V2_IPC_READ 0x200
  57. #define MDP_SID_V2_LUTDMA_RD 0x300
  58. #define MDP_SID_V2_LUTDMA_WR 0x304
  59. #define MDP_SID_V2_LUTDMA_SB_RD 0x308
  60. #define MDP_SID_V2_LUTDMA_VM_0 0x310
  61. #define MDP_SID_V2_DSI0 0x500
  62. #define MDP_SID_V2_DSI1 0x504
  63. #define MDP_SID_VIG0 0x0
  64. #define MDP_SID_VIG1 0x4
  65. #define MDP_SID_VIG2 0x8
  66. #define MDP_SID_VIG3 0xC
  67. #define MDP_SID_DMA0 0x10
  68. #define MDP_SID_DMA1 0x14
  69. #define MDP_SID_DMA2 0x18
  70. #define MDP_SID_DMA3 0x1C
  71. #define MDP_SID_ROT_RD 0x20
  72. #define MDP_SID_ROT_WR 0x24
  73. #define MDP_SID_WB2 0x28
  74. #define MDP_SID_XIN7 0x2C
  75. #define ROT_SID_ID_VAL 0x1c
  76. /* HW Fences */
  77. #define MDP_CTL_HW_FENCE_CTRL 0x14000
  78. #define MDP_CTL_HW_FENCE_ID_START_ADDR 0x14004
  79. #define MDP_CTL_HW_FENCE_ID_STATUS 0x14008
  80. #define MDP_CTL_HW_FENCE_ID_TIMESTAMP_CTRL 0x1400c
  81. #define MDP_CTL_HW_FENCE_INPUT_START_TIMESTAMP0 0x14010
  82. #define MDP_CTL_HW_FENCE_INPUT_START_TIMESTAMP1 0x14014
  83. #define MDP_CTL_HW_FENCE_INPUT_END_TIMESTAMP0 0x14018
  84. #define MDP_CTL_HW_FENCE_INPUT_END_TIMESTAMP1 0x1401c
  85. #define MDP_CTL_HW_FENCE_QOS 0x14020
  86. #define MDP_CTL_HW_FENCE_IDn_ISR 0x14050
  87. #define MDP_CTL_HW_FENCE_IDm_ADDR 0x14054
  88. #define MDP_CTL_HW_FENCE_IDm_DATA 0x14058
  89. #define MDP_CTL_HW_FENCE_IDm_MASK 0x1405c
  90. #define MDP_CTL_HW_FENCE_IDm_ATTR 0x14060
  91. #define HW_FENCE_IPCC_PROTOCOLp_CLIENTc_SEND(ba, p, c) ((ba+0xc) + (0x40000*p) + (0x1000*c))
  92. #define HW_FENCE_IPCC_PROTOCOLp_CLIENTc_RECV_ID(ba, p, c) ((ba+0x10) + (0x40000*p) + (0x1000*c))
  93. #define MDP_CTL_HW_FENCE_ID_OFFSET_n(base, n) (base + (0x14*n))
  94. #define MDP_CTL_HW_FENCE_ID_OFFSET_m(base, m) (base + (0x14*m))
  95. #define MDP_CTL_FENCE_ATTRS(devicetype, size, resp_req) \
  96. (((resp_req & 0x1) << 16) | ((size & 0x7) << 4) | (devicetype & 0xf))
  97. #define MDP_CTL_FENCE_ISR_OP_CODE(opcode, op0, op1, op2) \
  98. (((op2 & 0xff) << 24) | ((op1 & 0xff) << 16) | ((op0 & 0xff) << 8) | (opcode & 0xff))
  99. #define HW_FENCE_DPU_INPUT_FENCE_START_N 0
  100. #define HW_FENCE_DPU_OUTPUT_FENCE_START_N 4
  101. #define HW_FENCE_IPCC_FENCE_PROTOCOL_ID 4
  102. #define HW_FENCE_DPU_FENCE_PROTOCOL_ID 3
  103. #define HW_FENCE_QOS_PRIORITY 0x7
  104. #define HW_FENCE_QOS_PRIORITY_LVL 0x0
  105. static int ppb_offset_map[PINGPONG_MAX] = {1, 0, 3, 2, 5, 4, 7, 7, 6, 6, -1, -1};
/*
 * sde_hw_setup_split_pipe - program the split-display control registers
 * @mdp: mdp top-level hardware block
 * @cfg: split pipe configuration (enable, intf mode, interface, slave intf)
 *
 * Computes the lower/upper pipe control values based on the interface mode
 * and pp-split slave selection, then writes the split-flush, pipe control
 * and split-display enable registers.
 */
static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
		struct split_pipe_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c;
	u32 upper_pipe = 0;
	u32 lower_pipe = 0;

	if (!mdp || !cfg)
		return;

	c = &mdp->hw;

	if (test_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &mdp->caps->features) && cfg->en) {
		/* avoid programming of legacy bits like SW_TRG_MUX for new targets */
		if (cfg->mode == INTF_MODE_CMD) {
			lower_pipe |= BIT(mdp->caps->smart_panel_align_mode);
			upper_pipe = lower_pipe;

			/* a pp-split slave overrides lower pipe with free-run */
			if (cfg->pp_split_slave != INTF_MAX)
				lower_pipe = FLD_SMART_PANEL_FREE_RUN;
		}
	} else if (cfg->en) {
		if (cfg->mode == INTF_MODE_CMD) {
			lower_pipe = FLD_SPLIT_DISPLAY_CMD;
			/* interface controlling sw trigger */
			if (cfg->intf == INTF_2)
				lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
			else
				lower_pipe |= FLD_INTF_2_SW_TRG_MUX;

			/* free run: deliberately replaces the trigger-mux bits above */
			if (cfg->pp_split_slave != INTF_MAX)
				lower_pipe = FLD_SMART_PANEL_FREE_RUN;

			upper_pipe = lower_pipe;

			/* smart panel align mode (applied to lower pipe only) */
			lower_pipe |= BIT(mdp->caps->smart_panel_align_mode);
		} else {
			/* video mode: cross-couple the sw trigger muxes */
			if (cfg->intf == INTF_2) {
				lower_pipe = FLD_INTF_1_SW_TRG_MUX;
				upper_pipe = FLD_INTF_2_SW_TRG_MUX;
			} else {
				lower_pipe = FLD_INTF_2_SW_TRG_MUX;
				upper_pipe = FLD_INTF_1_SW_TRG_MUX;
			}
		}
	}

	SDE_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
	SDE_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
	SDE_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
	SDE_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
}
  152. static void sde_hw_setup_pp_split(struct sde_hw_mdp *mdp,
  153. struct split_pipe_cfg *cfg)
  154. {
  155. u32 ppb_config = 0x0;
  156. u32 ppb_control = 0x0;
  157. if (!mdp || !cfg)
  158. return;
  159. if (cfg->en && cfg->pp_split_slave != INTF_MAX) {
  160. ppb_config |= (cfg->pp_split_slave - INTF_0 + 1) << 20;
  161. ppb_config |= BIT(16); /* split enable */
  162. ppb_control = BIT(5); /* horz split*/
  163. }
  164. if (cfg->pp_split_index) {
  165. SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, 0x0);
  166. SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, 0x0);
  167. SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, ppb_config);
  168. SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, ppb_control);
  169. } else {
  170. SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, ppb_config);
  171. SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, ppb_control);
  172. SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, 0x0);
  173. SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, 0x0);
  174. }
  175. }
  176. static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp,
  177. struct cdm_output_cfg *cfg)
  178. {
  179. struct sde_hw_blk_reg_map *c;
  180. u32 out_ctl = 0;
  181. if (!mdp || !cfg)
  182. return;
  183. c = &mdp->hw;
  184. if (cfg->wb_en)
  185. out_ctl |= BIT(24);
  186. else if (cfg->intf_en)
  187. out_ctl |= BIT(19);
  188. SDE_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
  189. }
  190. static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp,
  191. enum sde_clk_ctrl_type clk_ctrl, bool enable)
  192. {
  193. struct sde_hw_blk_reg_map *c;
  194. u32 reg_off, bit_off;
  195. u32 reg_val, new_val;
  196. bool clk_forced_on;
  197. if (!mdp)
  198. return false;
  199. c = &mdp->hw;
  200. if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX)
  201. return false;
  202. reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
  203. bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
  204. reg_val = SDE_REG_READ(c, reg_off);
  205. if (enable)
  206. new_val = reg_val | BIT(bit_off);
  207. else
  208. new_val = reg_val & ~BIT(bit_off);
  209. SDE_REG_WRITE(c, reg_off, new_val);
  210. wmb(); /* ensure write finished before progressing */
  211. clk_forced_on = !(reg_val & BIT(bit_off));
  212. return clk_forced_on;
  213. }
  214. static int sde_hw_get_clk_ctrl_status(struct sde_hw_mdp *mdp,
  215. enum sde_clk_ctrl_type clk_ctrl, bool *status)
  216. {
  217. struct sde_hw_blk_reg_map *c;
  218. u32 reg_off, bit_off;
  219. if (!mdp)
  220. return -EINVAL;
  221. c = &mdp->hw;
  222. if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX ||
  223. !mdp->caps->clk_status[clk_ctrl].reg_off)
  224. return -EINVAL;
  225. reg_off = mdp->caps->clk_status[clk_ctrl].reg_off;
  226. bit_off = mdp->caps->clk_status[clk_ctrl].bit_off;
  227. *status = SDE_REG_READ(c, reg_off) & BIT(bit_off);
  228. return 0;
  229. }
/*
 * _update_vsync_source - program the watchdog timer used as vsync source
 * @mdp: mdp top-level hardware block
 * @cfg: vsync source configuration (source id and frame rate)
 *
 * Acts only when the selected source is one of the WD timers: selects that
 * timer's load/ctl/ctl2 register offsets, programs the load value derived
 * from the frame rate, clears the timer, then enables it.
 */
static void _update_vsync_source(struct sde_hw_mdp *mdp,
		struct sde_vsync_source_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c;
	u32 reg, wd_load_value, wd_ctl, wd_ctl2;

	if (!mdp || !cfg)
		return;

	c = &mdp->hw;

	/* NOTE(review): assumes WD_TIMER_4..WD_TIMER_0 are ascending,
	 * contiguous enum values — confirm against the enum definition.
	 */
	if (cfg->vsync_source >= SDE_VSYNC_SOURCE_WD_TIMER_4 &&
			cfg->vsync_source <= SDE_VSYNC_SOURCE_WD_TIMER_0) {
		switch (cfg->vsync_source) {
		case SDE_VSYNC_SOURCE_WD_TIMER_4:
			wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_4_CTL;
			wd_ctl2 = MDP_WD_TIMER_4_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_3:
			wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_3_CTL;
			wd_ctl2 = MDP_WD_TIMER_3_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_2:
			wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_2_CTL;
			wd_ctl2 = MDP_WD_TIMER_2_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_1:
			wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_1_CTL;
			wd_ctl2 = MDP_WD_TIMER_1_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_0:
		default:
			wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_0_CTL;
			wd_ctl2 = MDP_WD_TIMER_0_CTL2;
			break;
		}

		SDE_REG_WRITE(c, wd_load_value, CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));

		SDE_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
		reg = SDE_REG_READ(c, wd_ctl2);
		reg |= BIT(8); /* enable heartbeat timer */
		reg |= BIT(0); /* enable WD timer */
		SDE_REG_WRITE(c, wd_ctl2, reg);

		/* make sure that timers are enabled/disabled for vsync state */
		wmb();
	}
}
  278. static void sde_hw_setup_vsync_source(struct sde_hw_mdp *mdp,
  279. struct sde_vsync_source_cfg *cfg)
  280. {
  281. struct sde_hw_blk_reg_map *c;
  282. u32 reg, i;
  283. static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
  284. if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
  285. return;
  286. c = &mdp->hw;
  287. reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
  288. for (i = 0; i < cfg->pp_count; i++) {
  289. int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
  290. if (pp_idx >= ARRAY_SIZE(pp_offset))
  291. continue;
  292. reg &= ~(0xf << pp_offset[pp_idx]);
  293. reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
  294. }
  295. SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
  296. _update_vsync_source(mdp, cfg);
  297. }
  298. static void sde_hw_setup_vsync_source_v1(struct sde_hw_mdp *mdp,
  299. struct sde_vsync_source_cfg *cfg)
  300. {
  301. _update_vsync_source(mdp, cfg);
  302. }
/*
 * sde_hw_reset_ubwc - program the UBWC static/control registers according to
 * the decoder version read from hardware and the encoder version from the
 * catalog.
 * @mdp: mdp top-level hardware block
 * @m: catalog configuration (ubwc_rev, swizzle, bank bits, macrotile mode)
 */
void sde_hw_reset_ubwc(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m)
{
	struct sde_hw_blk_reg_map c;
	u32 ubwc_dec_version;
	u32 ubwc_enc_version;

	if (!mdp || !m)
		return;

	/* force blk offset to zero to access beginning of register region */
	c = mdp->hw;
	c.blk_off = 0x0;

	ubwc_dec_version = SDE_REG_READ(&c, UBWC_DEC_HW_VERSION);
	/* global ubwc version used in input fb encoding */
	ubwc_enc_version = m->ubwc_rev;

	if (IS_UBWC_40_SUPPORTED(ubwc_dec_version) || IS_UBWC_43_SUPPORTED(ubwc_dec_version)) {
		/* for UBWC 2.0 ver = 0, mode = 0 will be programmed */
		u32 ver = 0;
		u32 mode = 0;
		u32 reg = (m->mdp[0].ubwc_swizzle & 0x7) |
			((m->mdp[0].ubwc_static & 0x1) << 3) |
			((m->mdp[0].highest_bank_bit & 0x7) << 4) |
			((m->macrotile_mode & 0x1) << 12);

		/* map encoder revision to the version/mode register values */
		if (IS_UBWC_43_SUPPORTED(ubwc_enc_version)) {
			ver = 3;
			mode = 1;
		} else if (IS_UBWC_40_SUPPORTED(ubwc_enc_version)) {
			ver = 2;
			mode = 1;
		} else if (IS_UBWC_30_SUPPORTED(ubwc_enc_version)) {
			ver = 1;
		}

		SDE_REG_WRITE(&c, UBWC_STATIC, reg);
		SDE_REG_WRITE(&c, UBWC_CTRL_2, ver);
		SDE_REG_WRITE(&c, UBWC_PREDICTION_MODE, mode);
	} else if (IS_UBWC_20_SUPPORTED(ubwc_dec_version)) {
		/* UBWC 2.0: static value written verbatim from the catalog */
		SDE_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
	} else if (IS_UBWC_30_SUPPORTED(ubwc_dec_version)) {
		u32 reg = m->mdp[0].ubwc_static |
			(m->mdp[0].ubwc_swizzle & 0x1) |
			((m->mdp[0].highest_bank_bit & 0x3) << 4) |
			((m->macrotile_mode & 0x1) << 12);

		/* encoder-version flag bits for the 3.0 decoder */
		if (IS_UBWC_30_SUPPORTED(ubwc_enc_version))
			reg |= BIT(10);
		if (IS_UBWC_10_SUPPORTED(ubwc_enc_version))
			reg |= BIT(8);

		SDE_REG_WRITE(&c, UBWC_STATIC, reg);
	} else {
		SDE_ERROR("unsupported ubwc decoder version 0x%08x\n", ubwc_dec_version);
	}
}
  352. static void sde_hw_intf_audio_select(struct sde_hw_mdp *mdp)
  353. {
  354. struct sde_hw_blk_reg_map *c;
  355. if (!mdp)
  356. return;
  357. c = &mdp->hw;
  358. SDE_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
  359. }
  360. static void sde_hw_mdp_events(struct sde_hw_mdp *mdp, bool enable)
  361. {
  362. struct sde_hw_blk_reg_map *c;
  363. if (!mdp)
  364. return;
  365. c = &mdp->hw;
  366. SDE_REG_WRITE(c, HW_EVENTS_CTL, enable);
  367. }
/*
 * sde_hw_set_vm_sid_v2 - program the VM id into all v2 SID client registers
 * @sid: SID hardware block
 * @vm: virtual machine id; written shifted left by 2 into each register
 * @m: catalog configuration (provides ctl/ltm counts)
 *
 * Covers CTL, LTM, per-CTL LUTDMA-VM (newer major revisions only), IPC read,
 * LUTDMA read/write/side-band, and both DSI clients.
 */
void sde_hw_set_vm_sid_v2(struct sde_hw_sid *sid, u32 vm, struct sde_mdss_cfg *m)
{
	u32 offset = 0;
	int i;

	if (!sid || !m)
		return;

	for (i = 0; i < m->ctl_count; i++) {
		offset = MDP_SID_V2_CTL_0 + (i * 4);
		SDE_REG_WRITE(&sid->hw, offset, vm << 2);
	}

	for (i = 0; i < m->ltm_count; i++) {
		offset = MDP_SID_V2_LTM0 + (i * 4);
		SDE_REG_WRITE(&sid->hw, offset, vm << 2);
	}

	/* per-CTL LUTDMA VM registers exist only from this major revision on */
	if (SDE_HW_MAJOR(sid->hw.hw_rev) >= SDE_HW_MAJOR(SDE_HW_VER_A00)) {
		for (i = 0; i < m->ctl_count; i++) {
			offset = MDP_SID_V2_LUTDMA_VM_0 + (i * 4);
			SDE_REG_WRITE(&sid->hw, offset, vm << 2);
		}
	}

	SDE_REG_WRITE(&sid->hw, MDP_SID_V2_IPC_READ, vm << 2);
	SDE_REG_WRITE(&sid->hw, MDP_SID_V2_LUTDMA_RD, vm << 2);
	SDE_REG_WRITE(&sid->hw, MDP_SID_V2_LUTDMA_WR, vm << 2);
	SDE_REG_WRITE(&sid->hw, MDP_SID_V2_LUTDMA_SB_RD, vm << 2);
	SDE_REG_WRITE(&sid->hw, MDP_SID_V2_DSI0, vm << 2);
	SDE_REG_WRITE(&sid->hw, MDP_SID_V2_DSI1, vm << 2);
}
  395. void sde_hw_set_vm_sid(struct sde_hw_sid *sid, u32 vm, struct sde_mdss_cfg *m)
  396. {
  397. if (!sid || !m)
  398. return;
  399. SDE_REG_WRITE(&sid->hw, MDP_SID_XIN7, vm << 2);
  400. }
  401. struct sde_hw_sid *sde_hw_sid_init(void __iomem *addr,
  402. u32 sid_len, const struct sde_mdss_cfg *m)
  403. {
  404. struct sde_hw_sid *c;
  405. c = kzalloc(sizeof(*c), GFP_KERNEL);
  406. if (!c)
  407. return ERR_PTR(-ENOMEM);
  408. c->hw.base_off = addr;
  409. c->hw.blk_off = 0;
  410. c->hw.length = sid_len;
  411. c->hw.hw_rev = m->hw_rev;
  412. c->hw.log_mask = SDE_DBG_MASK_SID;
  413. if (IS_SDE_SID_REV_200(m->sid_rev))
  414. c->ops.set_vm_sid = sde_hw_set_vm_sid_v2;
  415. else
  416. c->ops.set_vm_sid = sde_hw_set_vm_sid;
  417. return c;
  418. }
  419. void sde_hw_set_rotator_sid(struct sde_hw_sid *sid)
  420. {
  421. if (!sid)
  422. return;
  423. SDE_REG_WRITE(&sid->hw, MDP_SID_ROT_RD, ROT_SID_ID_VAL);
  424. SDE_REG_WRITE(&sid->hw, MDP_SID_ROT_WR, ROT_SID_ID_VAL);
  425. }
  426. void sde_hw_set_sspp_sid(struct sde_hw_sid *sid, u32 pipe, u32 vm,
  427. struct sde_mdss_cfg *m)
  428. {
  429. u32 offset = 0;
  430. u32 vig_sid_offset = MDP_SID_VIG0;
  431. u32 dma_sid_offset = MDP_SID_DMA0;
  432. if (!sid)
  433. return;
  434. if (IS_SDE_SID_REV_200(m->sid_rev)) {
  435. vig_sid_offset = MDP_SID_V2_VIG0;
  436. dma_sid_offset = MDP_SID_V2_DMA0;
  437. }
  438. if (SDE_SSPP_VALID_VIG(pipe))
  439. offset = vig_sid_offset + ((pipe - SSPP_VIG0) * 4);
  440. else if (SDE_SSPP_VALID_DMA(pipe))
  441. offset = dma_sid_offset + ((pipe - SSPP_DMA0) * 4);
  442. else
  443. return;
  444. SDE_REG_WRITE(&sid->hw, offset, vm << 2);
  445. }
  446. static void sde_hw_program_cwb_ppb_ctrl(struct sde_hw_mdp *mdp,
  447. bool dual, bool dspp_out)
  448. {
  449. u32 value = dspp_out ? 0x4 : 0x0;
  450. SDE_REG_WRITE(&mdp->hw, PPB2_CNTL, value);
  451. if (dual) {
  452. value |= 0x1;
  453. SDE_REG_WRITE(&mdp->hw, PPB3_CNTL, value);
  454. }
  455. }
  456. static void sde_hw_set_hdr_plus_metadata(struct sde_hw_mdp *mdp,
  457. u8 *payload, u32 len, u32 stream_id)
  458. {
  459. u32 i, b;
  460. u32 length = len - 1;
  461. u32 d_offset, nb_offset, data = 0;
  462. const u32 dword_size = sizeof(u32);
  463. bool is_4k_aligned = mdp->caps->features &
  464. BIT(SDE_MDP_DHDR_MEMPOOL_4K);
  465. if (!payload || !len) {
  466. SDE_ERROR("invalid payload with length: %d\n", len);
  467. return;
  468. }
  469. if (stream_id) {
  470. if (is_4k_aligned) {
  471. d_offset = DP_DHDR_MEM_POOL_1_DATA_4K;
  472. nb_offset = DP_DHDR_MEM_POOL_1_NUM_BYTES_4K;
  473. } else {
  474. d_offset = DP_DHDR_MEM_POOL_1_DATA;
  475. nb_offset = DP_DHDR_MEM_POOL_1_NUM_BYTES;
  476. }
  477. } else {
  478. if (is_4k_aligned) {
  479. d_offset = DP_DHDR_MEM_POOL_0_DATA_4K;
  480. nb_offset = DP_DHDR_MEM_POOL_0_NUM_BYTES_4K;
  481. } else {
  482. d_offset = DP_DHDR_MEM_POOL_0_DATA;
  483. nb_offset = DP_DHDR_MEM_POOL_0_NUM_BYTES;
  484. }
  485. }
  486. /* payload[0] is set in VSCEXT header byte 1, skip programming here */
  487. SDE_REG_WRITE(&mdp->hw, nb_offset, length);
  488. for (i = 1; i < len; i += dword_size) {
  489. for (b = 0; (i + b) < len && b < dword_size; b++)
  490. data |= payload[i + b] << (8 * b);
  491. SDE_REG_WRITE(&mdp->hw, d_offset, data);
  492. data = 0;
  493. }
  494. }
  495. static u32 sde_hw_get_autorefresh_status(struct sde_hw_mdp *mdp, u32 intf_idx)
  496. {
  497. struct sde_hw_blk_reg_map *c;
  498. u32 autorefresh_status;
  499. u32 blk_id = (intf_idx == INTF_2) ? 65 : 64;
  500. if (!mdp)
  501. return 0;
  502. c = &mdp->hw;
  503. SDE_REG_WRITE(&mdp->hw, MDP_PERIPH_DBGBUS_CTRL,
  504. TEST_MASK(blk_id, AUTOREFRESH_TEST_POINT));
  505. SDE_REG_WRITE(&mdp->hw, MDP_DSPP_DBGBUS_CTRL, 0x7001);
  506. wmb(); /* make sure test bits were written */
  507. autorefresh_status = SDE_REG_READ(&mdp->hw, MDP_DSPP_DBGBUS_STATUS);
  508. SDE_REG_WRITE(&mdp->hw, MDP_PERIPH_DBGBUS_CTRL, 0x0);
  509. return autorefresh_status;
  510. }
  511. static void sde_hw_hw_fence_timestamp_ctrl(struct sde_hw_mdp *mdp, bool enable, bool clear)
  512. {
  513. struct sde_hw_blk_reg_map c;
  514. u32 val;
  515. if (!mdp) {
  516. SDE_ERROR("invalid mdp, won't enable hw-fence timestamping\n");
  517. return;
  518. }
  519. /* start from the base-address of the mdss */
  520. c = mdp->hw;
  521. c.blk_off = 0x0;
  522. val = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_ID_TIMESTAMP_CTRL);
  523. if (enable)
  524. val |= BIT(0);
  525. else
  526. val &= ~BIT(0);
  527. if (clear)
  528. val |= BIT(1);
  529. else
  530. val &= ~BIT(1);
  531. SDE_REG_WRITE(&c, MDP_CTL_HW_FENCE_ID_TIMESTAMP_CTRL, val);
  532. }
/*
 * sde_hw_input_hw_fence_status - read and clear input hw-fence timestamps
 * @mdp: mdp top-level hardware block
 * @s_val: out parameter; 64-bit start timestamp (high:low register pair)
 * @e_val: out parameter; 64-bit end timestamp (high:low register pair)
 *
 * Reads the captured start/end timestamps, then disables timestamping and
 * clears the captured values via the control register.
 */
static void sde_hw_input_hw_fence_status(struct sde_hw_mdp *mdp, u64 *s_val, u64 *e_val)
{
	u32 start_h, start_l, end_h, end_l;
	struct sde_hw_blk_reg_map c;

	if (!mdp || IS_ERR_OR_NULL(s_val) || IS_ERR_OR_NULL(e_val)) {
		SDE_ERROR("invalid mdp\n");
		return;
	}

	/* start from the base-address of the mdss */
	c = mdp->hw;
	c.blk_off = 0x0;

	start_l = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_INPUT_START_TIMESTAMP0);
	start_h = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_INPUT_START_TIMESTAMP1);
	*s_val = (u64)start_h << 32 | start_l;

	end_l = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_INPUT_END_TIMESTAMP0);
	end_h = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_INPUT_END_TIMESTAMP1);
	*e_val = (u64)end_h << 32 | end_l;

	/* clear the timestamps (disable=false, clear=true) */
	sde_hw_hw_fence_timestamp_ctrl(mdp, false, true);

	wmb(); /* make sure the timestamps are cleared */
}
/*
 * _sde_hw_setup_hw_input_fences_config - program the common + input-fence
 * portion of the DPU hw-fence engine
 * @protocol_id: IPCC protocol id used by the fence controller
 * @client_phys_id: IPCC physical client id for this DPU
 * @ipcc_base_addr: physical base of the IPCC register region
 * @c: register map already rebased to the start of the mdss region
 *
 * Programs the fence control/QOS registers, the ISR op-list start indices,
 * and the micro-op sequence executed on an input-fence signal. The opcode
 * constants below encode the fence engine's micro-op ISA; NOTE(review):
 * their meanings are taken from the inline comments — confirm against the
 * hw-fence programming guide.
 */
static void _sde_hw_setup_hw_input_fences_config(u32 protocol_id, u32 client_phys_id,
		unsigned long ipcc_base_addr, struct sde_hw_blk_reg_map *c)
{
	u32 val, offset;

	/*select ipcc protocol id for dpu */
	val = (protocol_id == HW_FENCE_IPCC_FENCE_PROTOCOL_ID) ?
		HW_FENCE_DPU_FENCE_PROTOCOL_ID : protocol_id;
	SDE_REG_WRITE(c, MDP_CTL_HW_FENCE_CTRL, val);

	/* set QOS priority */
	val = (HW_FENCE_QOS_PRIORITY_LVL << 4) | (HW_FENCE_QOS_PRIORITY & 0x7);
	SDE_REG_WRITE(c, MDP_CTL_HW_FENCE_QOS, val);

	/* configure the start of the FENCE_IDn_ISR ops for input and output fence isr's */
	val = (HW_FENCE_DPU_OUTPUT_FENCE_START_N << 16) | (HW_FENCE_DPU_INPUT_FENCE_START_N & 0xFF);
	SDE_REG_WRITE(c, MDP_CTL_HW_FENCE_ID_START_ADDR, val);

	/* setup input fence isr */

	/* configure the attribs for the isr read_reg op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ADDR, 0);
	val = HW_FENCE_IPCC_PROTOCOLp_CLIENTc_RECV_ID(ipcc_base_addr,
		protocol_id, client_phys_id);
	SDE_REG_WRITE(c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 0);
	val = MDP_CTL_FENCE_ATTRS(0x1, 0x2, 0x1);
	SDE_REG_WRITE(c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_MASK, 0);
	SDE_REG_WRITE(c, offset, 0xFFFFFFFF);

	/* configure the attribs for the write if eq data */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_DATA, 1);
	SDE_REG_WRITE(c, offset, 0x1);

	/* program input-fence isr ops */

	/* set read_reg op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		HW_FENCE_DPU_INPUT_FENCE_START_N);
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x0, 0x0, 0x0, 0x0);
	SDE_REG_WRITE(c, offset, val);

	/* set write if eq op for flush ready */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_INPUT_FENCE_START_N + 1));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x7, 0x0, 0x1, 0x0);
	SDE_REG_WRITE(c, offset, val);

	/* set exit op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_INPUT_FENCE_START_N + 2));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0xf, 0x0, 0x0, 0x0);
	SDE_REG_WRITE(c, offset, val);
}
/*
 * sde_hw_setup_hw_fences_config - configure DPU hw-fences (register-write
 * signaling variant)
 * @mdp: mdp top-level hardware block
 * @protocol_id: IPCC protocol id
 * @client_phys_id: IPCC physical client id for this DPU
 * @ipcc_base_addr: physical base of the IPCC register region
 *
 * Programs the shared/input-fence configuration, then the output-fence ISR:
 * load the IPCC send doorbell address, write it via write_reg, and exit.
 */
static void sde_hw_setup_hw_fences_config(struct sde_hw_mdp *mdp, u32 protocol_id,
		u32 client_phys_id, unsigned long ipcc_base_addr)
{
	u32 val, offset;
	struct sde_hw_blk_reg_map c;

	if (!mdp) {
		SDE_ERROR("invalid mdp, won't configure hw-fences\n");
		return;
	}

	/* fence registers are addressed from the start of the mdss region */
	c = mdp->hw;
	c.blk_off = 0x0;

	_sde_hw_setup_hw_input_fences_config(protocol_id, client_phys_id, ipcc_base_addr, &c);

	/*setup output fence isr */

	/* configure the attribs for the isr load_data op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ADDR, 4);
	val = HW_FENCE_IPCC_PROTOCOLp_CLIENTc_SEND(ipcc_base_addr,
		protocol_id, client_phys_id);
	SDE_REG_WRITE(&c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 4);
	val = MDP_CTL_FENCE_ATTRS(0x1, 0x2, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_MASK, 4);
	SDE_REG_WRITE(&c, offset, 0xFFFFFFFF);

	/* program output-fence isr ops */

	/* set load_data op*/
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		HW_FENCE_DPU_OUTPUT_FENCE_START_N);
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x6, 0x0, 0x4, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set write_reg op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 1));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x2, 0x4, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set exit op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 2));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0xf, 0x0, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);
}
  639. void sde_hw_top_set_ppb_fifo_size(struct sde_hw_mdp *mdp, u32 pp, u32 sz)
  640. {
  641. struct sde_hw_blk_reg_map c;
  642. u32 offset, val, pp_index;
  643. if (!mdp) {
  644. SDE_ERROR("invalid mdp instance\n");
  645. return;
  646. }
  647. if (pp >= PINGPONG_MAX || ppb_offset_map[pp - PINGPONG_0] < 0) {
  648. SDE_ERROR("invalid pingpong index:%d max:%d\n", pp, PINGPONG_MAX);
  649. return;
  650. }
  651. pp_index = pp - PINGPONG_0;
  652. c = mdp->hw;
  653. offset = PPB_FIFO_SIZE + ((ppb_offset_map[pp_index] / 2) * 0x4);
  654. spin_lock(&mdp->slock);
  655. /* read, modify & update *respective 16 bit fields */
  656. val = SDE_REG_READ(&c, offset);
  657. /* divide by 4 as each fifo entry can store 4 pixels */
  658. sz = (sz / MDP_PPB_FIFO_ENTRY_SIZE) & 0xFFFF;
  659. sz = ppb_offset_map[pp_index] % 2 ? (sz << 16) : sz;
  660. val = (ppb_offset_map[pp_index] % 2) ? (val & 0xFFFF) : (val & 0xFFFF0000);
  661. SDE_REG_WRITE(&c, offset, val | sz);
  662. spin_unlock(&mdp->slock);
  663. }
/*
 * sde_hw_setup_hw_fences_config_with_dir_write - configure DPU hw-fences
 * (direct-write signaling variant)
 * @mdp: mdp top-level hardware block
 * @protocol_id: IPCC protocol id
 * @client_phys_id: IPCC physical client id for this DPU
 * @ipcc_base_addr: physical base of the IPCC register region
 *
 * Same as sde_hw_setup_hw_fences_config() except the output-fence ISR
 * sequence inserts write_direct and wait micro-ops before the final
 * write_reg/exit pair.
 */
static void sde_hw_setup_hw_fences_config_with_dir_write(struct sde_hw_mdp *mdp, u32 protocol_id,
	u32 client_phys_id, unsigned long ipcc_base_addr)
{
	u32 val, offset;
	struct sde_hw_blk_reg_map c;

	if (!mdp) {
		SDE_ERROR("invalid mdp, won't configure hw-fences\n");
		return;
	}

	/* fence registers are addressed from the start of the mdss region */
	c = mdp->hw;
	c.blk_off = 0x0;

	_sde_hw_setup_hw_input_fences_config(protocol_id, client_phys_id, ipcc_base_addr, &c);

	/*setup output fence isr */

	/* configure the attribs for the isr load_data op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ADDR, 4);
	val = HW_FENCE_IPCC_PROTOCOLp_CLIENTc_SEND(ipcc_base_addr,
		protocol_id, client_phys_id);
	SDE_REG_WRITE(&c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 4);
	val = MDP_CTL_FENCE_ATTRS(0x1, 0x2, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_MASK, 4);
	SDE_REG_WRITE(&c, offset, 0xFFFFFFFF);

	/* program output-fence isr ops */

	/* set load_data op*/
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		HW_FENCE_DPU_OUTPUT_FENCE_START_N);
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x6, 0x0, 0x4, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set write_direct op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 1));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x3, 0x0, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set wait op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 2));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x4, 0x1, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set write_reg op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 3));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x2, 0x4, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set exit op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 4));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0xf, 0x0, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);
}
  714. static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops, unsigned long cap, u32 hw_fence_rev)
  715. {
  716. ops->setup_split_pipe = sde_hw_setup_split_pipe;
  717. ops->setup_pp_split = sde_hw_setup_pp_split;
  718. ops->setup_cdm_output = sde_hw_setup_cdm_output;
  719. ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
  720. ops->get_clk_ctrl_status = sde_hw_get_clk_ctrl_status;
  721. ops->set_cwb_ppb_cntl = sde_hw_program_cwb_ppb_ctrl;
  722. ops->reset_ubwc = sde_hw_reset_ubwc;
  723. ops->intf_audio_select = sde_hw_intf_audio_select;
  724. ops->set_mdp_hw_events = sde_hw_mdp_events;
  725. if (cap & BIT(SDE_MDP_VSYNC_SEL))
  726. ops->setup_vsync_source = sde_hw_setup_vsync_source;
  727. else if (cap & BIT(SDE_MDP_WD_TIMER))
  728. ops->setup_vsync_source = sde_hw_setup_vsync_source_v1;
  729. if (cap & BIT(SDE_MDP_DHDR_MEMPOOL_4K) ||
  730. cap & BIT(SDE_MDP_DHDR_MEMPOOL))
  731. ops->set_hdr_plus_metadata = sde_hw_set_hdr_plus_metadata;
  732. ops->get_autorefresh_status = sde_hw_get_autorefresh_status;
  733. if (hw_fence_rev) {
  734. if (cap & BIT(SDE_MDP_HW_FENCE_DIR_WRITE))
  735. ops->setup_hw_fences = sde_hw_setup_hw_fences_config_with_dir_write;
  736. else
  737. ops->setup_hw_fences = sde_hw_setup_hw_fences_config;
  738. ops->hw_fence_input_timestamp_ctrl = sde_hw_hw_fence_timestamp_ctrl;
  739. ops->hw_fence_input_status = sde_hw_input_hw_fence_status;
  740. }
  741. if (cap & BIT(SDE_MDP_TOP_PPB_SET_SIZE))
  742. ops->set_ppb_fifo_size = sde_hw_top_set_ppb_fifo_size;
  743. }
  744. static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp,
  745. const struct sde_mdss_cfg *m,
  746. void __iomem *addr,
  747. struct sde_hw_blk_reg_map *b)
  748. {
  749. int i;
  750. if (!m || !addr || !b)
  751. return ERR_PTR(-EINVAL);
  752. for (i = 0; i < m->mdp_count; i++) {
  753. if (mdp == m->mdp[i].id) {
  754. b->base_off = addr;
  755. b->blk_off = m->mdp[i].base;
  756. b->length = m->mdp[i].len;
  757. b->hw_rev = m->hw_rev;
  758. b->log_mask = SDE_DBG_MASK_TOP;
  759. return &m->mdp[i];
  760. }
  761. }
  762. return ERR_PTR(-EINVAL);
  763. }
/*
 * sde_hw_mdptop_init - allocate and initialize the mdp top-level block
 * @idx: mdp block id to initialize
 * @addr: mapped register base
 * @m: catalog configuration
 *
 * Looks up the catalog entry, populates the ops table from the feature
 * capabilities, and registers the relevant register ranges with the debug
 * facility.
 *
 * Return: new block on success, ERR_PTR(-EINVAL) on bad arguments or
 * unknown id, ERR_PTR(-ENOMEM) on allocation failure. Free with
 * sde_hw_mdp_destroy().
 */
struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
		void __iomem *addr,
		const struct sde_mdss_cfg *m)
{
	struct sde_hw_mdp *mdp;
	const struct sde_mdp_cfg *cfg;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
	if (!mdp)
		return ERR_PTR(-ENOMEM);

	cfg = _top_offset(idx, m, addr, &mdp->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(mdp);
		return ERR_PTR(-EINVAL);
	}
	spin_lock_init(&mdp->slock);

	/*
	 * Assign ops
	 */
	mdp->idx = idx;
	mdp->caps = cfg;
	_setup_mdp_ops(&mdp->ops, mdp->caps->features, m->hw_fence_rev);

	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "mdss_hw", 0,
			m->mdss_hw_block_size, 0);

	if (test_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &m->mdp[0].features)) {
		char name[SDE_HW_BLK_NAME_LEN];

		snprintf(name, sizeof(name), "%s_1", cfg->name);

		/* split dump ranges around the removed peripheral region */
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, mdp->hw.blk_off,
				mdp->hw.blk_off + MDP_PERIPH_TOP0, mdp->hw.xin_id);
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, name, mdp->hw.blk_off + MDP_SSPP_TOP2,
				mdp->hw.blk_off + mdp->hw.length, mdp->hw.xin_id);

		/* do not use blk_off, following offsets start from mdp_phys */
		if (m->hw_fence_rev) {
			sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "hw_fence",
					MDP_CTL_HW_FENCE_CTRL,
					MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 5),
					mdp->hw.xin_id);
		}
	} else {
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
				mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length,
				mdp->hw.xin_id);
	}
	sde_dbg_set_sde_top_offset(mdp->hw.blk_off);

	return mdp;
}
/*
 * sde_hw_mdp_destroy - free a block allocated by sde_hw_mdptop_init()
 * @mdp: block to free; may be NULL (kfree(NULL) is a no-op)
 */
void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp)
{
	kfree(mdp);
}