/* sde_hw_top.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
  7. #include "sde_hwio.h"
  8. #include "sde_hw_catalog.h"
  9. #include "sde_hw_top.h"
  10. #include "sde_dbg.h"
  11. #include "sde_kms.h"
  12. #define SSPP_SPARE 0x28
  13. #define UBWC_DEC_HW_VERSION 0x058
  14. #define UBWC_STATIC 0x144
  15. #define UBWC_CTRL_2 0x150
  16. #define UBWC_PREDICTION_MODE 0x154
  17. #define FLD_SPLIT_DISPLAY_CMD BIT(1)
  18. #define FLD_SMART_PANEL_FREE_RUN BIT(2)
  19. #define FLD_INTF_1_SW_TRG_MUX BIT(4)
  20. #define FLD_INTF_2_SW_TRG_MUX BIT(8)
  21. #define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
  22. #define MDP_DSPP_DBGBUS_CTRL 0x348
  23. #define MDP_DSPP_DBGBUS_STATUS 0x34C
  24. #define DANGER_STATUS 0x360
  25. #define SAFE_STATUS 0x364
  26. #define TE_LINE_INTERVAL 0x3F4
  27. #define TRAFFIC_SHAPER_EN BIT(31)
  28. #define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
  29. #define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
  30. #define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
  31. #define MDP_WD_TIMER_0_CTL 0x380
  32. #define MDP_WD_TIMER_0_CTL2 0x384
  33. #define MDP_WD_TIMER_0_LOAD_VALUE 0x388
  34. #define MDP_WD_TIMER_1_CTL 0x390
  35. #define MDP_WD_TIMER_1_CTL2 0x394
  36. #define MDP_WD_TIMER_1_LOAD_VALUE 0x398
  37. #define MDP_PERIPH_DBGBUS_CTRL 0x418
  38. #define MDP_WD_TIMER_2_CTL 0x420
  39. #define MDP_WD_TIMER_2_CTL2 0x424
  40. #define MDP_WD_TIMER_2_LOAD_VALUE 0x428
  41. #define MDP_WD_TIMER_3_CTL 0x430
  42. #define MDP_WD_TIMER_3_CTL2 0x434
  43. #define MDP_WD_TIMER_3_LOAD_VALUE 0x438
  44. #define MDP_WD_TIMER_4_CTL 0x440
  45. #define MDP_WD_TIMER_4_CTL2 0x444
  46. #define MDP_WD_TIMER_4_LOAD_VALUE 0x448
  47. #define MDP_PERIPH_TOP0 0x380
  48. #define MDP_SSPP_TOP2 0x3A8
  49. #define AUTOREFRESH_TEST_POINT 0x2
  50. #define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
  51. #define DCE_SEL 0x450
  52. #define MDP_SID_V2_VIG0 0x000
  53. #define MDP_SID_V2_DMA0 0x040
  54. #define MDP_SID_V2_CTL_0 0x100
  55. #define MDP_SID_V2_LTM0 0x400
  56. #define MDP_SID_V2_IPC_READ 0x200
  57. #define MDP_SID_V2_LUTDMA_RD 0x300
  58. #define MDP_SID_V2_LUTDMA_WR 0x304
  59. #define MDP_SID_V2_LUTDMA_SB_RD 0x308
  60. #define MDP_SID_V2_LUTDMA_VM_0 0x310
  61. #define MDP_SID_V2_DSI0 0x500
  62. #define MDP_SID_V2_DSI1 0x504
  63. #define MDP_SID_VIG0 0x0
  64. #define MDP_SID_VIG1 0x4
  65. #define MDP_SID_VIG2 0x8
  66. #define MDP_SID_VIG3 0xC
  67. #define MDP_SID_DMA0 0x10
  68. #define MDP_SID_DMA1 0x14
  69. #define MDP_SID_DMA2 0x18
  70. #define MDP_SID_DMA3 0x1C
  71. #define MDP_SID_ROT_RD 0x20
  72. #define MDP_SID_ROT_WR 0x24
  73. #define MDP_SID_WB2 0x28
  74. #define MDP_SID_XIN7 0x2C
  75. #define ROT_SID_ID_VAL 0x1c
  76. /* HW Fences */
  77. #define MDP_CTL_HW_FENCE_CTRL 0x14000
  78. #define MDP_CTL_HW_FENCE_ID_START_ADDR 0x14004
  79. #define MDP_CTL_HW_FENCE_ID_STATUS 0x14008
  80. #define MDP_CTL_HW_FENCE_ID_TIMESTAMP_CTRL 0x1400c
  81. #define MDP_CTL_HW_FENCE_INPUT_START_TIMESTAMP0 0x14010
  82. #define MDP_CTL_HW_FENCE_INPUT_START_TIMESTAMP1 0x14014
  83. #define MDP_CTL_HW_FENCE_INPUT_END_TIMESTAMP0 0x14018
  84. #define MDP_CTL_HW_FENCE_INPUT_END_TIMESTAMP1 0x1401c
  85. #define MDP_CTL_HW_FENCE_QOS 0x14020
  86. #define MDP_CTL_HW_FENCE_IDn_ISR 0x14050
  87. #define MDP_CTL_HW_FENCE_IDm_ADDR 0x14054
  88. #define MDP_CTL_HW_FENCE_IDm_DATA 0x14058
  89. #define MDP_CTL_HW_FENCE_IDm_MASK 0x1405c
  90. #define MDP_CTL_HW_FENCE_IDm_ATTR 0x14060
  91. #define HW_FENCE_IPCC_PROTOCOLp_CLIENTc_SEND(ba, p, c) ((ba+0xc) + (0x40000*p) + (0x1000*c))
  92. #define HW_FENCE_IPCC_PROTOCOLp_CLIENTc_RECV_ID(ba, p, c) ((ba+0x10) + (0x40000*p) + (0x1000*c))
  93. #define MDP_CTL_HW_FENCE_ID_OFFSET_n(base, n) (base + (0x14*n))
  94. #define MDP_CTL_HW_FENCE_ID_OFFSET_m(base, m) (base + (0x14*m))
  95. #define MDP_CTL_FENCE_ATTRS(devicetype, size, resp_req) \
  96. (((resp_req & 0x1) << 16) | ((size & 0x7) << 4) | (devicetype & 0xf))
  97. #define MDP_CTL_FENCE_ISR_OP_CODE(opcode, op0, op1, op2) \
  98. (((op2 & 0xff) << 24) | ((op1 & 0xff) << 16) | ((op0 & 0xff) << 8) | (opcode & 0xff))
  99. #define HW_FENCE_DPU_INPUT_FENCE_START_N 0
  100. #define HW_FENCE_DPU_OUTPUT_FENCE_START_N 4
  101. #define HW_FENCE_IPCC_FENCE_PROTOCOL_ID 4
  102. #define HW_FENCE_DPU_FENCE_PROTOCOL_ID 3
/*
 * Maps a pingpong index (pp - PINGPONG_0) to its PPB instance number.
 * Pairs of pingpongs share one 32-bit PPB_FIFO_SIZE register (see
 * sde_hw_top_set_ppb_fifo_size): entry / 2 selects the register and
 * entry % 2 selects the 16-bit half-word. -1 marks pingpongs without
 * a PPB mapping.
 */
static int ppb_offset_map[PINGPONG_MAX] = {1, 0, 3, 2, 5, 4, 7, 7, 6, 6, -1, -1};
/*
 * sde_hw_setup_split_pipe - program split-display control for dual-pipe output
 * @mdp: mdp top hardware block
 * @cfg: split pipe configuration (enable, intf mode, master intf, slave intf)
 *
 * Computes the lower/upper pipe control words, then writes the split-flush
 * enable, both pipe control registers, and the global split-display enable.
 */
static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
		struct split_pipe_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c;
	u32 upper_pipe = 0;
	u32 lower_pipe = 0;

	if (!mdp || !cfg)
		return;

	c = &mdp->hw;

	if (test_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &mdp->caps->features) && cfg->en) {
		/* avoid programming of legacy bits like SW_TRG_MUX for new targets */
		if (cfg->mode == INTF_MODE_CMD) {
			lower_pipe |= BIT(mdp->caps->smart_panel_align_mode);
			upper_pipe = lower_pipe;

			/*
			 * NOTE(review): plain '=' drops the align bit from
			 * lower_pipe in the pp-split slave case (upper_pipe
			 * keeps it) — mirrors the legacy path below; confirm
			 * this asymmetry is intentional.
			 */
			if (cfg->pp_split_slave != INTF_MAX)
				lower_pipe = FLD_SMART_PANEL_FREE_RUN;
		}
	} else if (cfg->en) {
		if (cfg->mode == INTF_MODE_CMD) {
			lower_pipe = FLD_SPLIT_DISPLAY_CMD;

			/* interface controlling sw trigger */
			if (cfg->intf == INTF_2)
				lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
			else
				lower_pipe |= FLD_INTF_2_SW_TRG_MUX;

			/* free run: '=' intentionally replaces the trg-mux bits */
			if (cfg->pp_split_slave != INTF_MAX)
				lower_pipe = FLD_SMART_PANEL_FREE_RUN;

			upper_pipe = lower_pipe;

			/* smart panel align mode (lower pipe only) */
			lower_pipe |= BIT(mdp->caps->smart_panel_align_mode);
		} else {
			/* video mode: cross-wire the sw trigger muxes per master intf */
			if (cfg->intf == INTF_2) {
				lower_pipe = FLD_INTF_1_SW_TRG_MUX;
				upper_pipe = FLD_INTF_2_SW_TRG_MUX;
			} else {
				lower_pipe = FLD_INTF_2_SW_TRG_MUX;
				upper_pipe = FLD_INTF_1_SW_TRG_MUX;
			}
		}
	}

	SDE_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
	SDE_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
	SDE_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
	SDE_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
}
  150. static void sde_hw_setup_pp_split(struct sde_hw_mdp *mdp,
  151. struct split_pipe_cfg *cfg)
  152. {
  153. u32 ppb_config = 0x0;
  154. u32 ppb_control = 0x0;
  155. if (!mdp || !cfg)
  156. return;
  157. if (cfg->en && cfg->pp_split_slave != INTF_MAX) {
  158. ppb_config |= (cfg->pp_split_slave - INTF_0 + 1) << 20;
  159. ppb_config |= BIT(16); /* split enable */
  160. ppb_control = BIT(5); /* horz split*/
  161. }
  162. if (cfg->pp_split_index) {
  163. SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, 0x0);
  164. SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, 0x0);
  165. SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, ppb_config);
  166. SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, ppb_control);
  167. } else {
  168. SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, ppb_config);
  169. SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, ppb_control);
  170. SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, 0x0);
  171. SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, 0x0);
  172. }
  173. }
  174. static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp,
  175. struct cdm_output_cfg *cfg)
  176. {
  177. struct sde_hw_blk_reg_map *c;
  178. u32 out_ctl = 0;
  179. if (!mdp || !cfg)
  180. return;
  181. c = &mdp->hw;
  182. if (cfg->wb_en)
  183. out_ctl |= BIT(24);
  184. else if (cfg->intf_en)
  185. out_ctl |= BIT(19);
  186. SDE_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
  187. }
  188. static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp,
  189. enum sde_clk_ctrl_type clk_ctrl, bool enable)
  190. {
  191. struct sde_hw_blk_reg_map *c;
  192. u32 reg_off, bit_off;
  193. u32 reg_val, new_val;
  194. bool clk_forced_on;
  195. if (!mdp)
  196. return false;
  197. c = &mdp->hw;
  198. if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX)
  199. return false;
  200. reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
  201. bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
  202. reg_val = SDE_REG_READ(c, reg_off);
  203. if (enable)
  204. new_val = reg_val | BIT(bit_off);
  205. else
  206. new_val = reg_val & ~BIT(bit_off);
  207. SDE_REG_WRITE(c, reg_off, new_val);
  208. wmb(); /* ensure write finished before progressing */
  209. clk_forced_on = !(reg_val & BIT(bit_off));
  210. return clk_forced_on;
  211. }
  212. static int sde_hw_get_clk_ctrl_status(struct sde_hw_mdp *mdp,
  213. enum sde_clk_ctrl_type clk_ctrl, bool *status)
  214. {
  215. struct sde_hw_blk_reg_map *c;
  216. u32 reg_off, bit_off;
  217. if (!mdp)
  218. return -EINVAL;
  219. c = &mdp->hw;
  220. if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX ||
  221. !mdp->caps->clk_status[clk_ctrl].reg_off)
  222. return -EINVAL;
  223. reg_off = mdp->caps->clk_status[clk_ctrl].reg_off;
  224. bit_off = mdp->caps->clk_status[clk_ctrl].bit_off;
  225. *status = SDE_REG_READ(c, reg_off) & BIT(bit_off);
  226. return 0;
  227. }
/*
 * _update_vsync_source - program a watchdog timer as the vsync source
 * @mdp: mdp top hardware block
 * @cfg: vsync source configuration (source id and frame rate)
 *
 * Only acts when the selected source is one of the WD timers; any other
 * source makes this a no-op. Loads the chosen timer from the frame rate,
 * clears it, then enables its heartbeat and WD-timer bits.
 *
 * NOTE(review): the range test assumes the enum places WD_TIMER_4 numerically
 * below WD_TIMER_0 — verify against the sde_vsync_source definition.
 */
static void _update_vsync_source(struct sde_hw_mdp *mdp,
		struct sde_vsync_source_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c;
	u32 reg, wd_load_value, wd_ctl, wd_ctl2;

	if (!mdp || !cfg)
		return;

	c = &mdp->hw;

	if (cfg->vsync_source >= SDE_VSYNC_SOURCE_WD_TIMER_4 &&
			cfg->vsync_source <= SDE_VSYNC_SOURCE_WD_TIMER_0) {
		/* select the load/ctl/ctl2 register triple for this WD timer */
		switch (cfg->vsync_source) {
		case SDE_VSYNC_SOURCE_WD_TIMER_4:
			wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_4_CTL;
			wd_ctl2 = MDP_WD_TIMER_4_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_3:
			wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_3_CTL;
			wd_ctl2 = MDP_WD_TIMER_3_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_2:
			wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_2_CTL;
			wd_ctl2 = MDP_WD_TIMER_2_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_1:
			wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_1_CTL;
			wd_ctl2 = MDP_WD_TIMER_1_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_0:
		default:
			wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_0_CTL;
			wd_ctl2 = MDP_WD_TIMER_0_CTL2;
			break;
		}

		SDE_REG_WRITE(c, wd_load_value, CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
		SDE_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
		reg = SDE_REG_READ(c, wd_ctl2);
		reg |= BIT(8); /* enable heartbeat timer */
		reg |= BIT(0); /* enable WD timer */
		SDE_REG_WRITE(c, wd_ctl2, reg);

		/* make sure that timers are enabled/disabled for vsync state */
		wmb();
	}
}
  276. static void sde_hw_setup_vsync_source(struct sde_hw_mdp *mdp,
  277. struct sde_vsync_source_cfg *cfg)
  278. {
  279. struct sde_hw_blk_reg_map *c;
  280. u32 reg, i;
  281. static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
  282. if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
  283. return;
  284. c = &mdp->hw;
  285. reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
  286. for (i = 0; i < cfg->pp_count; i++) {
  287. int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
  288. if (pp_idx >= ARRAY_SIZE(pp_offset))
  289. continue;
  290. reg &= ~(0xf << pp_offset[pp_idx]);
  291. reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
  292. }
  293. SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
  294. _update_vsync_source(mdp, cfg);
  295. }
/*
 * sde_hw_setup_vsync_source_v1 - vsync source setup for targets advertising
 * only SDE_MDP_WD_TIMER (no MDP_VSYNC_SEL mux programming; see
 * _setup_mdp_ops). Delegates straight to the WD-timer path.
 */
static void sde_hw_setup_vsync_source_v1(struct sde_hw_mdp *mdp,
		struct sde_vsync_source_cfg *cfg)
{
	_update_vsync_source(mdp, cfg);
}
/*
 * sde_hw_reset_ubwc - reprogram the UBWC static configuration registers
 * @mdp: mdp top hardware block
 * @m: mdss catalog supplying swizzle/static/bank-bit/macrotile settings
 *
 * Reads the decoder HW version from the register space and programs
 * UBWC_STATIC — plus UBWC_CTRL_2 and UBWC_PREDICTION_MODE on 4.0/4.3
 * decoders — according to the encoder revision from the catalog.
 */
void sde_hw_reset_ubwc(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m)
{
	struct sde_hw_blk_reg_map c;
	u32 ubwc_dec_version;
	u32 ubwc_enc_version;

	if (!mdp || !m)
		return;

	/* force blk offset to zero to access beginning of register region */
	c = mdp->hw;
	c.blk_off = 0x0;

	ubwc_dec_version = SDE_REG_READ(&c, UBWC_DEC_HW_VERSION);
	ubwc_enc_version = m->ubwc_rev;

	if (IS_UBWC_40_SUPPORTED(ubwc_dec_version) || IS_UBWC_43_SUPPORTED(ubwc_dec_version)) {
		/* 4.x defaults; downgraded below when the encoder is UBWC 3.0 */
		u32 ver = IS_UBWC_43_SUPPORTED(ubwc_dec_version) ? 3 : 2;
		u32 mode = 1;
		u32 reg = (m->mdp[0].ubwc_swizzle & 0x7) |
			((m->mdp[0].ubwc_static & 0x1) << 3) |
			((m->mdp[0].highest_bank_bit & 0x7) << 4) |
			((m->macrotile_mode & 0x1) << 12);

		if (IS_UBWC_30_SUPPORTED(ubwc_enc_version)) {
			ver = 1;
			mode = 0;
		}

		SDE_REG_WRITE(&c, UBWC_STATIC, reg);
		SDE_REG_WRITE(&c, UBWC_CTRL_2, ver);
		SDE_REG_WRITE(&c, UBWC_PREDICTION_MODE, mode);
	} else if (IS_UBWC_20_SUPPORTED(ubwc_dec_version)) {
		SDE_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
	} else if (IS_UBWC_30_SUPPORTED(ubwc_dec_version)) {
		/* 3.0 decoder uses a different field layout than 4.x above */
		u32 reg = m->mdp[0].ubwc_static |
			(m->mdp[0].ubwc_swizzle & 0x1) |
			((m->mdp[0].highest_bank_bit & 0x3) << 4) |
			((m->macrotile_mode & 0x1) << 12);

		if (IS_UBWC_30_SUPPORTED(ubwc_enc_version))
			reg |= BIT(10);
		if (IS_UBWC_10_SUPPORTED(ubwc_enc_version))
			reg |= BIT(8);

		SDE_REG_WRITE(&c, UBWC_STATIC, reg);
	} else {
		SDE_ERROR("unsupported ubwc decoder version 0x%08x\n", ubwc_dec_version);
	}
}
  343. static void sde_hw_intf_audio_select(struct sde_hw_mdp *mdp)
  344. {
  345. struct sde_hw_blk_reg_map *c;
  346. if (!mdp)
  347. return;
  348. c = &mdp->hw;
  349. SDE_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
  350. }
  351. static void sde_hw_mdp_events(struct sde_hw_mdp *mdp, bool enable)
  352. {
  353. struct sde_hw_blk_reg_map *c;
  354. if (!mdp)
  355. return;
  356. c = &mdp->hw;
  357. SDE_REG_WRITE(c, HW_EVENTS_CTL, enable);
  358. }
  359. void sde_hw_set_vm_sid_v2(struct sde_hw_sid *sid, u32 vm, struct sde_mdss_cfg *m)
  360. {
  361. u32 offset = 0;
  362. int i;
  363. if (!sid || !m)
  364. return;
  365. for (i = 0; i < m->ctl_count; i++) {
  366. offset = MDP_SID_V2_CTL_0 + (i * 4);
  367. SDE_REG_WRITE(&sid->hw, offset, vm << 2);
  368. }
  369. for (i = 0; i < m->ltm_count; i++) {
  370. offset = MDP_SID_V2_LTM0 + (i * 4);
  371. SDE_REG_WRITE(&sid->hw, offset, vm << 2);
  372. }
  373. if (SDE_HW_MAJOR(sid->hw.hw_rev) >= SDE_HW_MAJOR(SDE_HW_VER_A00)) {
  374. for (i = 0; i < m->ctl_count; i++) {
  375. offset = MDP_SID_V2_LUTDMA_VM_0 + (i * 4);
  376. SDE_REG_WRITE(&sid->hw, offset, vm << 2);
  377. }
  378. }
  379. SDE_REG_WRITE(&sid->hw, MDP_SID_V2_IPC_READ, vm << 2);
  380. SDE_REG_WRITE(&sid->hw, MDP_SID_V2_LUTDMA_RD, vm << 2);
  381. SDE_REG_WRITE(&sid->hw, MDP_SID_V2_LUTDMA_WR, vm << 2);
  382. SDE_REG_WRITE(&sid->hw, MDP_SID_V2_LUTDMA_SB_RD, vm << 2);
  383. SDE_REG_WRITE(&sid->hw, MDP_SID_V2_DSI0, vm << 2);
  384. SDE_REG_WRITE(&sid->hw, MDP_SID_V2_DSI1, vm << 2);
  385. }
  386. void sde_hw_set_vm_sid(struct sde_hw_sid *sid, u32 vm, struct sde_mdss_cfg *m)
  387. {
  388. if (!sid || !m)
  389. return;
  390. SDE_REG_WRITE(&sid->hw, MDP_SID_XIN7, vm << 2);
  391. }
  392. struct sde_hw_sid *sde_hw_sid_init(void __iomem *addr,
  393. u32 sid_len, const struct sde_mdss_cfg *m)
  394. {
  395. struct sde_hw_sid *c;
  396. c = kzalloc(sizeof(*c), GFP_KERNEL);
  397. if (!c)
  398. return ERR_PTR(-ENOMEM);
  399. c->hw.base_off = addr;
  400. c->hw.blk_off = 0;
  401. c->hw.length = sid_len;
  402. c->hw.hw_rev = m->hw_rev;
  403. c->hw.log_mask = SDE_DBG_MASK_SID;
  404. if (IS_SDE_SID_REV_200(m->sid_rev))
  405. c->ops.set_vm_sid = sde_hw_set_vm_sid_v2;
  406. else
  407. c->ops.set_vm_sid = sde_hw_set_vm_sid;
  408. return c;
  409. }
  410. void sde_hw_set_rotator_sid(struct sde_hw_sid *sid)
  411. {
  412. if (!sid)
  413. return;
  414. SDE_REG_WRITE(&sid->hw, MDP_SID_ROT_RD, ROT_SID_ID_VAL);
  415. SDE_REG_WRITE(&sid->hw, MDP_SID_ROT_WR, ROT_SID_ID_VAL);
  416. }
  417. void sde_hw_set_sspp_sid(struct sde_hw_sid *sid, u32 pipe, u32 vm,
  418. struct sde_mdss_cfg *m)
  419. {
  420. u32 offset = 0;
  421. u32 vig_sid_offset = MDP_SID_VIG0;
  422. u32 dma_sid_offset = MDP_SID_DMA0;
  423. if (!sid)
  424. return;
  425. if (IS_SDE_SID_REV_200(m->sid_rev)) {
  426. vig_sid_offset = MDP_SID_V2_VIG0;
  427. dma_sid_offset = MDP_SID_V2_DMA0;
  428. }
  429. if (SDE_SSPP_VALID_VIG(pipe))
  430. offset = vig_sid_offset + ((pipe - SSPP_VIG0) * 4);
  431. else if (SDE_SSPP_VALID_DMA(pipe))
  432. offset = dma_sid_offset + ((pipe - SSPP_DMA0) * 4);
  433. else
  434. return;
  435. SDE_REG_WRITE(&sid->hw, offset, vm << 2);
  436. }
  437. static void sde_hw_program_cwb_ppb_ctrl(struct sde_hw_mdp *mdp,
  438. bool dual, bool dspp_out)
  439. {
  440. u32 value = dspp_out ? 0x4 : 0x0;
  441. SDE_REG_WRITE(&mdp->hw, PPB2_CNTL, value);
  442. if (dual) {
  443. value |= 0x1;
  444. SDE_REG_WRITE(&mdp->hw, PPB3_CNTL, value);
  445. }
  446. }
  447. static void sde_hw_set_hdr_plus_metadata(struct sde_hw_mdp *mdp,
  448. u8 *payload, u32 len, u32 stream_id)
  449. {
  450. u32 i, b;
  451. u32 length = len - 1;
  452. u32 d_offset, nb_offset, data = 0;
  453. const u32 dword_size = sizeof(u32);
  454. bool is_4k_aligned = mdp->caps->features &
  455. BIT(SDE_MDP_DHDR_MEMPOOL_4K);
  456. if (!payload || !len) {
  457. SDE_ERROR("invalid payload with length: %d\n", len);
  458. return;
  459. }
  460. if (stream_id) {
  461. if (is_4k_aligned) {
  462. d_offset = DP_DHDR_MEM_POOL_1_DATA_4K;
  463. nb_offset = DP_DHDR_MEM_POOL_1_NUM_BYTES_4K;
  464. } else {
  465. d_offset = DP_DHDR_MEM_POOL_1_DATA;
  466. nb_offset = DP_DHDR_MEM_POOL_1_NUM_BYTES;
  467. }
  468. } else {
  469. if (is_4k_aligned) {
  470. d_offset = DP_DHDR_MEM_POOL_0_DATA_4K;
  471. nb_offset = DP_DHDR_MEM_POOL_0_NUM_BYTES_4K;
  472. } else {
  473. d_offset = DP_DHDR_MEM_POOL_0_DATA;
  474. nb_offset = DP_DHDR_MEM_POOL_0_NUM_BYTES;
  475. }
  476. }
  477. /* payload[0] is set in VSCEXT header byte 1, skip programming here */
  478. SDE_REG_WRITE(&mdp->hw, nb_offset, length);
  479. for (i = 1; i < len; i += dword_size) {
  480. for (b = 0; (i + b) < len && b < dword_size; b++)
  481. data |= payload[i + b] << (8 * b);
  482. SDE_REG_WRITE(&mdp->hw, d_offset, data);
  483. data = 0;
  484. }
  485. }
  486. static u32 sde_hw_get_autorefresh_status(struct sde_hw_mdp *mdp, u32 intf_idx)
  487. {
  488. struct sde_hw_blk_reg_map *c;
  489. u32 autorefresh_status;
  490. u32 blk_id = (intf_idx == INTF_2) ? 65 : 64;
  491. if (!mdp)
  492. return 0;
  493. c = &mdp->hw;
  494. SDE_REG_WRITE(&mdp->hw, MDP_PERIPH_DBGBUS_CTRL,
  495. TEST_MASK(blk_id, AUTOREFRESH_TEST_POINT));
  496. SDE_REG_WRITE(&mdp->hw, MDP_DSPP_DBGBUS_CTRL, 0x7001);
  497. wmb(); /* make sure test bits were written */
  498. autorefresh_status = SDE_REG_READ(&mdp->hw, MDP_DSPP_DBGBUS_STATUS);
  499. SDE_REG_WRITE(&mdp->hw, MDP_PERIPH_DBGBUS_CTRL, 0x0);
  500. return autorefresh_status;
  501. }
  502. static void sde_hw_hw_fence_timestamp_ctrl(struct sde_hw_mdp *mdp, bool enable, bool clear)
  503. {
  504. struct sde_hw_blk_reg_map c;
  505. u32 val;
  506. if (!mdp) {
  507. SDE_ERROR("invalid mdp, won't enable hw-fence timestamping\n");
  508. return;
  509. }
  510. /* start from the base-address of the mdss */
  511. c = mdp->hw;
  512. c.blk_off = 0x0;
  513. val = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_ID_TIMESTAMP_CTRL);
  514. if (enable)
  515. val |= BIT(0);
  516. else
  517. val &= ~BIT(0);
  518. if (clear)
  519. val |= BIT(1);
  520. else
  521. val &= ~BIT(1);
  522. SDE_REG_WRITE(&c, MDP_CTL_HW_FENCE_ID_TIMESTAMP_CTRL, val);
  523. }
/*
 * sde_hw_input_hw_fence_status - read and clear the input hw-fence timestamps
 * @mdp: mdp top hardware block
 * @s_val: out, 64-bit start timestamp (high/low register pair)
 * @e_val: out, 64-bit end timestamp (high/low register pair)
 *
 * Timestamps live at the start of the mdss register region; after both are
 * read they are cleared via the timestamp control register.
 */
static void sde_hw_input_hw_fence_status(struct sde_hw_mdp *mdp, u64 *s_val, u64 *e_val)
{
	u32 start_h, start_l, end_h, end_l;
	struct sde_hw_blk_reg_map c;

	if (!mdp || IS_ERR_OR_NULL(s_val) || IS_ERR_OR_NULL(e_val)) {
		SDE_ERROR("invalid mdp\n");
		return;
	}

	/* start from the base-address of the mdss */
	c = mdp->hw;
	c.blk_off = 0x0;

	start_l = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_INPUT_START_TIMESTAMP0);
	start_h = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_INPUT_START_TIMESTAMP1);
	*s_val = (u64)start_h << 32 | start_l;

	end_l = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_INPUT_END_TIMESTAMP0);
	end_h = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_INPUT_END_TIMESTAMP1);
	*e_val = (u64)end_h << 32 | end_l;

	/* clear the timestamps */
	sde_hw_hw_fence_timestamp_ctrl(mdp, false, true);

	wmb(); /* make sure the timestamps are cleared */
}
/*
 * _sde_hw_setup_hw_input_fences_config - program the input hw-fence path
 * @protocol_id: requested IPCC protocol id
 * @client_phys_id: physical IPCC client id of the DPU
 * @ipcc_base_addr: base address of the IPCC register space
 * @c: register map already rebased to the start of the mdss region
 *
 * Programs the fence controller protocol selection, the ISR op-list start
 * indices for input/output fences, the input-fence read attributes, and a
 * three-op input ISR program: read_reg, write-if-eq (flush ready), exit.
 */
static void _sde_hw_setup_hw_input_fences_config(u32 protocol_id, u32 client_phys_id,
		unsigned long ipcc_base_addr, struct sde_hw_blk_reg_map *c)
{
	u32 val, offset;

	/* select ipcc protocol id for dpu */
	val = (protocol_id == HW_FENCE_IPCC_FENCE_PROTOCOL_ID) ?
		HW_FENCE_DPU_FENCE_PROTOCOL_ID : protocol_id;
	SDE_REG_WRITE(c, MDP_CTL_HW_FENCE_CTRL, val);

	/* configure the start of the FENCE_IDn_ISR ops for input and output fence isr's */
	val = (HW_FENCE_DPU_OUTPUT_FENCE_START_N << 16) | (HW_FENCE_DPU_INPUT_FENCE_START_N & 0xFF);
	SDE_REG_WRITE(c, MDP_CTL_HW_FENCE_ID_START_ADDR, val);

	/* setup input fence isr */

	/* configure the attribs for the isr read_reg op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ADDR, 0);
	val = HW_FENCE_IPCC_PROTOCOLp_CLIENTc_RECV_ID(ipcc_base_addr,
		protocol_id, client_phys_id);
	SDE_REG_WRITE(c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 0);
	val = MDP_CTL_FENCE_ATTRS(0x1, 0x2, 0x1);
	SDE_REG_WRITE(c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_MASK, 0);
	SDE_REG_WRITE(c, offset, 0xFFFFFFFF);

	/* configure the attribs for the write if eq data */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_DATA, 1);
	SDE_REG_WRITE(c, offset, 0x1);

	/* program input-fence isr ops */

	/* set read_reg op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		HW_FENCE_DPU_INPUT_FENCE_START_N);
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x0, 0x0, 0x0, 0x0);
	SDE_REG_WRITE(c, offset, val);

	/* set write if eq op for flush ready */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_INPUT_FENCE_START_N + 1));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x7, 0x0, 0x1, 0x0);
	SDE_REG_WRITE(c, offset, val);

	/* set exit op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_INPUT_FENCE_START_N + 2));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0xf, 0x0, 0x0, 0x0);
	SDE_REG_WRITE(c, offset, val);
}
/*
 * sde_hw_setup_hw_fences_config - configure input and output hw-fences
 * @mdp: mdp top hardware block
 * @protocol_id: requested IPCC protocol id
 * @client_phys_id: physical IPCC client id of the DPU
 * @ipcc_base_addr: base address of the IPCC register space
 *
 * Sets up the shared input-fence path, then programs the output-fence
 * attributes and a three-op output ISR program: load_data, write_reg
 * (IPCC send), exit. Compare the *_with_dir_write variant, which inserts
 * direct-write and wait ops into the output program.
 */
static void sde_hw_setup_hw_fences_config(struct sde_hw_mdp *mdp, u32 protocol_id,
		u32 client_phys_id, unsigned long ipcc_base_addr)
{
	u32 val, offset;
	struct sde_hw_blk_reg_map c;

	if (!mdp) {
		SDE_ERROR("invalid mdp, won't configure hw-fences\n");
		return;
	}

	/* hw-fence registers are relative to the start of the mdss region */
	c = mdp->hw;
	c.blk_off = 0x0;

	_sde_hw_setup_hw_input_fences_config(protocol_id, client_phys_id, ipcc_base_addr, &c);

	/* setup output fence isr */

	/* configure the attribs for the isr load_data op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ADDR, 4);
	val = HW_FENCE_IPCC_PROTOCOLp_CLIENTc_SEND(ipcc_base_addr,
		protocol_id, client_phys_id);
	SDE_REG_WRITE(&c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 4);
	val = MDP_CTL_FENCE_ATTRS(0x1, 0x2, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_MASK, 4);
	SDE_REG_WRITE(&c, offset, 0xFFFFFFFF);

	/* program output-fence isr ops */

	/* set load_data op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		HW_FENCE_DPU_OUTPUT_FENCE_START_N);
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x6, 0x0, 0x4, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set write_reg op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 1));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x2, 0x4, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set exit op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 2));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0xf, 0x0, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);
}
  627. void sde_hw_top_set_ppb_fifo_size(struct sde_hw_mdp *mdp, u32 pp, u32 sz)
  628. {
  629. struct sde_hw_blk_reg_map c;
  630. u32 offset, val, pp_index;
  631. if (!mdp) {
  632. SDE_ERROR("invalid mdp instance\n");
  633. return;
  634. }
  635. if (pp >= PINGPONG_MAX || ppb_offset_map[pp - PINGPONG_0] < 0) {
  636. SDE_ERROR("invalid pingpong index:%d max:%d\n", pp, PINGPONG_MAX);
  637. return;
  638. }
  639. pp_index = pp - PINGPONG_0;
  640. c = mdp->hw;
  641. offset = PPB_FIFO_SIZE + ((ppb_offset_map[pp_index] / 2) * 0x4);
  642. spin_lock(&mdp->slock);
  643. /* read, modify & update *respective 16 bit fields */
  644. val = SDE_REG_READ(&c, offset);
  645. /* divide by 4 as each fifo entry can store 4 pixels */
  646. sz = (sz / MDP_PPB_FIFO_ENTRY_SIZE) & 0xFFFF;
  647. sz = ppb_offset_map[pp_index] % 2 ? (sz << 16) : sz;
  648. val = (ppb_offset_map[pp_index] % 2) ? (val & 0xFFFF) : (val & 0xFFFF0000);
  649. SDE_REG_WRITE(&c, offset, val | sz);
  650. spin_unlock(&mdp->slock);
  651. }
/*
 * sde_hw_setup_hw_fences_config_with_dir_write - configure hw-fences using
 * the direct-write output sequence
 * @mdp: mdp top hardware block
 * @protocol_id: requested IPCC protocol id
 * @client_phys_id: physical IPCC client id of the DPU
 * @ipcc_base_addr: base address of the IPCC register space
 *
 * Same input-fence setup as sde_hw_setup_hw_fences_config, but the output
 * ISR program is five ops: load_data, write_direct, wait, write_reg
 * (IPCC send), exit.
 */
static void sde_hw_setup_hw_fences_config_with_dir_write(struct sde_hw_mdp *mdp, u32 protocol_id,
		u32 client_phys_id, unsigned long ipcc_base_addr)
{
	u32 val, offset;
	struct sde_hw_blk_reg_map c;

	if (!mdp) {
		SDE_ERROR("invalid mdp, won't configure hw-fences\n");
		return;
	}

	/* hw-fence registers are relative to the start of the mdss region */
	c = mdp->hw;
	c.blk_off = 0x0;

	_sde_hw_setup_hw_input_fences_config(protocol_id, client_phys_id, ipcc_base_addr, &c);

	/* setup output fence isr */

	/* configure the attribs for the isr load_data op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ADDR, 4);
	val = HW_FENCE_IPCC_PROTOCOLp_CLIENTc_SEND(ipcc_base_addr,
		protocol_id, client_phys_id);
	SDE_REG_WRITE(&c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 4);
	val = MDP_CTL_FENCE_ATTRS(0x1, 0x2, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_MASK, 4);
	SDE_REG_WRITE(&c, offset, 0xFFFFFFFF);

	/* program output-fence isr ops */

	/* set load_data op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		HW_FENCE_DPU_OUTPUT_FENCE_START_N);
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x6, 0x0, 0x4, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set write_direct op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 1));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x3, 0x0, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set wait op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 2));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x4, 0x1, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set write_reg op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 3));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x2, 0x4, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set exit op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 4));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0xf, 0x0, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);
}
  702. static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops, unsigned long cap, u32 hw_fence_rev)
  703. {
  704. ops->setup_split_pipe = sde_hw_setup_split_pipe;
  705. ops->setup_pp_split = sde_hw_setup_pp_split;
  706. ops->setup_cdm_output = sde_hw_setup_cdm_output;
  707. ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
  708. ops->get_clk_ctrl_status = sde_hw_get_clk_ctrl_status;
  709. ops->set_cwb_ppb_cntl = sde_hw_program_cwb_ppb_ctrl;
  710. ops->reset_ubwc = sde_hw_reset_ubwc;
  711. ops->intf_audio_select = sde_hw_intf_audio_select;
  712. ops->set_mdp_hw_events = sde_hw_mdp_events;
  713. if (cap & BIT(SDE_MDP_VSYNC_SEL))
  714. ops->setup_vsync_source = sde_hw_setup_vsync_source;
  715. else if (cap & BIT(SDE_MDP_WD_TIMER))
  716. ops->setup_vsync_source = sde_hw_setup_vsync_source_v1;
  717. if (cap & BIT(SDE_MDP_DHDR_MEMPOOL_4K) ||
  718. cap & BIT(SDE_MDP_DHDR_MEMPOOL))
  719. ops->set_hdr_plus_metadata = sde_hw_set_hdr_plus_metadata;
  720. ops->get_autorefresh_status = sde_hw_get_autorefresh_status;
  721. if (hw_fence_rev) {
  722. if (cap & BIT(SDE_MDP_HW_FENCE_DIR_WRITE))
  723. ops->setup_hw_fences = sde_hw_setup_hw_fences_config_with_dir_write;
  724. else
  725. ops->setup_hw_fences = sde_hw_setup_hw_fences_config;
  726. ops->hw_fence_input_timestamp_ctrl = sde_hw_hw_fence_timestamp_ctrl;
  727. ops->hw_fence_input_status = sde_hw_input_hw_fence_status;
  728. }
  729. if (cap & BIT(SDE_MDP_TOP_PPB_SET_SIZE))
  730. ops->set_ppb_fifo_size = sde_hw_top_set_ppb_fifo_size;
  731. }
  732. static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp,
  733. const struct sde_mdss_cfg *m,
  734. void __iomem *addr,
  735. struct sde_hw_blk_reg_map *b)
  736. {
  737. int i;
  738. if (!m || !addr || !b)
  739. return ERR_PTR(-EINVAL);
  740. for (i = 0; i < m->mdp_count; i++) {
  741. if (mdp == m->mdp[i].id) {
  742. b->base_off = addr;
  743. b->blk_off = m->mdp[i].base;
  744. b->length = m->mdp[i].len;
  745. b->hw_rev = m->hw_rev;
  746. b->log_mask = SDE_DBG_MASK_TOP;
  747. return &m->mdp[i];
  748. }
  749. }
  750. return ERR_PTR(-EINVAL);
  751. }
/*
 * sde_hw_mdptop_init - allocate and initialize the mdp top hardware block
 * @idx: mdp id to look up in the catalog
 * @addr: mapped base address of the register space
 * @m: mdss catalog
 *
 * Resolves the catalog entry, wires up the ops table, and registers the
 * debug dump ranges. On targets where the legacy periph-top registers were
 * removed, the mdp range is registered as two pieces around the removed
 * window, plus a separate hw-fence range relative to mdp_phys.
 *
 * Return: new block on success, ERR_PTR on failure. Caller frees with
 * sde_hw_mdp_destroy().
 */
struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
		void __iomem *addr,
		const struct sde_mdss_cfg *m)
{
	struct sde_hw_mdp *mdp;
	const struct sde_mdp_cfg *cfg;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
	if (!mdp)
		return ERR_PTR(-ENOMEM);

	cfg = _top_offset(idx, m, addr, &mdp->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(mdp);
		return ERR_PTR(-EINVAL);
	}

	/* protects read-modify-write sequences, e.g. set_ppb_fifo_size */
	spin_lock_init(&mdp->slock);

	/*
	 * Assign ops
	 */
	mdp->idx = idx;
	mdp->caps = cfg;
	_setup_mdp_ops(&mdp->ops, mdp->caps->features, m->hw_fence_rev);

	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "mdss_hw", 0,
			m->mdss_hw_block_size, 0);

	if (test_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &m->mdp[0].features)) {
		char name[SDE_HW_BLK_NAME_LEN];

		snprintf(name, sizeof(name), "%s_1", cfg->name);
		/* split the dump range around the removed periph-top window */
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, mdp->hw.blk_off,
				mdp->hw.blk_off + MDP_PERIPH_TOP0, mdp->hw.xin_id);
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, name, mdp->hw.blk_off + MDP_SSPP_TOP2,
				mdp->hw.blk_off + mdp->hw.length, mdp->hw.xin_id);
		/* do not use blk_off, following offsets start from mdp_phys */
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "hw_fence", MDP_CTL_HW_FENCE_CTRL,
				MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 5), mdp->hw.xin_id);
	} else {
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
				mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length,
				mdp->hw.xin_id);
	}

	sde_dbg_set_sde_top_offset(mdp->hw.blk_off);

	return mdp;
}
/*
 * sde_hw_mdp_destroy - free a block allocated by sde_hw_mdptop_init
 * @mdp: block to free; NULL is allowed (kfree(NULL) is a no-op)
 */
void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp)
{
	kfree(mdp);
}