// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
 */
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__

#include "sde_hwio.h"
#include "sde_hw_catalog.h"
#include "sde_hw_top.h"
#include "sde_dbg.h"
#include "sde_kms.h"

/* Misc MDP TOP register offsets */
#define SSPP_SPARE 0x28

/* UBWC decoder configuration registers (offsets from start of region) */
#define UBWC_DEC_HW_VERSION 0x058
#define UBWC_STATIC 0x144
#define UBWC_CTRL_2 0x150
#define UBWC_PREDICTION_MODE 0x154

/* SPLIT_DISPLAY_LOWER/UPPER_PIPE_CTRL field bits */
#define FLD_SPLIT_DISPLAY_CMD BIT(1)
#define FLD_SMART_PANEL_FREE_RUN BIT(2)
#define FLD_INTF_1_SW_TRG_MUX BIT(4)
#define FLD_INTF_2_SW_TRG_MUX BIT(8)
#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF

/* Debug bus and danger/safe status registers */
#define MDP_DSPP_DBGBUS_CTRL 0x348
#define MDP_DSPP_DBGBUS_STATUS 0x34C
#define DANGER_STATUS 0x360
#define SAFE_STATUS 0x364
#define TE_LINE_INTERVAL 0x3F4

/* Traffic shaper control; per-client register strides are 4 bytes */
#define TRAFFIC_SHAPER_EN BIT(31)
#define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
#define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4

/* Watchdog timer register sets (used as programmable vsync sources) */
#define MDP_WD_TIMER_0_CTL 0x380
#define MDP_WD_TIMER_0_CTL2 0x384
#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
#define MDP_WD_TIMER_1_CTL 0x390
#define MDP_WD_TIMER_1_CTL2 0x394
#define MDP_WD_TIMER_1_LOAD_VALUE 0x398
#define MDP_PERIPH_DBGBUS_CTRL 0x418
#define MDP_WD_TIMER_2_CTL 0x420
#define MDP_WD_TIMER_2_CTL2 0x424
#define MDP_WD_TIMER_2_LOAD_VALUE 0x428
#define MDP_WD_TIMER_3_CTL 0x430
#define MDP_WD_TIMER_3_CTL2 0x434
#define MDP_WD_TIMER_3_LOAD_VALUE 0x438
#define MDP_WD_TIMER_4_CTL 0x440
#define MDP_WD_TIMER_4_CTL2 0x444
#define MDP_WD_TIMER_4_LOAD_VALUE 0x448

/* TOP sub-range boundaries used for debug register dump registration */
#define MDP_PERIPH_TOP0 0x380
#define MDP_SSPP_TOP2 0x3A8

/* Debug-bus test-point encoding: (block id << 4) | (test point << 1) | en */
#define AUTOREFRESH_TEST_POINT 0x2
#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))

#define DCE_SEL 0x450

/* SID block register offsets, v2 layout */
#define MDP_SID_V2_VIG0 0x000
#define MDP_SID_V2_DMA0 0x040
#define MDP_SID_V2_CTL_0 0x100
#define MDP_SID_V2_LTM0 0x400
#define MDP_SID_V2_IPC_READ 0x200
#define MDP_SID_V2_LUTDMA_RD 0x300
#define MDP_SID_V2_LUTDMA_WR 0x304
#define MDP_SID_V2_LUTDMA_SB_RD 0x308
#define MDP_SID_V2_DSI0 0x500
#define MDP_SID_V2_DSI1 0x504

/* SID block register offsets, legacy layout */
#define MDP_SID_VIG0 0x0
#define MDP_SID_VIG1 0x4
#define MDP_SID_VIG2 0x8
#define MDP_SID_VIG3 0xC
#define MDP_SID_DMA0 0x10
#define MDP_SID_DMA1 0x14
#define MDP_SID_DMA2 0x18
#define MDP_SID_DMA3 0x1C
#define MDP_SID_ROT_RD 0x20
#define MDP_SID_ROT_WR 0x24
#define MDP_SID_WB2 0x28
#define MDP_SID_XIN7 0x2C

/* fixed SID value programmed for the rotator read/write clients */
#define ROT_SID_ID_VAL 0x1c
  75. static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
  76. struct split_pipe_cfg *cfg)
  77. {
  78. struct sde_hw_blk_reg_map *c;
  79. u32 upper_pipe = 0;
  80. u32 lower_pipe = 0;
  81. if (!mdp || !cfg)
  82. return;
  83. c = &mdp->hw;
  84. if (cfg->en) {
  85. if (cfg->mode == INTF_MODE_CMD) {
  86. lower_pipe = FLD_SPLIT_DISPLAY_CMD;
  87. /* interface controlling sw trigger */
  88. if (cfg->intf == INTF_2)
  89. lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
  90. else
  91. lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
  92. /* free run */
  93. if (cfg->pp_split_slave != INTF_MAX)
  94. lower_pipe = FLD_SMART_PANEL_FREE_RUN;
  95. upper_pipe = lower_pipe;
  96. /* smart panel align mode */
  97. lower_pipe |= BIT(mdp->caps->smart_panel_align_mode);
  98. } else {
  99. if (cfg->intf == INTF_2) {
  100. lower_pipe = FLD_INTF_1_SW_TRG_MUX;
  101. upper_pipe = FLD_INTF_2_SW_TRG_MUX;
  102. } else {
  103. lower_pipe = FLD_INTF_2_SW_TRG_MUX;
  104. upper_pipe = FLD_INTF_1_SW_TRG_MUX;
  105. }
  106. }
  107. }
  108. SDE_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
  109. SDE_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
  110. SDE_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
  111. SDE_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
  112. }
  113. static void sde_hw_setup_pp_split(struct sde_hw_mdp *mdp,
  114. struct split_pipe_cfg *cfg)
  115. {
  116. u32 ppb_config = 0x0;
  117. u32 ppb_control = 0x0;
  118. if (!mdp || !cfg)
  119. return;
  120. if (cfg->en && cfg->pp_split_slave != INTF_MAX) {
  121. ppb_config |= (cfg->pp_split_slave - INTF_0 + 1) << 20;
  122. ppb_config |= BIT(16); /* split enable */
  123. ppb_control = BIT(5); /* horz split*/
  124. }
  125. if (cfg->pp_split_index) {
  126. SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, 0x0);
  127. SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, 0x0);
  128. SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, ppb_config);
  129. SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, ppb_control);
  130. } else {
  131. SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, ppb_config);
  132. SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, ppb_control);
  133. SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, 0x0);
  134. SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, 0x0);
  135. }
  136. }
  137. static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp,
  138. struct cdm_output_cfg *cfg)
  139. {
  140. struct sde_hw_blk_reg_map *c;
  141. u32 out_ctl = 0;
  142. if (!mdp || !cfg)
  143. return;
  144. c = &mdp->hw;
  145. if (cfg->wb_en)
  146. out_ctl |= BIT(24);
  147. else if (cfg->intf_en)
  148. out_ctl |= BIT(19);
  149. SDE_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
  150. }
  151. static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp,
  152. enum sde_clk_ctrl_type clk_ctrl, bool enable)
  153. {
  154. struct sde_hw_blk_reg_map *c;
  155. u32 reg_off, bit_off;
  156. u32 reg_val, new_val;
  157. bool clk_forced_on;
  158. if (!mdp)
  159. return false;
  160. c = &mdp->hw;
  161. if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX)
  162. return false;
  163. reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
  164. bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
  165. reg_val = SDE_REG_READ(c, reg_off);
  166. if (enable)
  167. new_val = reg_val | BIT(bit_off);
  168. else
  169. new_val = reg_val & ~BIT(bit_off);
  170. SDE_REG_WRITE(c, reg_off, new_val);
  171. wmb(); /* ensure write finished before progressing */
  172. clk_forced_on = !(reg_val & BIT(bit_off));
  173. return clk_forced_on;
  174. }
  175. static int sde_hw_get_clk_ctrl_status(struct sde_hw_mdp *mdp,
  176. enum sde_clk_ctrl_type clk_ctrl, bool *status)
  177. {
  178. struct sde_hw_blk_reg_map *c;
  179. u32 reg_off, bit_off;
  180. if (!mdp)
  181. return -EINVAL;
  182. c = &mdp->hw;
  183. if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX ||
  184. !mdp->caps->clk_status[clk_ctrl].reg_off)
  185. return -EINVAL;
  186. reg_off = mdp->caps->clk_status[clk_ctrl].reg_off;
  187. bit_off = mdp->caps->clk_status[clk_ctrl].bit_off;
  188. *status = SDE_REG_READ(c, reg_off) & BIT(bit_off);
  189. return 0;
  190. }
/*
 * _update_vsync_source - program the watchdog timer backing a WD vsync source
 * @mdp: mdp top hw block
 * @cfg: vsync source configuration (source id and target frame rate)
 *
 * Only acts when the selected source is a WD timer; other vsync sources
 * need no timer programming. NOTE(review): the range check assumes the
 * enum orders WD_TIMER_4 lowest through WD_TIMER_0 highest, consistent
 * with the switch cases below — confirm against the enum definition.
 */
static void _update_vsync_source(struct sde_hw_mdp *mdp,
		struct sde_vsync_source_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c;
	u32 reg, wd_load_value, wd_ctl, wd_ctl2;

	if (!mdp || !cfg)
		return;

	c = &mdp->hw;

	if (cfg->vsync_source >= SDE_VSYNC_SOURCE_WD_TIMER_4 &&
			cfg->vsync_source <= SDE_VSYNC_SOURCE_WD_TIMER_0) {
		/* select the register set of the chosen WD timer instance */
		switch (cfg->vsync_source) {
		case SDE_VSYNC_SOURCE_WD_TIMER_4:
			wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_4_CTL;
			wd_ctl2 = MDP_WD_TIMER_4_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_3:
			wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_3_CTL;
			wd_ctl2 = MDP_WD_TIMER_3_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_2:
			wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_2_CTL;
			wd_ctl2 = MDP_WD_TIMER_2_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_1:
			wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_1_CTL;
			wd_ctl2 = MDP_WD_TIMER_1_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_0:
		default:
			wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_0_CTL;
			wd_ctl2 = MDP_WD_TIMER_0_CTL2;
			break;
		}

		/* derive the timer reload value from the frame rate */
		SDE_REG_WRITE(c, wd_load_value, CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));

		SDE_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
		reg = SDE_REG_READ(c, wd_ctl2);
		reg |= BIT(8); /* enable heartbeat timer */
		reg |= BIT(0); /* enable WD timer */
		SDE_REG_WRITE(c, wd_ctl2, reg);

		/* make sure that timers are enabled/disabled for vsync state */
		wmb();
	}
}
  239. static void sde_hw_setup_vsync_source(struct sde_hw_mdp *mdp,
  240. struct sde_vsync_source_cfg *cfg)
  241. {
  242. struct sde_hw_blk_reg_map *c;
  243. u32 reg, i;
  244. static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
  245. if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
  246. return;
  247. c = &mdp->hw;
  248. reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
  249. for (i = 0; i < cfg->pp_count; i++) {
  250. int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
  251. if (pp_idx >= ARRAY_SIZE(pp_offset))
  252. continue;
  253. reg &= ~(0xf << pp_offset[pp_idx]);
  254. reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
  255. }
  256. SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
  257. _update_vsync_source(mdp, cfg);
  258. }
/*
 * sde_hw_setup_vsync_source_v1 - vsync source setup for targets without
 * an MDP-level vsync mux; only the WD timer needs programming.
 */
static void sde_hw_setup_vsync_source_v1(struct sde_hw_mdp *mdp,
		struct sde_vsync_source_cfg *cfg)
{
	_update_vsync_source(mdp, cfg);
}
/*
 * sde_hw_reset_ubwc - reprogram the UBWC static configuration registers
 * @mdp: mdp top hw block
 * @m: mdss catalog (supplies encoder revision and tiling parameters)
 *
 * The decoder version is read from hardware while the encoder version
 * comes from the catalog; the register layout programmed below depends
 * on the decoder generation (2.0 / 3.0 / 4.0+).
 */
void sde_hw_reset_ubwc(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m)
{
	struct sde_hw_blk_reg_map c;
	u32 ubwc_dec_version;
	u32 ubwc_enc_version;

	if (!mdp || !m)
		return;

	/* force blk offset to zero to access beginning of register region */
	c = mdp->hw;
	c.blk_off = 0x0;

	ubwc_dec_version = SDE_REG_READ(&c, UBWC_DEC_HW_VERSION);
	ubwc_enc_version = m->ubwc_rev;

	if (IS_UBWC_40_SUPPORTED(ubwc_dec_version) || IS_UBWC_43_SUPPORTED(ubwc_dec_version)) {
		/* 4.x decoders take a version/mode pair plus a packed static reg */
		u32 ver = IS_UBWC_43_SUPPORTED(ubwc_dec_version) ? 3 : 2;
		u32 mode = 1;
		u32 reg = (m->mdp[0].ubwc_swizzle & 0x7) |
			((m->mdp[0].ubwc_static & 0x1) << 3) |
			((m->mdp[0].highest_bank_bit & 0x7) << 4) |
			((m->macrotile_mode & 0x1) << 12);

		/* 3.0 encoder paired with a 4.x decoder uses legacy settings */
		if (IS_UBWC_30_SUPPORTED(ubwc_enc_version)) {
			ver = 1;
			mode = 0;
		}

		SDE_REG_WRITE(&c, UBWC_STATIC, reg);
		SDE_REG_WRITE(&c, UBWC_CTRL_2, ver);
		SDE_REG_WRITE(&c, UBWC_PREDICTION_MODE, mode);
	} else if (IS_UBWC_20_SUPPORTED(ubwc_dec_version)) {
		/* 2.0 decoder: catalog value programmed verbatim */
		SDE_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
	} else if (IS_UBWC_30_SUPPORTED(ubwc_dec_version)) {
		/* 3.0 decoder: single packed static register */
		u32 reg = m->mdp[0].ubwc_static |
			(m->mdp[0].ubwc_swizzle & 0x1) |
			((m->mdp[0].highest_bank_bit & 0x3) << 4) |
			((m->macrotile_mode & 0x1) << 12);

		if (IS_UBWC_30_SUPPORTED(ubwc_enc_version))
			reg |= BIT(10);
		if (IS_UBWC_10_SUPPORTED(ubwc_enc_version))
			reg |= BIT(8);

		SDE_REG_WRITE(&c, UBWC_STATIC, reg);
	} else {
		SDE_ERROR("unsupported ubwc decoder version 0x%08x\n", ubwc_dec_version);
	}
}
  306. static void sde_hw_intf_audio_select(struct sde_hw_mdp *mdp)
  307. {
  308. struct sde_hw_blk_reg_map *c;
  309. if (!mdp)
  310. return;
  311. c = &mdp->hw;
  312. SDE_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
  313. }
  314. static void sde_hw_mdp_events(struct sde_hw_mdp *mdp, bool enable)
  315. {
  316. struct sde_hw_blk_reg_map *c;
  317. if (!mdp)
  318. return;
  319. c = &mdp->hw;
  320. SDE_REG_WRITE(c, HW_EVENTS_CTL, enable);
  321. }
  322. void sde_hw_set_vm_sid_v2(struct sde_hw_sid *sid, u32 vm, struct sde_mdss_cfg *m)
  323. {
  324. u32 offset = 0;
  325. int i;
  326. if (!sid || !m)
  327. return;
  328. for (i = 0; i < m->ctl_count; i++) {
  329. offset = MDP_SID_V2_CTL_0 + (i * 4);
  330. SDE_REG_WRITE(&sid->hw, offset, vm << 2);
  331. }
  332. for (i = 0; i < m->ltm_count; i++) {
  333. offset = MDP_SID_V2_LTM0 + (i * 4);
  334. SDE_REG_WRITE(&sid->hw, offset, vm << 2);
  335. }
  336. SDE_REG_WRITE(&sid->hw, MDP_SID_V2_IPC_READ, vm << 2);
  337. SDE_REG_WRITE(&sid->hw, MDP_SID_V2_LUTDMA_RD, vm << 2);
  338. SDE_REG_WRITE(&sid->hw, MDP_SID_V2_LUTDMA_WR, vm << 2);
  339. SDE_REG_WRITE(&sid->hw, MDP_SID_V2_LUTDMA_SB_RD, vm << 2);
  340. SDE_REG_WRITE(&sid->hw, MDP_SID_V2_DSI0, vm << 2);
  341. SDE_REG_WRITE(&sid->hw, MDP_SID_V2_DSI1, vm << 2);
  342. }
  343. void sde_hw_set_vm_sid(struct sde_hw_sid *sid, u32 vm, struct sde_mdss_cfg *m)
  344. {
  345. if (!sid || !m)
  346. return;
  347. SDE_REG_WRITE(&sid->hw, MDP_SID_XIN7, vm << 2);
  348. }
  349. struct sde_hw_sid *sde_hw_sid_init(void __iomem *addr,
  350. u32 sid_len, const struct sde_mdss_cfg *m)
  351. {
  352. struct sde_hw_sid *c;
  353. c = kzalloc(sizeof(*c), GFP_KERNEL);
  354. if (!c)
  355. return ERR_PTR(-ENOMEM);
  356. c->hw.base_off = addr;
  357. c->hw.blk_off = 0;
  358. c->hw.length = sid_len;
  359. c->hw.hw_rev = m->hw_rev;
  360. c->hw.log_mask = SDE_DBG_MASK_SID;
  361. if (IS_SDE_SID_REV_200(m->sid_rev))
  362. c->ops.set_vm_sid = sde_hw_set_vm_sid_v2;
  363. else
  364. c->ops.set_vm_sid = sde_hw_set_vm_sid;
  365. return c;
  366. }
  367. void sde_hw_set_rotator_sid(struct sde_hw_sid *sid)
  368. {
  369. if (!sid)
  370. return;
  371. SDE_REG_WRITE(&sid->hw, MDP_SID_ROT_RD, ROT_SID_ID_VAL);
  372. SDE_REG_WRITE(&sid->hw, MDP_SID_ROT_WR, ROT_SID_ID_VAL);
  373. }
  374. void sde_hw_set_sspp_sid(struct sde_hw_sid *sid, u32 pipe, u32 vm,
  375. struct sde_mdss_cfg *m)
  376. {
  377. u32 offset = 0;
  378. u32 vig_sid_offset = MDP_SID_VIG0;
  379. u32 dma_sid_offset = MDP_SID_DMA0;
  380. if (!sid)
  381. return;
  382. if (IS_SDE_SID_REV_200(m->sid_rev)) {
  383. vig_sid_offset = MDP_SID_V2_VIG0;
  384. dma_sid_offset = MDP_SID_V2_DMA0;
  385. }
  386. if (SDE_SSPP_VALID_VIG(pipe))
  387. offset = vig_sid_offset + ((pipe - SSPP_VIG0) * 4);
  388. else if (SDE_SSPP_VALID_DMA(pipe))
  389. offset = dma_sid_offset + ((pipe - SSPP_DMA0) * 4);
  390. else
  391. return;
  392. SDE_REG_WRITE(&sid->hw, offset, vm << 2);
  393. }
  394. static void sde_hw_program_cwb_ppb_ctrl(struct sde_hw_mdp *mdp,
  395. bool dual, bool dspp_out)
  396. {
  397. u32 value = dspp_out ? 0x4 : 0x0;
  398. SDE_REG_WRITE(&mdp->hw, PPB2_CNTL, value);
  399. if (dual) {
  400. value |= 0x1;
  401. SDE_REG_WRITE(&mdp->hw, PPB3_CNTL, value);
  402. }
  403. }
  404. static void sde_hw_set_hdr_plus_metadata(struct sde_hw_mdp *mdp,
  405. u8 *payload, u32 len, u32 stream_id)
  406. {
  407. u32 i, b;
  408. u32 length = len - 1;
  409. u32 d_offset, nb_offset, data = 0;
  410. const u32 dword_size = sizeof(u32);
  411. bool is_4k_aligned = mdp->caps->features &
  412. BIT(SDE_MDP_DHDR_MEMPOOL_4K);
  413. if (!payload || !len) {
  414. SDE_ERROR("invalid payload with length: %d\n", len);
  415. return;
  416. }
  417. if (stream_id) {
  418. if (is_4k_aligned) {
  419. d_offset = DP_DHDR_MEM_POOL_1_DATA_4K;
  420. nb_offset = DP_DHDR_MEM_POOL_1_NUM_BYTES_4K;
  421. } else {
  422. d_offset = DP_DHDR_MEM_POOL_1_DATA;
  423. nb_offset = DP_DHDR_MEM_POOL_1_NUM_BYTES;
  424. }
  425. } else {
  426. if (is_4k_aligned) {
  427. d_offset = DP_DHDR_MEM_POOL_0_DATA_4K;
  428. nb_offset = DP_DHDR_MEM_POOL_0_NUM_BYTES_4K;
  429. } else {
  430. d_offset = DP_DHDR_MEM_POOL_0_DATA;
  431. nb_offset = DP_DHDR_MEM_POOL_0_NUM_BYTES;
  432. }
  433. }
  434. /* payload[0] is set in VSCEXT header byte 1, skip programming here */
  435. SDE_REG_WRITE(&mdp->hw, nb_offset, length);
  436. for (i = 1; i < len; i += dword_size) {
  437. for (b = 0; (i + b) < len && b < dword_size; b++)
  438. data |= payload[i + b] << (8 * b);
  439. SDE_REG_WRITE(&mdp->hw, d_offset, data);
  440. data = 0;
  441. }
  442. }
  443. static u32 sde_hw_get_autorefresh_status(struct sde_hw_mdp *mdp, u32 intf_idx)
  444. {
  445. struct sde_hw_blk_reg_map *c;
  446. u32 autorefresh_status;
  447. u32 blk_id = (intf_idx == INTF_2) ? 65 : 64;
  448. if (!mdp)
  449. return 0;
  450. c = &mdp->hw;
  451. SDE_REG_WRITE(&mdp->hw, MDP_PERIPH_DBGBUS_CTRL,
  452. TEST_MASK(blk_id, AUTOREFRESH_TEST_POINT));
  453. SDE_REG_WRITE(&mdp->hw, MDP_DSPP_DBGBUS_CTRL, 0x7001);
  454. wmb(); /* make sure test bits were written */
  455. autorefresh_status = SDE_REG_READ(&mdp->hw, MDP_DSPP_DBGBUS_STATUS);
  456. SDE_REG_WRITE(&mdp->hw, MDP_PERIPH_DBGBUS_CTRL, 0x0);
  457. return autorefresh_status;
  458. }
  459. static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
  460. unsigned long cap)
  461. {
  462. ops->setup_split_pipe = sde_hw_setup_split_pipe;
  463. ops->setup_pp_split = sde_hw_setup_pp_split;
  464. ops->setup_cdm_output = sde_hw_setup_cdm_output;
  465. ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
  466. ops->get_clk_ctrl_status = sde_hw_get_clk_ctrl_status;
  467. ops->set_cwb_ppb_cntl = sde_hw_program_cwb_ppb_ctrl;
  468. ops->reset_ubwc = sde_hw_reset_ubwc;
  469. ops->intf_audio_select = sde_hw_intf_audio_select;
  470. ops->set_mdp_hw_events = sde_hw_mdp_events;
  471. if (cap & BIT(SDE_MDP_VSYNC_SEL))
  472. ops->setup_vsync_source = sde_hw_setup_vsync_source;
  473. else if (cap & BIT(SDE_MDP_WD_TIMER))
  474. ops->setup_vsync_source = sde_hw_setup_vsync_source_v1;
  475. if (cap & BIT(SDE_MDP_DHDR_MEMPOOL_4K) ||
  476. cap & BIT(SDE_MDP_DHDR_MEMPOOL))
  477. ops->set_hdr_plus_metadata = sde_hw_set_hdr_plus_metadata;
  478. ops->get_autorefresh_status = sde_hw_get_autorefresh_status;
  479. }
  480. static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp,
  481. const struct sde_mdss_cfg *m,
  482. void __iomem *addr,
  483. struct sde_hw_blk_reg_map *b)
  484. {
  485. int i;
  486. if (!m || !addr || !b)
  487. return ERR_PTR(-EINVAL);
  488. for (i = 0; i < m->mdp_count; i++) {
  489. if (mdp == m->mdp[i].id) {
  490. b->base_off = addr;
  491. b->blk_off = m->mdp[i].base;
  492. b->length = m->mdp[i].len;
  493. b->hw_rev = m->hw_rev;
  494. b->log_mask = SDE_DBG_MASK_TOP;
  495. return &m->mdp[i];
  496. }
  497. }
  498. return ERR_PTR(-EINVAL);
  499. }
/*
 * sde_hw_mdptop_init - allocate and initialize the mdp top hw block
 * @idx: mdp instance id
 * @addr: mapped register base address
 * @m: mdss catalog
 *
 * Also registers the block's register ranges with the debug facility;
 * targets with SDE_MDP_PERIPH_TOP_0_REMOVED split the dump into two
 * ranges around the removed peripheral window.
 *
 * Return: mdp top block pointer, or ERR_PTR on failure. Caller owns the
 * returned object and releases it with sde_hw_mdp_destroy().
 */
struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
		void __iomem *addr,
		const struct sde_mdss_cfg *m)
{
	struct sde_hw_mdp *mdp;
	const struct sde_mdp_cfg *cfg;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
	if (!mdp)
		return ERR_PTR(-ENOMEM);

	/* resolve catalog entry and fill mdp->hw register map */
	cfg = _top_offset(idx, m, addr, &mdp->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(mdp);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Assign ops
	 */
	mdp->idx = idx;
	mdp->caps = cfg;
	_setup_mdp_ops(&mdp->ops, mdp->caps->features);

	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "mdss_hw", 0,
			m->mdss_hw_block_size, 0);

	if (test_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &m->mdp[0].features)) {
		char name[SDE_HW_BLK_NAME_LEN];

		snprintf(name, sizeof(name), "%s_1", cfg->name);

		/* dump in two pieces, skipping the removed peripheral range */
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, mdp->hw.blk_off,
				mdp->hw.blk_off + MDP_PERIPH_TOP0, mdp->hw.xin_id);
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, name, mdp->hw.blk_off + MDP_SSPP_TOP2,
				mdp->hw.blk_off + mdp->hw.length, mdp->hw.xin_id);
	} else {
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
				mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length,
				mdp->hw.xin_id);
	}

	sde_dbg_set_sde_top_offset(mdp->hw.blk_off);

	return mdp;
}
/* sde_hw_mdp_destroy - release an mdp top block; kfree(NULL) is a no-op */
void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp)
{
	kfree(mdp);
}