sde_hw_top.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
  7. #include "sde_hwio.h"
  8. #include "sde_hw_catalog.h"
  9. #include "sde_hw_top.h"
  10. #include "sde_dbg.h"
  11. #include "sde_kms.h"
  12. #define SSPP_SPARE 0x28
  13. #define UBWC_DEC_HW_VERSION 0x058
  14. #define UBWC_STATIC 0x144
  15. #define UBWC_CTRL_2 0x150
  16. #define UBWC_PREDICTION_MODE 0x154
  17. #define FLD_SPLIT_DISPLAY_CMD BIT(1)
  18. #define FLD_SMART_PANEL_FREE_RUN BIT(2)
  19. #define FLD_INTF_1_SW_TRG_MUX BIT(4)
  20. #define FLD_INTF_2_SW_TRG_MUX BIT(8)
  21. #define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
  22. #define MDP_DSPP_DBGBUS_CTRL 0x348
  23. #define MDP_DSPP_DBGBUS_STATUS 0x34C
  24. #define DANGER_STATUS 0x360
  25. #define SAFE_STATUS 0x364
  26. #define TE_LINE_INTERVAL 0x3F4
  27. #define TRAFFIC_SHAPER_EN BIT(31)
  28. #define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
  29. #define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
  30. #define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
  31. #define MDP_WD_TIMER_0_CTL 0x380
  32. #define MDP_WD_TIMER_0_CTL2 0x384
  33. #define MDP_WD_TIMER_0_LOAD_VALUE 0x388
  34. #define MDP_WD_TIMER_1_CTL 0x390
  35. #define MDP_WD_TIMER_1_CTL2 0x394
  36. #define MDP_WD_TIMER_1_LOAD_VALUE 0x398
  37. #define MDP_PERIPH_DBGBUS_CTRL 0x418
  38. #define MDP_WD_TIMER_2_CTL 0x420
  39. #define MDP_WD_TIMER_2_CTL2 0x424
  40. #define MDP_WD_TIMER_2_LOAD_VALUE 0x428
  41. #define MDP_WD_TIMER_3_CTL 0x430
  42. #define MDP_WD_TIMER_3_CTL2 0x434
  43. #define MDP_WD_TIMER_3_LOAD_VALUE 0x438
  44. #define MDP_WD_TIMER_4_CTL 0x440
  45. #define MDP_WD_TIMER_4_CTL2 0x444
  46. #define MDP_WD_TIMER_4_LOAD_VALUE 0x448
  47. #define MDP_PERIPH_TOP0 0x380
  48. #define MDP_SSPP_TOP2 0x3A8
  49. #define AUTOREFRESH_TEST_POINT 0x2
  50. #define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
  51. #define DCE_SEL 0x450
  52. #define MDP_SID_V2_VIG0 0x000
  53. #define MDP_SID_V2_DMA0 0x040
  54. #define MDP_SID_V2_CTL_0 0x100
  55. #define MDP_SID_V2_LTM0 0x400
  56. #define MDP_SID_V2_IPC_READ 0x200
  57. #define MDP_SID_V2_LUTDMA_RD 0x300
  58. #define MDP_SID_V2_LUTDMA_WR 0x304
  59. #define MDP_SID_V2_LUTDMA_SB_RD 0x308
  60. #define MDP_SID_V2_LUTDMA_VM_0 0x310
  61. #define MDP_SID_V2_DSI0 0x500
  62. #define MDP_SID_V2_DSI1 0x504
  63. #define MDP_SID_VIG0 0x0
  64. #define MDP_SID_VIG1 0x4
  65. #define MDP_SID_VIG2 0x8
  66. #define MDP_SID_VIG3 0xC
  67. #define MDP_SID_DMA0 0x10
  68. #define MDP_SID_DMA1 0x14
  69. #define MDP_SID_DMA2 0x18
  70. #define MDP_SID_DMA3 0x1C
  71. #define MDP_SID_ROT_RD 0x20
  72. #define MDP_SID_ROT_WR 0x24
  73. #define MDP_SID_WB2 0x28
  74. #define MDP_SID_XIN7 0x2C
  75. #define ROT_SID_ID_VAL 0x1c
  76. /* HW Fences */
  77. #define MDP_CTL_HW_FENCE_CTRL 0x14000
  78. #define MDP_CTL_HW_FENCE_ID_START_ADDR 0x14004
  79. #define MDP_CTL_HW_FENCE_ID_STATUS 0x14008
  80. #define MDP_CTL_HW_FENCE_ID_TIMESTAMP_CTRL 0x1400c
  81. #define MDP_CTL_HW_FENCE_INPUT_START_TIMESTAMP0 0x14010
  82. #define MDP_CTL_HW_FENCE_INPUT_START_TIMESTAMP1 0x14014
  83. #define MDP_CTL_HW_FENCE_INPUT_END_TIMESTAMP0 0x14018
  84. #define MDP_CTL_HW_FENCE_INPUT_END_TIMESTAMP1 0x1401c
  85. #define MDP_CTL_HW_FENCE_QOS 0x14020
  86. #define MDP_CTL_HW_FENCE_IDn_ISR 0x14050
  87. #define MDP_CTL_HW_FENCE_IDm_ADDR 0x14054
  88. #define MDP_CTL_HW_FENCE_IDm_DATA 0x14058
  89. #define MDP_CTL_HW_FENCE_IDm_MASK 0x1405c
  90. #define MDP_CTL_HW_FENCE_IDm_ATTR 0x14060
  91. #define HW_FENCE_IPCC_PROTOCOLp_CLIENTc_SEND(ba, p, c) ((ba+0xc) + (0x40000*p) + (0x1000*c))
  92. #define HW_FENCE_IPCC_PROTOCOLp_CLIENTc_RECV_ID(ba, p, c) ((ba+0x10) + (0x40000*p) + (0x1000*c))
  93. #define MDP_CTL_HW_FENCE_ID_OFFSET_n(base, n) (base + (0x14*n))
  94. #define MDP_CTL_HW_FENCE_ID_OFFSET_m(base, m) (base + (0x14*m))
  95. #define MDP_CTL_FENCE_ATTRS(devicetype, size, resp_req) \
  96. (((resp_req & 0x1) << 16) | ((size & 0x7) << 4) | (devicetype & 0xf))
  97. #define MDP_CTL_FENCE_ISR_OP_CODE(opcode, op0, op1, op2) \
  98. (((op2 & 0xff) << 24) | ((op1 & 0xff) << 16) | ((op0 & 0xff) << 8) | (opcode & 0xff))
  99. #define HW_FENCE_DPU_INPUT_FENCE_START_N 0
  100. #define HW_FENCE_DPU_OUTPUT_FENCE_START_N 4
  101. #define HW_FENCE_IPCC_FENCE_PROTOCOL_ID 4
  102. #define HW_FENCE_DPU_FENCE_PROTOCOL_ID 3
/*
 * sde_hw_setup_split_pipe - program the split-display control registers
 * @mdp: mdp top-level hardware block
 * @cfg: split pipe configuration (enable, intf mode, master intf, pp slave)
 *
 * Programs SPLIT_DISPLAY_{LOWER,UPPER}_PIPE_CTRL, the split-flush spare bit
 * and the split-display enable from @cfg. No-op on NULL arguments.
 */
static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
		struct split_pipe_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c;
	u32 upper_pipe = 0;
	u32 lower_pipe = 0;

	if (!mdp || !cfg)
		return;

	c = &mdp->hw;

	if (test_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &mdp->caps->features) && cfg->en) {
		/* avoid programming of legacy bits like SW_TRG_MUX for new targets */
		if (cfg->mode == INTF_MODE_CMD) {
			lower_pipe |= BIT(mdp->caps->smart_panel_align_mode);
			upper_pipe = lower_pipe;

			/* NOTE(review): plain assignment (not OR) drops the
			 * align-mode bit in pp-split slave mode; this mirrors
			 * the legacy path below — confirm intended.
			 */
			if (cfg->pp_split_slave != INTF_MAX)
				lower_pipe = FLD_SMART_PANEL_FREE_RUN;
		}
	} else if (cfg->en) {
		if (cfg->mode == INTF_MODE_CMD) {
			lower_pipe = FLD_SPLIT_DISPLAY_CMD;
			/* interface controlling sw trigger */
			if (cfg->intf == INTF_2)
				lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
			else
				lower_pipe |= FLD_INTF_2_SW_TRG_MUX;

			/* free run (assignment overwrites the bits above) */
			if (cfg->pp_split_slave != INTF_MAX)
				lower_pipe = FLD_SMART_PANEL_FREE_RUN;

			upper_pipe = lower_pipe;

			/* smart panel align mode */
			lower_pipe |= BIT(mdp->caps->smart_panel_align_mode);
		} else {
			/* video mode: cross-wire the sw trigger muxes */
			if (cfg->intf == INTF_2) {
				lower_pipe = FLD_INTF_1_SW_TRG_MUX;
				upper_pipe = FLD_INTF_2_SW_TRG_MUX;
			} else {
				lower_pipe = FLD_INTF_2_SW_TRG_MUX;
				upper_pipe = FLD_INTF_1_SW_TRG_MUX;
			}
		}
	}

	SDE_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
	SDE_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
	SDE_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
	SDE_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
}
  149. static void sde_hw_setup_pp_split(struct sde_hw_mdp *mdp,
  150. struct split_pipe_cfg *cfg)
  151. {
  152. u32 ppb_config = 0x0;
  153. u32 ppb_control = 0x0;
  154. if (!mdp || !cfg)
  155. return;
  156. if (cfg->en && cfg->pp_split_slave != INTF_MAX) {
  157. ppb_config |= (cfg->pp_split_slave - INTF_0 + 1) << 20;
  158. ppb_config |= BIT(16); /* split enable */
  159. ppb_control = BIT(5); /* horz split*/
  160. }
  161. if (cfg->pp_split_index) {
  162. SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, 0x0);
  163. SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, 0x0);
  164. SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, ppb_config);
  165. SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, ppb_control);
  166. } else {
  167. SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, ppb_config);
  168. SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, ppb_control);
  169. SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, 0x0);
  170. SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, 0x0);
  171. }
  172. }
  173. static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp,
  174. struct cdm_output_cfg *cfg)
  175. {
  176. struct sde_hw_blk_reg_map *c;
  177. u32 out_ctl = 0;
  178. if (!mdp || !cfg)
  179. return;
  180. c = &mdp->hw;
  181. if (cfg->wb_en)
  182. out_ctl |= BIT(24);
  183. else if (cfg->intf_en)
  184. out_ctl |= BIT(19);
  185. SDE_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
  186. }
  187. static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp,
  188. enum sde_clk_ctrl_type clk_ctrl, bool enable)
  189. {
  190. struct sde_hw_blk_reg_map *c;
  191. u32 reg_off, bit_off;
  192. u32 reg_val, new_val;
  193. bool clk_forced_on;
  194. if (!mdp)
  195. return false;
  196. c = &mdp->hw;
  197. if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX)
  198. return false;
  199. reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
  200. bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
  201. reg_val = SDE_REG_READ(c, reg_off);
  202. if (enable)
  203. new_val = reg_val | BIT(bit_off);
  204. else
  205. new_val = reg_val & ~BIT(bit_off);
  206. SDE_REG_WRITE(c, reg_off, new_val);
  207. wmb(); /* ensure write finished before progressing */
  208. clk_forced_on = !(reg_val & BIT(bit_off));
  209. return clk_forced_on;
  210. }
  211. static int sde_hw_get_clk_ctrl_status(struct sde_hw_mdp *mdp,
  212. enum sde_clk_ctrl_type clk_ctrl, bool *status)
  213. {
  214. struct sde_hw_blk_reg_map *c;
  215. u32 reg_off, bit_off;
  216. if (!mdp)
  217. return -EINVAL;
  218. c = &mdp->hw;
  219. if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX ||
  220. !mdp->caps->clk_status[clk_ctrl].reg_off)
  221. return -EINVAL;
  222. reg_off = mdp->caps->clk_status[clk_ctrl].reg_off;
  223. bit_off = mdp->caps->clk_status[clk_ctrl].bit_off;
  224. *status = SDE_REG_READ(c, reg_off) & BIT(bit_off);
  225. return 0;
  226. }
/*
 * _update_vsync_source - program and arm a watchdog-timer vsync source
 * @mdp: mdp top-level hardware block
 * @cfg: vsync source selection and frame rate
 *
 * Only acts when the selected source is one of the WD timers: loads the
 * timer from @cfg->frame_rate, clears it, then enables the heartbeat and
 * WD timer enable bits.
 */
static void _update_vsync_source(struct sde_hw_mdp *mdp,
		struct sde_vsync_source_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c;
	u32 reg, wd_load_value, wd_ctl, wd_ctl2;

	if (!mdp || !cfg)
		return;

	c = &mdp->hw;

	/* NOTE(review): the range check assumes the enum is ordered
	 * WD_TIMER_4 < ... < WD_TIMER_0 — confirm against the enum decl
	 */
	if (cfg->vsync_source >= SDE_VSYNC_SOURCE_WD_TIMER_4 &&
			cfg->vsync_source <= SDE_VSYNC_SOURCE_WD_TIMER_0) {
		switch (cfg->vsync_source) {
		case SDE_VSYNC_SOURCE_WD_TIMER_4:
			wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_4_CTL;
			wd_ctl2 = MDP_WD_TIMER_4_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_3:
			wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_3_CTL;
			wd_ctl2 = MDP_WD_TIMER_3_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_2:
			wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_2_CTL;
			wd_ctl2 = MDP_WD_TIMER_2_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_1:
			wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_1_CTL;
			wd_ctl2 = MDP_WD_TIMER_1_CTL2;
			break;
		case SDE_VSYNC_SOURCE_WD_TIMER_0:
		default:
			wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_0_CTL;
			wd_ctl2 = MDP_WD_TIMER_0_CTL2;
			break;
		}

		SDE_REG_WRITE(c, wd_load_value, CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
		SDE_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
		reg = SDE_REG_READ(c, wd_ctl2);
		reg |= BIT(8); /* enable heartbeat timer */
		reg |= BIT(0); /* enable WD timer */
		SDE_REG_WRITE(c, wd_ctl2, reg);

		/* make sure that timers are enabled/disabled for vsync state */
		wmb();
	}
}
  275. static void sde_hw_setup_vsync_source(struct sde_hw_mdp *mdp,
  276. struct sde_vsync_source_cfg *cfg)
  277. {
  278. struct sde_hw_blk_reg_map *c;
  279. u32 reg, i;
  280. static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
  281. if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
  282. return;
  283. c = &mdp->hw;
  284. reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
  285. for (i = 0; i < cfg->pp_count; i++) {
  286. int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
  287. if (pp_idx >= ARRAY_SIZE(pp_offset))
  288. continue;
  289. reg &= ~(0xf << pp_offset[pp_idx]);
  290. reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
  291. }
  292. SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
  293. _update_vsync_source(mdp, cfg);
  294. }
/*
 * sde_hw_setup_vsync_source_v1 - vsync source setup for targets without
 * the MDP_VSYNC_SEL mux; only the WD timer needs programming.
 * @mdp: mdp top-level hardware block
 * @cfg: vsync source configuration
 */
static void sde_hw_setup_vsync_source_v1(struct sde_hw_mdp *mdp,
		struct sde_vsync_source_cfg *cfg)
{
	_update_vsync_source(mdp, cfg);
}
/*
 * sde_hw_reset_ubwc - reprogram the UBWC static/control registers
 * @mdp: mdp top-level hardware block
 * @m: mdss catalog (supplies ubwc swizzle, bank bit, macrotile mode)
 *
 * Reads the decoder HW version from the register file and programs the
 * matching register layout; the encoder version comes from the catalog
 * (@m->ubwc_rev). Logs an error for unsupported decoder versions.
 */
void sde_hw_reset_ubwc(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m)
{
	struct sde_hw_blk_reg_map c;
	u32 ubwc_dec_version;
	u32 ubwc_enc_version;

	if (!mdp || !m)
		return;

	/* force blk offset to zero to access beginning of register region */
	c = mdp->hw;
	c.blk_off = 0x0;

	ubwc_dec_version = SDE_REG_READ(&c, UBWC_DEC_HW_VERSION);
	ubwc_enc_version = m->ubwc_rev;

	if (IS_UBWC_40_SUPPORTED(ubwc_dec_version) || IS_UBWC_43_SUPPORTED(ubwc_dec_version)) {
		/* 4.x decoders: version select plus prediction mode */
		u32 ver = IS_UBWC_43_SUPPORTED(ubwc_dec_version) ? 3 : 2;
		u32 mode = 1;
		u32 reg = (m->mdp[0].ubwc_swizzle & 0x7) |
			((m->mdp[0].ubwc_static & 0x1) << 3) |
			((m->mdp[0].highest_bank_bit & 0x7) << 4) |
			((m->macrotile_mode & 0x1) << 12);

		/* 3.x encoder on a 4.x decoder uses legacy ver/mode values */
		if (IS_UBWC_30_SUPPORTED(ubwc_enc_version)) {
			ver = 1;
			mode = 0;
		}

		SDE_REG_WRITE(&c, UBWC_STATIC, reg);
		SDE_REG_WRITE(&c, UBWC_CTRL_2, ver);
		SDE_REG_WRITE(&c, UBWC_PREDICTION_MODE, mode);
	} else if (IS_UBWC_20_SUPPORTED(ubwc_dec_version)) {
		/* 2.x decoder: catalog value is written verbatim */
		SDE_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
	} else if (IS_UBWC_30_SUPPORTED(ubwc_dec_version)) {
		u32 reg = m->mdp[0].ubwc_static |
			(m->mdp[0].ubwc_swizzle & 0x1) |
			((m->mdp[0].highest_bank_bit & 0x3) << 4) |
			((m->macrotile_mode & 0x1) << 12);

		if (IS_UBWC_30_SUPPORTED(ubwc_enc_version))
			reg |= BIT(10);
		if (IS_UBWC_10_SUPPORTED(ubwc_enc_version))
			reg |= BIT(8);

		SDE_REG_WRITE(&c, UBWC_STATIC, reg);
	} else {
		SDE_ERROR("unsupported ubwc decoder version 0x%08x\n", ubwc_dec_version);
	}
}
  342. static void sde_hw_intf_audio_select(struct sde_hw_mdp *mdp)
  343. {
  344. struct sde_hw_blk_reg_map *c;
  345. if (!mdp)
  346. return;
  347. c = &mdp->hw;
  348. SDE_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
  349. }
  350. static void sde_hw_mdp_events(struct sde_hw_mdp *mdp, bool enable)
  351. {
  352. struct sde_hw_blk_reg_map *c;
  353. if (!mdp)
  354. return;
  355. c = &mdp->hw;
  356. SDE_REG_WRITE(c, HW_EVENTS_CTL, enable);
  357. }
/*
 * sde_hw_set_vm_sid_v2 - program VM SIDs on v2 SID register layouts
 * @sid: sid hardware block
 * @vm: virtual machine id (written shifted left by 2 into each SID reg)
 * @m: mdss catalog (supplies ctl/ltm block counts)
 *
 * Writes the VM id for all CTL and LTM blocks, the LUTDMA clients, the
 * IPC read client and both DSI clients.
 */
void sde_hw_set_vm_sid_v2(struct sde_hw_sid *sid, u32 vm, struct sde_mdss_cfg *m)
{
	u32 offset = 0;
	int i;

	if (!sid || !m)
		return;

	for (i = 0; i < m->ctl_count; i++) {
		offset = MDP_SID_V2_CTL_0 + (i * 4);
		SDE_REG_WRITE(&sid->hw, offset, vm << 2);
	}

	for (i = 0; i < m->ltm_count; i++) {
		offset = MDP_SID_V2_LTM0 + (i * 4);
		SDE_REG_WRITE(&sid->hw, offset, vm << 2);
	}

	/* NOTE(review): per-VM LUTDMA SIDs exist only from this hw major
	 * revision onward — confirm the SDE_HW_VER_A00 gate
	 */
	if (SDE_HW_MAJOR(sid->hw.hw_rev) >= SDE_HW_MAJOR(SDE_HW_VER_A00)) {
		for (i = 0; i < m->ctl_count; i++) {
			offset = MDP_SID_V2_LUTDMA_VM_0 + (i * 4);
			SDE_REG_WRITE(&sid->hw, offset, vm << 2);
		}
	}

	SDE_REG_WRITE(&sid->hw, MDP_SID_V2_IPC_READ, vm << 2);
	SDE_REG_WRITE(&sid->hw, MDP_SID_V2_LUTDMA_RD, vm << 2);
	SDE_REG_WRITE(&sid->hw, MDP_SID_V2_LUTDMA_WR, vm << 2);
	SDE_REG_WRITE(&sid->hw, MDP_SID_V2_LUTDMA_SB_RD, vm << 2);
	SDE_REG_WRITE(&sid->hw, MDP_SID_V2_DSI0, vm << 2);
	SDE_REG_WRITE(&sid->hw, MDP_SID_V2_DSI1, vm << 2);
}
  385. void sde_hw_set_vm_sid(struct sde_hw_sid *sid, u32 vm, struct sde_mdss_cfg *m)
  386. {
  387. if (!sid || !m)
  388. return;
  389. SDE_REG_WRITE(&sid->hw, MDP_SID_XIN7, vm << 2);
  390. }
  391. struct sde_hw_sid *sde_hw_sid_init(void __iomem *addr,
  392. u32 sid_len, const struct sde_mdss_cfg *m)
  393. {
  394. struct sde_hw_sid *c;
  395. c = kzalloc(sizeof(*c), GFP_KERNEL);
  396. if (!c)
  397. return ERR_PTR(-ENOMEM);
  398. c->hw.base_off = addr;
  399. c->hw.blk_off = 0;
  400. c->hw.length = sid_len;
  401. c->hw.hw_rev = m->hw_rev;
  402. c->hw.log_mask = SDE_DBG_MASK_SID;
  403. if (IS_SDE_SID_REV_200(m->sid_rev))
  404. c->ops.set_vm_sid = sde_hw_set_vm_sid_v2;
  405. else
  406. c->ops.set_vm_sid = sde_hw_set_vm_sid;
  407. return c;
  408. }
  409. void sde_hw_set_rotator_sid(struct sde_hw_sid *sid)
  410. {
  411. if (!sid)
  412. return;
  413. SDE_REG_WRITE(&sid->hw, MDP_SID_ROT_RD, ROT_SID_ID_VAL);
  414. SDE_REG_WRITE(&sid->hw, MDP_SID_ROT_WR, ROT_SID_ID_VAL);
  415. }
  416. void sde_hw_set_sspp_sid(struct sde_hw_sid *sid, u32 pipe, u32 vm,
  417. struct sde_mdss_cfg *m)
  418. {
  419. u32 offset = 0;
  420. u32 vig_sid_offset = MDP_SID_VIG0;
  421. u32 dma_sid_offset = MDP_SID_DMA0;
  422. if (!sid)
  423. return;
  424. if (IS_SDE_SID_REV_200(m->sid_rev)) {
  425. vig_sid_offset = MDP_SID_V2_VIG0;
  426. dma_sid_offset = MDP_SID_V2_DMA0;
  427. }
  428. if (SDE_SSPP_VALID_VIG(pipe))
  429. offset = vig_sid_offset + ((pipe - SSPP_VIG0) * 4);
  430. else if (SDE_SSPP_VALID_DMA(pipe))
  431. offset = dma_sid_offset + ((pipe - SSPP_DMA0) * 4);
  432. else
  433. return;
  434. SDE_REG_WRITE(&sid->hw, offset, vm << 2);
  435. }
  436. static void sde_hw_program_cwb_ppb_ctrl(struct sde_hw_mdp *mdp,
  437. bool dual, bool dspp_out)
  438. {
  439. u32 value = dspp_out ? 0x4 : 0x0;
  440. SDE_REG_WRITE(&mdp->hw, PPB2_CNTL, value);
  441. if (dual) {
  442. value |= 0x1;
  443. SDE_REG_WRITE(&mdp->hw, PPB3_CNTL, value);
  444. }
  445. }
  446. static void sde_hw_set_hdr_plus_metadata(struct sde_hw_mdp *mdp,
  447. u8 *payload, u32 len, u32 stream_id)
  448. {
  449. u32 i, b;
  450. u32 length = len - 1;
  451. u32 d_offset, nb_offset, data = 0;
  452. const u32 dword_size = sizeof(u32);
  453. bool is_4k_aligned = mdp->caps->features &
  454. BIT(SDE_MDP_DHDR_MEMPOOL_4K);
  455. if (!payload || !len) {
  456. SDE_ERROR("invalid payload with length: %d\n", len);
  457. return;
  458. }
  459. if (stream_id) {
  460. if (is_4k_aligned) {
  461. d_offset = DP_DHDR_MEM_POOL_1_DATA_4K;
  462. nb_offset = DP_DHDR_MEM_POOL_1_NUM_BYTES_4K;
  463. } else {
  464. d_offset = DP_DHDR_MEM_POOL_1_DATA;
  465. nb_offset = DP_DHDR_MEM_POOL_1_NUM_BYTES;
  466. }
  467. } else {
  468. if (is_4k_aligned) {
  469. d_offset = DP_DHDR_MEM_POOL_0_DATA_4K;
  470. nb_offset = DP_DHDR_MEM_POOL_0_NUM_BYTES_4K;
  471. } else {
  472. d_offset = DP_DHDR_MEM_POOL_0_DATA;
  473. nb_offset = DP_DHDR_MEM_POOL_0_NUM_BYTES;
  474. }
  475. }
  476. /* payload[0] is set in VSCEXT header byte 1, skip programming here */
  477. SDE_REG_WRITE(&mdp->hw, nb_offset, length);
  478. for (i = 1; i < len; i += dword_size) {
  479. for (b = 0; (i + b) < len && b < dword_size; b++)
  480. data |= payload[i + b] << (8 * b);
  481. SDE_REG_WRITE(&mdp->hw, d_offset, data);
  482. data = 0;
  483. }
  484. }
  485. static u32 sde_hw_get_autorefresh_status(struct sde_hw_mdp *mdp, u32 intf_idx)
  486. {
  487. struct sde_hw_blk_reg_map *c;
  488. u32 autorefresh_status;
  489. u32 blk_id = (intf_idx == INTF_2) ? 65 : 64;
  490. if (!mdp)
  491. return 0;
  492. c = &mdp->hw;
  493. SDE_REG_WRITE(&mdp->hw, MDP_PERIPH_DBGBUS_CTRL,
  494. TEST_MASK(blk_id, AUTOREFRESH_TEST_POINT));
  495. SDE_REG_WRITE(&mdp->hw, MDP_DSPP_DBGBUS_CTRL, 0x7001);
  496. wmb(); /* make sure test bits were written */
  497. autorefresh_status = SDE_REG_READ(&mdp->hw, MDP_DSPP_DBGBUS_STATUS);
  498. SDE_REG_WRITE(&mdp->hw, MDP_PERIPH_DBGBUS_CTRL, 0x0);
  499. return autorefresh_status;
  500. }
  501. static void sde_hw_hw_fence_timestamp_ctrl(struct sde_hw_mdp *mdp, bool enable, bool clear)
  502. {
  503. struct sde_hw_blk_reg_map c;
  504. u32 val;
  505. if (!mdp) {
  506. SDE_ERROR("invalid mdp, won't enable hw-fence timestamping\n");
  507. return;
  508. }
  509. /* start from the base-address of the mdss */
  510. c = mdp->hw;
  511. c.blk_off = 0x0;
  512. val = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_ID_TIMESTAMP_CTRL);
  513. if (enable)
  514. val |= BIT(0);
  515. else
  516. val &= ~BIT(0);
  517. if (clear)
  518. val |= BIT(1);
  519. else
  520. val &= ~BIT(1);
  521. SDE_REG_WRITE(&c, MDP_CTL_HW_FENCE_ID_TIMESTAMP_CTRL, val);
  522. }
  523. static void sde_hw_input_hw_fence_status(struct sde_hw_mdp *mdp, u64 *s_val, u64 *e_val)
  524. {
  525. u32 start_h, start_l, end_h, end_l;
  526. struct sde_hw_blk_reg_map c;
  527. if (!mdp || IS_ERR_OR_NULL(s_val) || IS_ERR_OR_NULL(e_val)) {
  528. SDE_ERROR("invalid mdp\n");
  529. return;
  530. }
  531. /* start from the base-address of the mdss */
  532. c = mdp->hw;
  533. c.blk_off = 0x0;
  534. start_l = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_INPUT_START_TIMESTAMP0);
  535. start_h = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_INPUT_START_TIMESTAMP1);
  536. *s_val = (u64)start_h << 32 | start_l;
  537. end_l = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_INPUT_END_TIMESTAMP0);
  538. end_h = SDE_REG_READ(&c, MDP_CTL_HW_FENCE_INPUT_END_TIMESTAMP1);
  539. *e_val = (u64)end_h << 32 | end_l;
  540. /* clear the timestamps */
  541. sde_hw_hw_fence_timestamp_ctrl(mdp, false, true);
  542. wmb(); /* make sure the timestamps are cleared */
  543. }
/*
 * sde_hw_setup_hw_fences_config - program the DPU hw-fence engine
 * @mdp: mdp top-level hardware block
 * @protocol_id: IPCC protocol to use
 * @client_phys_id: physical IPCC client id for the DPU
 * @ipcc_base_addr: base address of the IPCC register space
 *
 * Configures the fence controller, the input-fence ISR (read_reg ->
 * write-if-eq -> exit) starting at slot HW_FENCE_DPU_INPUT_FENCE_START_N,
 * and the output-fence ISR (load_data -> write_reg -> exit) starting at
 * slot HW_FENCE_DPU_OUTPUT_FENCE_START_N. Statement order mirrors the
 * register programming sequence and must be preserved.
 */
static void sde_hw_setup_hw_fences_config(struct sde_hw_mdp *mdp, u32 protocol_id,
		u32 client_phys_id, unsigned long ipcc_base_addr)
{
	u32 val, offset;
	struct sde_hw_blk_reg_map c;

	if (!mdp) {
		SDE_ERROR("invalid mdp, won't configure hw-fences\n");
		return;
	}

	/* start from the base-address of the mdss */
	c = mdp->hw;
	c.blk_off = 0x0;

	/*select ipcc protocol id for dpu */
	val = (protocol_id == HW_FENCE_IPCC_FENCE_PROTOCOL_ID) ?
		HW_FENCE_DPU_FENCE_PROTOCOL_ID : protocol_id;
	SDE_REG_WRITE(&c, MDP_CTL_HW_FENCE_CTRL, val);

	/* configure the start of the FENCE_IDn_ISR ops for input and output fence isr's */
	val = (HW_FENCE_DPU_OUTPUT_FENCE_START_N << 16) | (HW_FENCE_DPU_INPUT_FENCE_START_N & 0xFF);
	SDE_REG_WRITE(&c, MDP_CTL_HW_FENCE_ID_START_ADDR, val);

	/* setup input fence isr */

	/* configure the attribs for the isr read_reg op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ADDR, 0);
	val = HW_FENCE_IPCC_PROTOCOLp_CLIENTc_RECV_ID(ipcc_base_addr,
		protocol_id, client_phys_id);
	SDE_REG_WRITE(&c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 0);
	val = MDP_CTL_FENCE_ATTRS(0x1, 0x2, 0x1);
	SDE_REG_WRITE(&c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_MASK, 0);
	SDE_REG_WRITE(&c, offset, 0xFFFFFFFF);

	/* configure the attribs for the write if eq data */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_DATA, 1);
	SDE_REG_WRITE(&c, offset, 0x1);

	/* program input-fence isr ops */

	/* set read_reg op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		HW_FENCE_DPU_INPUT_FENCE_START_N);
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x0, 0x0, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set write if eq op for flush ready */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_INPUT_FENCE_START_N + 1));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x7, 0x0, 0x1, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set exit op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_INPUT_FENCE_START_N + 2));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0xf, 0x0, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/*setup output fence isr */

	/* configure the attribs for the isr load_data op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ADDR, 4);
	val = HW_FENCE_IPCC_PROTOCOLp_CLIENTc_SEND(ipcc_base_addr,
		protocol_id, client_phys_id);
	SDE_REG_WRITE(&c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 4);
	val = MDP_CTL_FENCE_ATTRS(0x1, 0x2, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	offset = MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_MASK, 4);
	SDE_REG_WRITE(&c, offset, 0xFFFFFFFF);

	/* program output-fence isr ops */

	/* set load_data op*/
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		HW_FENCE_DPU_OUTPUT_FENCE_START_N);
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x6, 0x0, 0x4, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set write_reg op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 1));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0x2, 0x4, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);

	/* set exit op */
	offset = MDP_CTL_HW_FENCE_ID_OFFSET_n(MDP_CTL_HW_FENCE_IDn_ISR,
		(HW_FENCE_DPU_OUTPUT_FENCE_START_N + 2));
	val = MDP_CTL_FENCE_ISR_OP_CODE(0xf, 0x0, 0x0, 0x0);
	SDE_REG_WRITE(&c, offset, val);
}
  621. static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops, unsigned long cap, u32 hw_fence_rev)
  622. {
  623. ops->setup_split_pipe = sde_hw_setup_split_pipe;
  624. ops->setup_pp_split = sde_hw_setup_pp_split;
  625. ops->setup_cdm_output = sde_hw_setup_cdm_output;
  626. ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
  627. ops->get_clk_ctrl_status = sde_hw_get_clk_ctrl_status;
  628. ops->set_cwb_ppb_cntl = sde_hw_program_cwb_ppb_ctrl;
  629. ops->reset_ubwc = sde_hw_reset_ubwc;
  630. ops->intf_audio_select = sde_hw_intf_audio_select;
  631. ops->set_mdp_hw_events = sde_hw_mdp_events;
  632. if (cap & BIT(SDE_MDP_VSYNC_SEL))
  633. ops->setup_vsync_source = sde_hw_setup_vsync_source;
  634. else if (cap & BIT(SDE_MDP_WD_TIMER))
  635. ops->setup_vsync_source = sde_hw_setup_vsync_source_v1;
  636. if (cap & BIT(SDE_MDP_DHDR_MEMPOOL_4K) ||
  637. cap & BIT(SDE_MDP_DHDR_MEMPOOL))
  638. ops->set_hdr_plus_metadata = sde_hw_set_hdr_plus_metadata;
  639. ops->get_autorefresh_status = sde_hw_get_autorefresh_status;
  640. if (hw_fence_rev) {
  641. ops->setup_hw_fences = sde_hw_setup_hw_fences_config;
  642. ops->hw_fence_input_timestamp_ctrl = sde_hw_hw_fence_timestamp_ctrl;
  643. ops->hw_fence_input_status = sde_hw_input_hw_fence_status;
  644. }
  645. }
  646. static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp,
  647. const struct sde_mdss_cfg *m,
  648. void __iomem *addr,
  649. struct sde_hw_blk_reg_map *b)
  650. {
  651. int i;
  652. if (!m || !addr || !b)
  653. return ERR_PTR(-EINVAL);
  654. for (i = 0; i < m->mdp_count; i++) {
  655. if (mdp == m->mdp[i].id) {
  656. b->base_off = addr;
  657. b->blk_off = m->mdp[i].base;
  658. b->length = m->mdp[i].len;
  659. b->hw_rev = m->hw_rev;
  660. b->log_mask = SDE_DBG_MASK_TOP;
  661. return &m->mdp[i];
  662. }
  663. }
  664. return ERR_PTR(-EINVAL);
  665. }
/*
 * sde_hw_mdptop_init - allocate and initialize the mdp top hardware block
 * @idx: mdp block id
 * @addr: mapped register base
 * @m: mdss catalog
 *
 * Looks up the catalog entry, wires up the ops table, and registers the
 * debug register-dump ranges. On targets where PERIPH_TOP0 was removed the
 * block is registered as two ranges around the removed region, plus a
 * separate hw-fence range addressed from the mdss base.
 *
 * Return: new block pointer or ERR_PTR on failure; caller frees with
 * sde_hw_mdp_destroy().
 */
struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
		void __iomem *addr,
		const struct sde_mdss_cfg *m)
{
	struct sde_hw_mdp *mdp;
	const struct sde_mdp_cfg *cfg;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
	if (!mdp)
		return ERR_PTR(-ENOMEM);

	cfg = _top_offset(idx, m, addr, &mdp->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(mdp);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Assign ops
	 */
	mdp->idx = idx;
	mdp->caps = cfg;
	_setup_mdp_ops(&mdp->ops, mdp->caps->features, m->hw_fence_rev);

	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "mdss_hw", 0,
			m->mdss_hw_block_size, 0);

	if (test_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &m->mdp[0].features)) {
		char name[SDE_HW_BLK_NAME_LEN];

		/* second range registered under "<name>_1" */
		snprintf(name, sizeof(name), "%s_1", cfg->name);
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, mdp->hw.blk_off,
				mdp->hw.blk_off + MDP_PERIPH_TOP0, mdp->hw.xin_id);
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, name, mdp->hw.blk_off + MDP_SSPP_TOP2,
				mdp->hw.blk_off + mdp->hw.length, mdp->hw.xin_id);

		/* do not use blk_off, following offsets start from mdp_phys */
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "hw_fence", MDP_CTL_HW_FENCE_CTRL,
				MDP_CTL_HW_FENCE_ID_OFFSET_m(MDP_CTL_HW_FENCE_IDm_ATTR, 5), mdp->hw.xin_id);
	} else {
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
				mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length,
				mdp->hw.xin_id);
	}

	sde_dbg_set_sde_top_offset(mdp->hw.blk_off);

	return mdp;
}
/*
 * sde_hw_mdp_destroy - free an mdp top block allocated by
 * sde_hw_mdptop_init(); safe to call with NULL (kfree is a no-op on NULL)
 * @mdp: block to free
 */
void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp)
{
	kfree(mdp);
}