/* sde_hw_top.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #include "sde_hwio.h"
  6. #include "sde_hw_catalog.h"
  7. #include "sde_hw_top.h"
  8. #include "sde_dbg.h"
  9. #include "sde_kms.h"
  10. #define SCRATCH_REGISTER_0 0x14
  11. #define SSPP_SPARE 0x28
  12. #define UBWC_DEC_HW_VERSION 0x058
  13. #define UBWC_STATIC 0x144
  14. #define UBWC_CTRL_2 0x150
  15. #define UBWC_PREDICTION_MODE 0x154
  16. #define FLD_SPLIT_DISPLAY_CMD BIT(1)
  17. #define FLD_SMART_PANEL_FREE_RUN BIT(2)
  18. #define FLD_INTF_1_SW_TRG_MUX BIT(4)
  19. #define FLD_INTF_2_SW_TRG_MUX BIT(8)
  20. #define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
  21. #define MDP_DSPP_DBGBUS_CTRL 0x348
  22. #define MDP_DSPP_DBGBUS_STATUS 0x34C
  23. #define DANGER_STATUS 0x360
  24. #define SAFE_STATUS 0x364
  25. #define TE_LINE_INTERVAL 0x3F4
  26. #define TRAFFIC_SHAPER_EN BIT(31)
  27. #define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
  28. #define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
  29. #define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
  30. #define MDP_WD_TIMER_0_CTL 0x380
  31. #define MDP_WD_TIMER_0_CTL2 0x384
  32. #define MDP_WD_TIMER_0_LOAD_VALUE 0x388
  33. #define MDP_WD_TIMER_1_CTL 0x390
  34. #define MDP_WD_TIMER_1_CTL2 0x394
  35. #define MDP_WD_TIMER_1_LOAD_VALUE 0x398
  36. #define MDP_PERIPH_DBGBUS_CTRL 0x418
  37. #define MDP_WD_TIMER_2_CTL 0x420
  38. #define MDP_WD_TIMER_2_CTL2 0x424
  39. #define MDP_WD_TIMER_2_LOAD_VALUE 0x428
  40. #define MDP_WD_TIMER_3_CTL 0x430
  41. #define MDP_WD_TIMER_3_CTL2 0x434
  42. #define MDP_WD_TIMER_3_LOAD_VALUE 0x438
  43. #define MDP_WD_TIMER_4_CTL 0x440
  44. #define MDP_WD_TIMER_4_CTL2 0x444
  45. #define MDP_WD_TIMER_4_LOAD_VALUE 0x448
  46. #define MDP_TICK_COUNT 16
  47. #define XO_CLK_RATE 19200
  48. #define MS_TICKS_IN_SEC 1000
  49. #define AUTOREFRESH_TEST_POINT 0x2
  50. #define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
  51. #define CALCULATE_WD_LOAD_VALUE(fps) \
  52. ((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
  53. #define DCE_SEL 0x450
  54. #define MDP_SID_VIG0 0x0
  55. #define MDP_SID_VIG1 0x4
  56. #define MDP_SID_VIG2 0x8
  57. #define MDP_SID_VIG3 0xC
  58. #define MDP_SID_DMA0 0x10
  59. #define MDP_SID_DMA1 0x14
  60. #define MDP_SID_DMA2 0x18
  61. #define MDP_SID_DMA3 0x1C
  62. #define MDP_SID_ROT_RD 0x20
  63. #define MDP_SID_ROT_WR 0x24
  64. #define MDP_SID_WB2 0x28
  65. #define MDP_SID_XIN7 0x2C
  66. #define ROT_SID_ID_VAL 0x1c
  67. static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
  68. struct split_pipe_cfg *cfg)
  69. {
  70. struct sde_hw_blk_reg_map *c;
  71. u32 upper_pipe = 0;
  72. u32 lower_pipe = 0;
  73. if (!mdp || !cfg)
  74. return;
  75. c = &mdp->hw;
  76. if (cfg->en) {
  77. if (cfg->mode == INTF_MODE_CMD) {
  78. lower_pipe = FLD_SPLIT_DISPLAY_CMD;
  79. /* interface controlling sw trigger */
  80. if (cfg->intf == INTF_2)
  81. lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
  82. else
  83. lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
  84. /* free run */
  85. if (cfg->pp_split_slave != INTF_MAX)
  86. lower_pipe = FLD_SMART_PANEL_FREE_RUN;
  87. upper_pipe = lower_pipe;
  88. /* smart panel align mode */
  89. lower_pipe |= BIT(mdp->caps->smart_panel_align_mode);
  90. } else {
  91. if (cfg->intf == INTF_2) {
  92. lower_pipe = FLD_INTF_1_SW_TRG_MUX;
  93. upper_pipe = FLD_INTF_2_SW_TRG_MUX;
  94. } else {
  95. lower_pipe = FLD_INTF_2_SW_TRG_MUX;
  96. upper_pipe = FLD_INTF_1_SW_TRG_MUX;
  97. }
  98. }
  99. }
  100. SDE_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
  101. SDE_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
  102. SDE_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
  103. SDE_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
  104. }
  105. static u32 sde_hw_get_split_flush(struct sde_hw_mdp *mdp)
  106. {
  107. struct sde_hw_blk_reg_map *c;
  108. if (!mdp)
  109. return 0;
  110. c = &mdp->hw;
  111. return (SDE_REG_READ(c, SSPP_SPARE) & 0x1);
  112. }
  113. static void sde_hw_setup_pp_split(struct sde_hw_mdp *mdp,
  114. struct split_pipe_cfg *cfg)
  115. {
  116. u32 ppb_config = 0x0;
  117. u32 ppb_control = 0x0;
  118. if (!mdp || !cfg)
  119. return;
  120. if (cfg->split_link_en) {
  121. ppb_config |= BIT(16); /* split enable */
  122. ppb_control = BIT(5); /* horz split*/
  123. } else if (cfg->en && cfg->pp_split_slave != INTF_MAX) {
  124. ppb_config |= (cfg->pp_split_slave - INTF_0 + 1) << 20;
  125. ppb_config |= BIT(16); /* split enable */
  126. ppb_control = BIT(5); /* horz split*/
  127. }
  128. if (cfg->pp_split_index && !cfg->split_link_en) {
  129. SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, 0x0);
  130. SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, 0x0);
  131. SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, ppb_config);
  132. SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, ppb_control);
  133. } else {
  134. SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, ppb_config);
  135. SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, ppb_control);
  136. SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, 0x0);
  137. SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, 0x0);
  138. }
  139. }
  140. static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp,
  141. struct cdm_output_cfg *cfg)
  142. {
  143. struct sde_hw_blk_reg_map *c;
  144. u32 out_ctl = 0;
  145. if (!mdp || !cfg)
  146. return;
  147. c = &mdp->hw;
  148. if (cfg->wb_en)
  149. out_ctl |= BIT(24);
  150. else if (cfg->intf_en)
  151. out_ctl |= BIT(19);
  152. SDE_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
  153. }
  154. static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp,
  155. enum sde_clk_ctrl_type clk_ctrl, bool enable)
  156. {
  157. struct sde_hw_blk_reg_map *c;
  158. u32 reg_off, bit_off;
  159. u32 reg_val, new_val;
  160. bool clk_forced_on;
  161. if (!mdp)
  162. return false;
  163. c = &mdp->hw;
  164. if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX)
  165. return false;
  166. reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
  167. bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
  168. reg_val = SDE_REG_READ(c, reg_off);
  169. if (enable)
  170. new_val = reg_val | BIT(bit_off);
  171. else
  172. new_val = reg_val & ~BIT(bit_off);
  173. SDE_REG_WRITE(c, reg_off, new_val);
  174. wmb(); /* ensure write finished before progressing */
  175. clk_forced_on = !(reg_val & BIT(bit_off));
  176. return clk_forced_on;
  177. }
  178. static int sde_hw_get_clk_ctrl_status(struct sde_hw_mdp *mdp,
  179. enum sde_clk_ctrl_type clk_ctrl, bool *status)
  180. {
  181. struct sde_hw_blk_reg_map *c;
  182. u32 reg_off, bit_off;
  183. if (!mdp)
  184. return -EINVAL;
  185. c = &mdp->hw;
  186. if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX ||
  187. !mdp->caps->clk_status[clk_ctrl].reg_off)
  188. return -EINVAL;
  189. reg_off = mdp->caps->clk_status[clk_ctrl].reg_off;
  190. bit_off = mdp->caps->clk_status[clk_ctrl].bit_off;
  191. *status = SDE_REG_READ(c, reg_off) & BIT(bit_off);
  192. return 0;
  193. }
  194. static void sde_hw_get_danger_status(struct sde_hw_mdp *mdp,
  195. struct sde_danger_safe_status *status)
  196. {
  197. struct sde_hw_blk_reg_map *c;
  198. u32 value;
  199. if (!mdp || !status)
  200. return;
  201. c = &mdp->hw;
  202. value = SDE_REG_READ(c, DANGER_STATUS);
  203. status->mdp = (value >> 0) & 0x3;
  204. status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
  205. status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
  206. status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
  207. status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
  208. status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
  209. status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
  210. status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
  211. status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
  212. status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
  213. status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
  214. status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
  215. status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
  216. status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
  217. status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
  218. status->wb[WB_0] = 0;
  219. status->wb[WB_1] = 0;
  220. status->wb[WB_2] = (value >> 2) & 0x3;
  221. status->wb[WB_3] = 0;
  222. }
  223. static void _update_vsync_source(struct sde_hw_mdp *mdp,
  224. struct sde_vsync_source_cfg *cfg)
  225. {
  226. struct sde_hw_blk_reg_map *c;
  227. u32 reg, wd_load_value, wd_ctl, wd_ctl2;
  228. if (!mdp || !cfg)
  229. return;
  230. c = &mdp->hw;
  231. if (cfg->vsync_source >= SDE_VSYNC_SOURCE_WD_TIMER_4 &&
  232. cfg->vsync_source <= SDE_VSYNC_SOURCE_WD_TIMER_0) {
  233. switch (cfg->vsync_source) {
  234. case SDE_VSYNC_SOURCE_WD_TIMER_4:
  235. wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
  236. wd_ctl = MDP_WD_TIMER_4_CTL;
  237. wd_ctl2 = MDP_WD_TIMER_4_CTL2;
  238. break;
  239. case SDE_VSYNC_SOURCE_WD_TIMER_3:
  240. wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
  241. wd_ctl = MDP_WD_TIMER_3_CTL;
  242. wd_ctl2 = MDP_WD_TIMER_3_CTL2;
  243. break;
  244. case SDE_VSYNC_SOURCE_WD_TIMER_2:
  245. wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
  246. wd_ctl = MDP_WD_TIMER_2_CTL;
  247. wd_ctl2 = MDP_WD_TIMER_2_CTL2;
  248. break;
  249. case SDE_VSYNC_SOURCE_WD_TIMER_1:
  250. wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
  251. wd_ctl = MDP_WD_TIMER_1_CTL;
  252. wd_ctl2 = MDP_WD_TIMER_1_CTL2;
  253. break;
  254. case SDE_VSYNC_SOURCE_WD_TIMER_0:
  255. default:
  256. wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
  257. wd_ctl = MDP_WD_TIMER_0_CTL;
  258. wd_ctl2 = MDP_WD_TIMER_0_CTL2;
  259. break;
  260. }
  261. if (cfg->is_dummy) {
  262. SDE_REG_WRITE(c, wd_ctl2, 0x0);
  263. } else {
  264. SDE_REG_WRITE(c, wd_load_value,
  265. CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
  266. SDE_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
  267. reg = SDE_REG_READ(c, wd_ctl2);
  268. reg |= BIT(8); /* enable heartbeat timer */
  269. reg |= BIT(0); /* enable WD timer */
  270. SDE_REG_WRITE(c, wd_ctl2, reg);
  271. }
  272. /* make sure that timers are enabled/disabled for vsync state */
  273. wmb();
  274. }
  275. }
  276. static void sde_hw_setup_vsync_source(struct sde_hw_mdp *mdp,
  277. struct sde_vsync_source_cfg *cfg)
  278. {
  279. struct sde_hw_blk_reg_map *c;
  280. u32 reg, i;
  281. static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
  282. if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
  283. return;
  284. c = &mdp->hw;
  285. reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
  286. for (i = 0; i < cfg->pp_count; i++) {
  287. int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
  288. if (pp_idx >= ARRAY_SIZE(pp_offset))
  289. continue;
  290. reg &= ~(0xf << pp_offset[pp_idx]);
  291. reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
  292. }
  293. SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
  294. _update_vsync_source(mdp, cfg);
  295. }
  296. static void sde_hw_setup_vsync_source_v1(struct sde_hw_mdp *mdp,
  297. struct sde_vsync_source_cfg *cfg)
  298. {
  299. _update_vsync_source(mdp, cfg);
  300. }
  301. static void sde_hw_get_safe_status(struct sde_hw_mdp *mdp,
  302. struct sde_danger_safe_status *status)
  303. {
  304. struct sde_hw_blk_reg_map *c;
  305. u32 value;
  306. if (!mdp || !status)
  307. return;
  308. c = &mdp->hw;
  309. value = SDE_REG_READ(c, SAFE_STATUS);
  310. status->mdp = (value >> 0) & 0x1;
  311. status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
  312. status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
  313. status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
  314. status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
  315. status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
  316. status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
  317. status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
  318. status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
  319. status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
  320. status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
  321. status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
  322. status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
  323. status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
  324. status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
  325. status->wb[WB_0] = 0;
  326. status->wb[WB_1] = 0;
  327. status->wb[WB_2] = (value >> 2) & 0x1;
  328. status->wb[WB_3] = 0;
  329. }
  330. static void sde_hw_setup_dce(struct sde_hw_mdp *mdp, u32 dce_sel)
  331. {
  332. struct sde_hw_blk_reg_map *c;
  333. if (!mdp)
  334. return;
  335. c = &mdp->hw;
  336. SDE_REG_WRITE(c, DCE_SEL, dce_sel);
  337. }
  338. void sde_hw_reset_ubwc(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m)
  339. {
  340. struct sde_hw_blk_reg_map c;
  341. u32 ubwc_version;
  342. if (!mdp || !m)
  343. return;
  344. /* force blk offset to zero to access beginning of register region */
  345. c = mdp->hw;
  346. c.blk_off = 0x0;
  347. ubwc_version = SDE_REG_READ(&c, UBWC_DEC_HW_VERSION);
  348. if (IS_UBWC_40_SUPPORTED(ubwc_version)) {
  349. u32 ver = 2;
  350. u32 mode = 1;
  351. u32 reg = (m->mdp[0].ubwc_swizzle & 0x7) |
  352. ((m->mdp[0].ubwc_static & 0x1) << 3) |
  353. ((m->mdp[0].highest_bank_bit & 0x7) << 4) |
  354. ((m->macrotile_mode & 0x1) << 12);
  355. if (IS_UBWC_30_SUPPORTED(m->ubwc_version)) {
  356. ver = 1;
  357. mode = 0;
  358. }
  359. SDE_REG_WRITE(&c, UBWC_STATIC, reg);
  360. SDE_REG_WRITE(&c, UBWC_CTRL_2, ver);
  361. SDE_REG_WRITE(&c, UBWC_PREDICTION_MODE, mode);
  362. } else if (IS_UBWC_20_SUPPORTED(ubwc_version)) {
  363. SDE_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
  364. } else if (IS_UBWC_30_SUPPORTED(ubwc_version)) {
  365. u32 reg = m->mdp[0].ubwc_static |
  366. (m->mdp[0].ubwc_swizzle & 0x1) |
  367. ((m->mdp[0].highest_bank_bit & 0x3) << 4) |
  368. ((m->macrotile_mode & 0x1) << 12);
  369. if (IS_UBWC_30_SUPPORTED(m->ubwc_version))
  370. reg |= BIT(10);
  371. if (IS_UBWC_10_SUPPORTED(m->ubwc_version))
  372. reg |= BIT(8);
  373. SDE_REG_WRITE(&c, UBWC_STATIC, reg);
  374. } else {
  375. SDE_ERROR("Unsupported UBWC version 0x%08x\n", ubwc_version);
  376. }
  377. }
  378. static void sde_hw_intf_audio_select(struct sde_hw_mdp *mdp)
  379. {
  380. struct sde_hw_blk_reg_map *c;
  381. if (!mdp)
  382. return;
  383. c = &mdp->hw;
  384. SDE_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
  385. }
  386. static void sde_hw_mdp_events(struct sde_hw_mdp *mdp, bool enable)
  387. {
  388. struct sde_hw_blk_reg_map *c;
  389. if (!mdp)
  390. return;
  391. c = &mdp->hw;
  392. SDE_REG_WRITE(c, HW_EVENTS_CTL, enable);
  393. }
  394. struct sde_hw_sid *sde_hw_sid_init(void __iomem *addr,
  395. u32 sid_len, const struct sde_mdss_cfg *m)
  396. {
  397. struct sde_hw_sid *c;
  398. c = kzalloc(sizeof(*c), GFP_KERNEL);
  399. if (!c)
  400. return ERR_PTR(-ENOMEM);
  401. c->hw.base_off = addr;
  402. c->hw.blk_off = 0;
  403. c->hw.length = sid_len;
  404. c->hw.hwversion = m->hwversion;
  405. c->hw.log_mask = SDE_DBG_MASK_SID;
  406. return c;
  407. }
  408. void sde_hw_set_rotator_sid(struct sde_hw_sid *sid)
  409. {
  410. if (!sid)
  411. return;
  412. SDE_REG_WRITE(&sid->hw, MDP_SID_ROT_RD, ROT_SID_ID_VAL);
  413. SDE_REG_WRITE(&sid->hw, MDP_SID_ROT_WR, ROT_SID_ID_VAL);
  414. }
  415. void sde_hw_set_sspp_sid(struct sde_hw_sid *sid, u32 pipe, u32 vm)
  416. {
  417. u32 offset = 0;
  418. if (!sid)
  419. return;
  420. if ((pipe >= SSPP_VIG0) && (pipe <= SSPP_VIG3))
  421. offset = MDP_SID_VIG0 + ((pipe - SSPP_VIG0) * 4);
  422. else if ((pipe >= SSPP_DMA0) && (pipe <= SSPP_DMA3))
  423. offset = MDP_SID_DMA0 + ((pipe - SSPP_DMA0) * 4);
  424. else
  425. return;
  426. SDE_REG_WRITE(&sid->hw, offset, vm << 2);
  427. }
  428. void sde_hw_set_lutdma_sid(struct sde_hw_sid *sid, u32 vm)
  429. {
  430. if (!sid)
  431. return;
  432. SDE_REG_WRITE(&sid->hw, MDP_SID_XIN7, vm << 2);
  433. }
  434. static void sde_hw_program_cwb_ppb_ctrl(struct sde_hw_mdp *mdp,
  435. bool dual, bool dspp_out)
  436. {
  437. u32 value = dspp_out ? 0x4 : 0x0;
  438. SDE_REG_WRITE(&mdp->hw, PPB2_CNTL, value);
  439. if (dual) {
  440. value |= 0x1;
  441. SDE_REG_WRITE(&mdp->hw, PPB3_CNTL, value);
  442. }
  443. }
  444. static void sde_hw_set_hdr_plus_metadata(struct sde_hw_mdp *mdp,
  445. u8 *payload, u32 len, u32 stream_id)
  446. {
  447. u32 i, b;
  448. u32 length = len - 1;
  449. u32 d_offset, nb_offset, data = 0;
  450. const u32 dword_size = sizeof(u32);
  451. bool is_4k_aligned = mdp->caps->features &
  452. BIT(SDE_MDP_DHDR_MEMPOOL_4K);
  453. if (!payload || !len) {
  454. SDE_ERROR("invalid payload with length: %d\n", len);
  455. return;
  456. }
  457. if (stream_id) {
  458. if (is_4k_aligned) {
  459. d_offset = DP_DHDR_MEM_POOL_1_DATA_4K;
  460. nb_offset = DP_DHDR_MEM_POOL_1_NUM_BYTES_4K;
  461. } else {
  462. d_offset = DP_DHDR_MEM_POOL_1_DATA;
  463. nb_offset = DP_DHDR_MEM_POOL_1_NUM_BYTES;
  464. }
  465. } else {
  466. if (is_4k_aligned) {
  467. d_offset = DP_DHDR_MEM_POOL_0_DATA_4K;
  468. nb_offset = DP_DHDR_MEM_POOL_0_NUM_BYTES_4K;
  469. } else {
  470. d_offset = DP_DHDR_MEM_POOL_0_DATA;
  471. nb_offset = DP_DHDR_MEM_POOL_0_NUM_BYTES;
  472. }
  473. }
  474. /* payload[0] is set in VSCEXT header byte 1, skip programming here */
  475. SDE_REG_WRITE(&mdp->hw, nb_offset, length);
  476. for (i = 1; i < len; i += dword_size) {
  477. for (b = 0; (i + b) < len && b < dword_size; b++)
  478. data |= payload[i + b] << (8 * b);
  479. SDE_REG_WRITE(&mdp->hw, d_offset, data);
  480. data = 0;
  481. }
  482. }
  483. static u32 sde_hw_get_autorefresh_status(struct sde_hw_mdp *mdp, u32 intf_idx)
  484. {
  485. struct sde_hw_blk_reg_map *c;
  486. u32 autorefresh_status;
  487. u32 blk_id = (intf_idx == INTF_2) ? 65 : 64;
  488. if (!mdp)
  489. return 0;
  490. c = &mdp->hw;
  491. SDE_REG_WRITE(&mdp->hw, MDP_PERIPH_DBGBUS_CTRL,
  492. TEST_MASK(blk_id, AUTOREFRESH_TEST_POINT));
  493. SDE_REG_WRITE(&mdp->hw, MDP_DSPP_DBGBUS_CTRL, 0x7001);
  494. wmb(); /* make sure test bits were written */
  495. autorefresh_status = SDE_REG_READ(&mdp->hw, MDP_DSPP_DBGBUS_STATUS);
  496. SDE_REG_WRITE(&mdp->hw, MDP_PERIPH_DBGBUS_CTRL, 0x0);
  497. return autorefresh_status;
  498. }
  499. static void sde_hw_clear_mode_index(struct sde_hw_mdp *mdp)
  500. {
  501. struct sde_hw_blk_reg_map c;
  502. if (!mdp)
  503. return;
  504. c = mdp->hw;
  505. c.blk_off = 0x0;
  506. SDE_REG_WRITE(&c, SCRATCH_REGISTER_0, 0x0);
  507. }
  508. static void sde_hw_set_mode_index(struct sde_hw_mdp *mdp, u32 display_id,
  509. u32 mode)
  510. {
  511. struct sde_hw_blk_reg_map c;
  512. u32 value = 0;
  513. if (!mdp)
  514. return;
  515. c = mdp->hw;
  516. c.blk_off = 0x0;
  517. /* 4-bits for mode index of each display */
  518. value = SDE_REG_READ(&c, SCRATCH_REGISTER_0);
  519. value |= (mode << (display_id * 4));
  520. SDE_REG_WRITE(&c, SCRATCH_REGISTER_0, value);
  521. }
  522. static u32 sde_hw_get_mode_index(struct sde_hw_mdp *mdp, u32 display_id)
  523. {
  524. struct sde_hw_blk_reg_map c;
  525. u32 value = 0;
  526. c = mdp->hw;
  527. c.blk_off = 0x0;
  528. value = SDE_REG_READ(&c, SCRATCH_REGISTER_0);
  529. value = (value >> (display_id * 4)) & 0xF;
  530. return value;
  531. }
  532. static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
  533. unsigned long cap)
  534. {
  535. ops->setup_split_pipe = sde_hw_setup_split_pipe;
  536. ops->setup_pp_split = sde_hw_setup_pp_split;
  537. ops->setup_cdm_output = sde_hw_setup_cdm_output;
  538. ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
  539. ops->get_clk_ctrl_status = sde_hw_get_clk_ctrl_status;
  540. ops->get_danger_status = sde_hw_get_danger_status;
  541. ops->setup_vsync_source = sde_hw_setup_vsync_source;
  542. ops->set_cwb_ppb_cntl = sde_hw_program_cwb_ppb_ctrl;
  543. ops->get_safe_status = sde_hw_get_safe_status;
  544. ops->get_split_flush_status = sde_hw_get_split_flush;
  545. ops->setup_dce = sde_hw_setup_dce;
  546. ops->reset_ubwc = sde_hw_reset_ubwc;
  547. ops->intf_audio_select = sde_hw_intf_audio_select;
  548. ops->set_mdp_hw_events = sde_hw_mdp_events;
  549. ops->set_mode_index = sde_hw_set_mode_index;
  550. ops->get_mode_index = sde_hw_get_mode_index;
  551. ops->clear_mode_index = sde_hw_clear_mode_index;
  552. if (cap & BIT(SDE_MDP_VSYNC_SEL))
  553. ops->setup_vsync_source = sde_hw_setup_vsync_source;
  554. else
  555. ops->setup_vsync_source = sde_hw_setup_vsync_source_v1;
  556. if (cap & BIT(SDE_MDP_DHDR_MEMPOOL_4K) ||
  557. cap & BIT(SDE_MDP_DHDR_MEMPOOL))
  558. ops->set_hdr_plus_metadata = sde_hw_set_hdr_plus_metadata;
  559. ops->get_autorefresh_status = sde_hw_get_autorefresh_status;
  560. }
  561. static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp,
  562. const struct sde_mdss_cfg *m,
  563. void __iomem *addr,
  564. struct sde_hw_blk_reg_map *b)
  565. {
  566. int i;
  567. if (!m || !addr || !b)
  568. return ERR_PTR(-EINVAL);
  569. for (i = 0; i < m->mdp_count; i++) {
  570. if (mdp == m->mdp[i].id) {
  571. b->base_off = addr;
  572. b->blk_off = m->mdp[i].base;
  573. b->length = m->mdp[i].len;
  574. b->hwversion = m->hwversion;
  575. b->log_mask = SDE_DBG_MASK_TOP;
  576. return &m->mdp[i];
  577. }
  578. }
  579. return ERR_PTR(-EINVAL);
  580. }
  581. static struct sde_hw_blk_ops sde_hw_ops = {
  582. .start = NULL,
  583. .stop = NULL,
  584. };
  585. struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
  586. void __iomem *addr,
  587. const struct sde_mdss_cfg *m)
  588. {
  589. struct sde_hw_mdp *mdp;
  590. const struct sde_mdp_cfg *cfg;
  591. int rc;
  592. if (!addr || !m)
  593. return ERR_PTR(-EINVAL);
  594. mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
  595. if (!mdp)
  596. return ERR_PTR(-ENOMEM);
  597. cfg = _top_offset(idx, m, addr, &mdp->hw);
  598. if (IS_ERR_OR_NULL(cfg)) {
  599. kfree(mdp);
  600. return ERR_PTR(-EINVAL);
  601. }
  602. /*
  603. * Assign ops
  604. */
  605. mdp->idx = idx;
  606. mdp->caps = cfg;
  607. _setup_mdp_ops(&mdp->ops, mdp->caps->features);
  608. rc = sde_hw_blk_init(&mdp->base, SDE_HW_BLK_TOP, idx, &sde_hw_ops);
  609. if (rc) {
  610. SDE_ERROR("failed to init hw blk %d\n", rc);
  611. goto blk_init_error;
  612. }
  613. sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "mdss_hw", 0,
  614. m->mdss_hw_block_size, 0);
  615. sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
  616. mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length,
  617. mdp->hw.xin_id);
  618. sde_dbg_set_sde_top_offset(mdp->hw.blk_off);
  619. return mdp;
  620. blk_init_error:
  621. kzfree(mdp);
  622. return ERR_PTR(rc);
  623. }
  624. void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp)
  625. {
  626. if (mdp)
  627. sde_hw_blk_destroy(&mdp->base);
  628. kfree(mdp);
  629. }