sde_encoder_dce.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/sde_rsc.h>

#include "msm_drv.h"
#include "sde_kms.h"
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include "sde_hwio.h"
#include "sde_hw_catalog.h"
#include "sde_hw_intf.h"
#include "sde_hw_ctl.h"
#include "sde_formats.h"
#include "sde_encoder_phys.h"
#include "sde_power_handle.h"
#include "sde_hw_dsc.h"
#include "sde_hw_vdc.h"
#include "sde_crtc.h"
#include "sde_trace.h"
#include "sde_core_irq.h"
#include "sde_dsc_helper.h"
#include "sde_vdc_helper.h"

#define SDE_DEBUG_DCE(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define SDE_ERROR_DCE(e, fmt, ...) SDE_ERROR("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc)
{
	enum sde_rm_topology_name topology;
	struct sde_encoder_virt *sde_enc;
	struct drm_connector *drm_conn;

	if (!drm_enc)
		return false;

	sde_enc = to_sde_encoder_virt(drm_enc);
	if (!sde_enc->cur_master)
		return false;

	drm_conn = sde_enc->cur_master->connector;
	if (!drm_conn)
		return false;

	topology = sde_connector_get_topology_name(drm_conn);
	if (topology == SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE)
		return true;

	return false;
}

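/*
 * Validate and store the picture dimensions for DSC. The DSC spec
 * requires the picture to be an integer multiple of the slice grid;
 * e.g. a 1080x2400 ROI with 540x40 slices divides evenly (2 slices
 * per row, 60 slice rows), while a 1080x2401 ROI would be rejected.
 */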
static int _dce_dsc_update_pic_dim(struct msm_display_dsc_info *dsc,
		int pic_width, int pic_height)
{
	if (!dsc || !pic_width || !pic_height) {
		SDE_ERROR("invalid input: pic_width=%d pic_height=%d\n",
			pic_width, pic_height);
		return -EINVAL;
	}

	if ((pic_width % dsc->config.slice_width) ||
			(pic_height % dsc->config.slice_height)) {
		SDE_ERROR("pic_dim=%dx%d has to be multiple of slice=%dx%d\n",
			pic_width, pic_height,
			dsc->config.slice_width, dsc->config.slice_height);
		return -EINVAL;
	}

	dsc->config.pic_width = pic_width;
	dsc->config.pic_height = pic_height;

	return 0;
}

static int _dce_vdc_update_pic_dim(struct msm_display_vdc_info *vdc,
		int frame_width, int frame_height)
{
	if (!vdc || !frame_width || !frame_height) {
		SDE_ERROR("invalid input: frame_width=%d frame_height=%d\n",
			frame_width, frame_height);
		return -EINVAL;
	}

	if ((frame_width % vdc->slice_width) ||
			(frame_height % vdc->slice_height)) {
		SDE_ERROR("pic_dim=%dx%d has to be multiple of slice=%dx%d\n",
			frame_width, frame_height,
			vdc->slice_width, vdc->slice_height);
		return -EINVAL;
	}

	vdc->frame_width = frame_width;
	vdc->frame_height = frame_height;

	return 0;
}

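/*
 * Derive dsc->initial_lines: the number of input lines the DSC encoder
 * must buffer before the interface can start pulling compressed data.
 * The budget is built from the fixed pipeline latency, the substream
 * multiplexer (SSM) delay, the output-buffer drain latency and any
 * extra chunk budget needed for multi-slice/split-panel operation,
 * then converted to lines by dividing by the container slice width.
 */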
static int _dce_dsc_initial_line_calc(struct msm_display_dsc_info *dsc,
		int enc_ip_width,
		int dsc_cmn_mode)
{
	int max_ssm_delay, max_se_size, max_muxword_size;
	int compress_bpp_group, obuf_latency, input_ssm_out_latency;
	int base_hs_latency, chunk_bits, ob_data_width;
	int output_rate_extra_budget_bits, multi_hs_extra_budget_bits;
	int multi_hs_extra_latency, mux_word_size;
	int ob_data_width_4comps, ob_data_width_3comps;
	int output_rate_ratio_complement, container_slice_width;
	int rtl_num_components, multi_hs_c, multi_hs_d;
	int bpc = dsc->config.bits_per_component;
	int bpp = DSC_BPP(dsc->config);
	int num_of_active_ss = dsc->config.slice_count;
	bool native_422 = dsc->config.native_422;
	bool native_420 = dsc->config.native_420;

	/* Hardent core config */
	int multiplex_mode_enable = 0, split_panel_enable = 0;
	int rtl_max_bpc = 10, rtl_output_data_width = 64;
	int pipeline_latency = 28;

	if (dsc_cmn_mode & DSC_MODE_MULTIPLEX)
		multiplex_mode_enable = 1;
	if (dsc_cmn_mode & DSC_MODE_SPLIT_PANEL)
		split_panel_enable = 1;
	container_slice_width = (native_422 ?
			dsc->config.slice_width / 2 : dsc->config.slice_width);
	max_muxword_size = (rtl_max_bpc >= 12) ? 64 : 48;
	max_se_size = 4 * (rtl_max_bpc + 1);
	max_ssm_delay = max_se_size + max_muxword_size - 1;
	mux_word_size = (bpc >= 12) ? 64 : 48;
	compress_bpp_group = native_422 ? (2 * bpp) : bpp;
	input_ssm_out_latency = pipeline_latency + 3 * (max_ssm_delay + 2)
			* num_of_active_ss;
	rtl_num_components = (native_420 || native_422) ? 4 : 3;
	ob_data_width_4comps =
			(rtl_output_data_width >= (2 * max_muxword_size)) ?
			rtl_output_data_width : (2 * rtl_output_data_width);
	ob_data_width_3comps = (rtl_output_data_width >= max_muxword_size) ?
			rtl_output_data_width : 2 * rtl_output_data_width;
	ob_data_width = (rtl_num_components == 4) ?
			ob_data_width_4comps : ob_data_width_3comps;
	obuf_latency = DIV_ROUND_UP((9 * ob_data_width + mux_word_size),
			compress_bpp_group) + 1;
	base_hs_latency = dsc->config.initial_xmit_delay +
			input_ssm_out_latency + obuf_latency;
	chunk_bits = 8 * dsc->config.slice_chunk_size;
	output_rate_ratio_complement = ob_data_width - compress_bpp_group;
	output_rate_extra_budget_bits =
			(output_rate_ratio_complement * chunk_bits) >>
			((ob_data_width == 128) ? 7 : 6);
	multi_hs_c = split_panel_enable * multiplex_mode_enable;
	multi_hs_d = (num_of_active_ss > 1) * (ob_data_width >
			compress_bpp_group);
	multi_hs_extra_budget_bits = multi_hs_c ?
			chunk_bits : (multi_hs_d ? chunk_bits :
			output_rate_extra_budget_bits);
	multi_hs_extra_latency = DIV_ROUND_UP(multi_hs_extra_budget_bits,
			compress_bpp_group);
	dsc->initial_lines = DIV_ROUND_UP((base_hs_latency +
			multi_hs_extra_latency),
			container_slice_width);

	return 0;
}

static bool _dce_dsc_ich_reset_override_needed(bool pu_en,
		struct msm_display_dsc_info *dsc)
{
	/*
	 * As per the DSC spec, ICH_RESET can happen either at the end of
	 * each slice line or at the end of the slice. HW internally
	 * generates ich_reset at the end of the slice line if DSC_MERGE
	 * is used or the encoder has two soft slices. However, if the
	 * encoder has only 1 soft slice and DSC_MERGE is not used, it
	 * will generate ich_reset at the end of the slice.
	 *
	 * The spec also requires that the position where ich_reset is
	 * generated must not change during one PPS session. If the
	 * full-screen frame has more than 1 soft slice, HW automatically
	 * generates ich_reset at the end of the slice line. But for the
	 * same panel, if partial update is enabled and only 1 encoder is
	 * used with 1 slice, HW will generate ich_reset at the end of
	 * the slice. This is a mismatch. Prevent this by overriding HW's
	 * decision.
	 */
	return pu_en && dsc && (dsc->config.slice_count > 1) &&
		(dsc->config.slice_width == dsc->config.pic_width);
}

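/*
 * Program or tear down one DSC hardware block and its pingpong
 * binding. On teardown, the non-double-buffered pp-block dsc is left
 * untouched for half panel updates; on setup, the core config,
 * rate-control thresholds, pingpong binding and optional 3d-mux
 * state are all written.
 */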
static void _dce_dsc_pipe_cfg(struct sde_hw_dsc *hw_dsc,
		struct sde_hw_pingpong *hw_pp, struct msm_display_dsc_info *dsc,
		u32 common_mode, bool ich_reset,
		struct sde_hw_pingpong *hw_dsc_pp,
		enum sde_3d_blend_mode mode_3d,
		bool disable_merge_3d, bool enable,
		bool half_panel_partial_update)
{
	if (!enable) {
		/*
		 * avoid disabling dsc encoder in pp-block as it is
		 * not double-buffered and is not required to be disabled
		 * for half panel updates
		 */
		if (hw_dsc_pp && hw_dsc_pp->ops.disable_dsc &&
				!half_panel_partial_update)
			hw_dsc_pp->ops.disable_dsc(hw_dsc_pp);

		if (hw_dsc && hw_dsc->ops.dsc_disable)
			hw_dsc->ops.dsc_disable(hw_dsc);

		if (hw_dsc && hw_dsc->ops.bind_pingpong_blk)
			hw_dsc->ops.bind_pingpong_blk(hw_dsc, false,
					PINGPONG_MAX);

		if (mode_3d && hw_pp && hw_pp->ops.reset_3d_mode)
			hw_pp->ops.reset_3d_mode(hw_pp);
		return;
	}

	if (!dsc || !hw_dsc || !hw_pp) {
		SDE_ERROR("invalid params %d %d %d\n", !dsc, !hw_dsc,
				!hw_pp);
		return;
	}

	if (hw_dsc->ops.dsc_config)
		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, ich_reset);

	if (hw_dsc->ops.dsc_config_thresh)
		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);

	if (hw_dsc_pp && hw_dsc_pp->ops.setup_dsc)
		hw_dsc_pp->ops.setup_dsc(hw_dsc_pp);

	if (mode_3d && disable_merge_3d && hw_pp->ops.reset_3d_mode) {
		SDE_DEBUG("disabling 3d mux\n");
		hw_pp->ops.reset_3d_mode(hw_pp);
	} else if (mode_3d && !disable_merge_3d && hw_pp->ops.setup_3d_mode) {
		SDE_DEBUG("enabling 3d mux\n");
		hw_pp->ops.setup_3d_mode(hw_pp, mode_3d);
	}

	if (hw_dsc && hw_dsc->ops.bind_pingpong_blk)
		hw_dsc->ops.bind_pingpong_blk(hw_dsc, true, hw_pp->idx);

	if (hw_dsc_pp && hw_dsc_pp->ops.enable_dsc)
		hw_dsc_pp->ops.enable_dsc(hw_dsc_pp);
}

static void _dce_vdc_pipe_cfg(struct sde_hw_vdc *hw_vdc,
		struct sde_hw_pingpong *hw_pp,
		struct msm_display_vdc_info *vdc,
		enum sde_3d_blend_mode mode_3d,
		bool disable_merge_3d, bool enable)
{
	if (!enable) {
		/* vdc params are not required to tear down the pipe */
		if (hw_vdc && hw_vdc->ops.vdc_disable)
			hw_vdc->ops.vdc_disable(hw_vdc);

		if (hw_vdc && hw_vdc->ops.bind_pingpong_blk)
			hw_vdc->ops.bind_pingpong_blk(hw_vdc, false,
					PINGPONG_MAX);

		if (mode_3d && hw_pp && hw_pp->ops.reset_3d_mode)
			hw_pp->ops.reset_3d_mode(hw_pp);
		return;
	}

	if (!vdc || !hw_vdc || !hw_pp) {
		SDE_ERROR("invalid params %d %d %d\n", !vdc, !hw_vdc,
				!hw_pp);
		return;
	}

	if (hw_vdc->ops.vdc_config)
		hw_vdc->ops.vdc_config(hw_vdc, vdc);

	if (mode_3d && disable_merge_3d && hw_pp->ops.reset_3d_mode) {
		SDE_DEBUG("disabling 3d mux\n");
		hw_pp->ops.reset_3d_mode(hw_pp);
	}

	if (mode_3d && !disable_merge_3d && hw_pp->ops.setup_3d_mode) {
		SDE_DEBUG("enabling 3d mux\n");
		hw_pp->ops.setup_3d_mode(hw_pp, mode_3d);
	}

	if (hw_vdc->ops.bind_pingpong_blk)
		hw_vdc->ops.bind_pingpong_blk(hw_vdc, true, hw_pp->idx);
}

static inline bool _dce_check_half_panel_update(int num_lm,
		unsigned long affected_displays)
{
	/*
	 * partial update logic is currently supported only up to dual
	 * pipe configurations.
	 */
	return (hweight_long(affected_displays) != num_lm);
}

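/*
 * Configure one DSC instance for the current commit. "active" is
 * derived from affected_displays: e.g. with two DSC blocks, a left
 * half panel update sets bit 0 only, so index 0 is configured as
 * active while index 1 is torn down via the enable=false path.
 */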
static int _dce_dsc_setup_single(struct sde_encoder_virt *sde_enc,
		struct msm_display_dsc_info *dsc,
		unsigned long affected_displays, int index,
		const struct sde_rect *roi, int dsc_common_mode,
		bool merge_3d, bool disable_merge_3d, bool mode_3d,
		bool half_panel_partial_update, int ich_res)
{
	struct sde_hw_ctl *hw_ctl;
	struct sde_hw_dsc *hw_dsc;
	struct sde_hw_pingpong *hw_pp;
	struct sde_hw_pingpong *hw_dsc_pp;
	struct sde_hw_intf_cfg_v1 cfg;
	bool active = !!((1 << index) & affected_displays);

	hw_ctl = sde_enc->cur_master->hw_ctl;

	/*
	 * in 3d_merge with half_panel partial update, dsc should be
	 * bound to the pp which is driving the update; otherwise, in
	 * 3d_merge, dsc should be bound to the left side of the pipe
	 */
	if (merge_3d && half_panel_partial_update)
		hw_pp = (active) ? sde_enc->hw_pp[0] : sde_enc->hw_pp[1];
	else
		hw_pp = sde_enc->hw_pp[index];

	hw_dsc = sde_enc->hw_dsc[index];
	hw_dsc_pp = sde_enc->hw_dsc_pp[index];

	if (!hw_pp || !hw_dsc) {
		SDE_ERROR_DCE(sde_enc, "DSC: invalid params %d %d\n", !!hw_pp,
				!!hw_dsc);
		SDE_EVT32(DRMID(&sde_enc->base), !hw_pp, !hw_dsc,
				SDE_EVTLOG_ERROR);
		return -EINVAL;
	}

	SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h, dsc_common_mode,
			index, active, merge_3d, disable_merge_3d);

	_dce_dsc_pipe_cfg(hw_dsc, hw_pp, dsc, dsc_common_mode, ich_res,
			hw_dsc_pp, mode_3d, disable_merge_3d, active,
			half_panel_partial_update);

	memset(&cfg, 0, sizeof(cfg));
	cfg.dsc[cfg.dsc_count++] = hw_dsc->idx;

	if (hw_ctl->ops.update_intf_cfg)
		hw_ctl->ops.update_intf_cfg(hw_ctl, &cfg, active);

	if (hw_ctl->ops.update_bitmask)
		hw_ctl->ops.update_bitmask(hw_ctl, SDE_HW_FLUSH_DSC,
				hw_dsc->idx, active);

	SDE_DEBUG_DCE(sde_enc, "update_intf_cfg hw_ctl[%d], dsc:%d, %s\n",
			hw_ctl->idx, cfg.dsc[0],
			active ? "enabled" : "disabled");

	if (mode_3d) {
		memset(&cfg, 0, sizeof(cfg));
		cfg.merge_3d[cfg.merge_3d_count++] = hw_pp->merge_3d->idx;

		if (hw_ctl->ops.update_intf_cfg)
			hw_ctl->ops.update_intf_cfg(hw_ctl, &cfg,
					!disable_merge_3d);

		if (hw_ctl->ops.update_bitmask)
			hw_ctl->ops.update_bitmask(hw_ctl,
					SDE_HW_FLUSH_MERGE_3D,
					hw_pp->merge_3d->idx, true);

		SDE_DEBUG("mode_3d %s, on CTL_%d PP-%d merge3d:%d\n",
				!disable_merge_3d ? "enabled" : "disabled",
				hw_ctl->idx - CTL_0, hw_pp->idx - PINGPONG_0,
				hw_pp->merge_3d ?
				hw_pp->merge_3d->idx - MERGE_3D_0 : -1);
	}

	return 0;
}

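/*
 * Topology-level DSC setup: derive the common mode flags
 * (SPLIT_PANEL, MULTIPLEX, VIDEO), the slice-based interface and
 * encoder input widths, and the ich_reset override from the topology
 * definition and current ROI, then apply them to every DSC block.
 */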
static int _dce_dsc_setup_helper(struct sde_encoder_virt *sde_enc,
		unsigned long affected_displays,
		enum sde_rm_topology_name topology)
{
	struct sde_kms *sde_kms;
	struct sde_encoder_phys *enc_master;
	struct msm_display_dsc_info *dsc = NULL;
	const struct sde_rm_topology_def *def;
	const struct sde_rect *roi;
	enum sde_3d_blend_mode mode_3d;
	bool half_panel_partial_update, dsc_merge, merge_3d;
	bool disable_merge_3d = false;
	int this_frame_slices;
	int intf_ip_w, enc_ip_w;
	int num_intf, num_dsc, num_lm;
	int ich_res;
	int dsc_common_mode = 0;
	int i;
	int rc = 0;

	sde_kms = sde_encoder_get_kms(&sde_enc->base);

	def = sde_rm_topology_get_topology_def(&sde_kms->rm, topology);
	if (IS_ERR_OR_NULL(def))
		return -EINVAL;

	enc_master = sde_enc->cur_master;
	roi = &sde_enc->cur_conn_roi;
	dsc = &sde_enc->mode_info.comp_info.dsc_info;
	num_dsc = def->num_comp_enc;
	num_intf = def->num_intf;
	mode_3d = (topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC) ?
			BLEND_3D_H_ROW_INT : BLEND_3D_NONE;
	num_lm = def->num_lm;

	half_panel_partial_update = _dce_check_half_panel_update(num_lm,
			affected_displays);
	merge_3d = (mode_3d != BLEND_3D_NONE) ? true : false;
	dsc_merge = ((num_dsc > num_intf) && !half_panel_partial_update) ?
			true : false;
	disable_merge_3d = (merge_3d && half_panel_partial_update) ?
			false : true;

	/*
	 * If this encoder is driving more than one DSC encoder, they
	 * operate in tandem and the same pic dimension needs to be used
	 * by each of them (pp-split is assumed to be not supported).
	 */
	_dce_dsc_update_pic_dim(dsc, roi->w, roi->h);

	this_frame_slices = roi->w / dsc->config.slice_width;
	intf_ip_w = this_frame_slices * dsc->config.slice_width;
	enc_ip_w = intf_ip_w;

	if (!half_panel_partial_update)
		intf_ip_w /= def->num_intf;

	if (!half_panel_partial_update && (num_dsc > 1))
		dsc_common_mode |= DSC_MODE_SPLIT_PANEL;

	if (dsc_merge) {
		dsc_common_mode |= DSC_MODE_MULTIPLEX;
		/*
		 * in dsc merge case: when using 2 encoders for the same
		 * stream, the number of slices needs to be the same on
		 * both encoders.
		 */
		enc_ip_w = intf_ip_w / 2;
	}

	if (enc_master->intf_mode == INTF_MODE_VIDEO)
		dsc_common_mode |= DSC_MODE_VIDEO;

	sde_dsc_populate_dsc_private_params(dsc, intf_ip_w);

	_dce_dsc_initial_line_calc(dsc, enc_ip_w, dsc_common_mode);

	/*
	 * _dce_dsc_ich_reset_override_needed should be called only after
	 * the pic dimension is updated via _dce_dsc_update_pic_dim.
	 */
	ich_res = _dce_dsc_ich_reset_override_needed(
			(half_panel_partial_update && !merge_3d), dsc);

	SDE_DEBUG_DCE(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
			roi->w, roi->h, dsc_common_mode);

	for (i = 0; i < num_dsc; i++) {
		rc = _dce_dsc_setup_single(sde_enc, dsc, affected_displays, i,
				roi, dsc_common_mode, merge_3d,
				disable_merge_3d, mode_3d,
				half_panel_partial_update, ich_res);
		if (rc)
			break;
	}

	return rc;
}

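/*
 * Kickoff-time DSC entry point. Reprogramming is skipped when the
 * connector ROI is unchanged from the previous commit, so the full
 * helper only runs on mode set or partial-update geometry changes.
 */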
static int _dce_dsc_setup(struct sde_encoder_virt *sde_enc,
		struct sde_encoder_kickoff_params *params)
{
	struct drm_connector *drm_conn;
	enum sde_rm_topology_name topology;

	if (!sde_enc || !params || !sde_enc->phys_encs[0] ||
			!sde_enc->phys_encs[0]->connector)
		return -EINVAL;

	drm_conn = sde_enc->phys_encs[0]->connector;

	topology = sde_connector_get_topology_name(drm_conn);
	if (topology == SDE_RM_TOPOLOGY_NONE) {
		SDE_ERROR_DCE(sde_enc, "topology not set yet\n");
		return -EINVAL;
	}

	SDE_DEBUG_DCE(sde_enc, "topology:%d\n", topology);

	if (sde_kms_rect_is_equal(&sde_enc->cur_conn_roi,
			&sde_enc->prv_conn_roi))
		return 0;

	SDE_EVT32(DRMID(&sde_enc->base), topology,
			sde_enc->cur_conn_roi.x, sde_enc->cur_conn_roi.y,
			sde_enc->cur_conn_roi.w, sde_enc->cur_conn_roi.h,
			sde_enc->prv_conn_roi.x, sde_enc->prv_conn_roi.y,
			sde_enc->prv_conn_roi.w, sde_enc->prv_conn_roi.h,
			sde_enc->cur_master->cached_mode.hdisplay,
			sde_enc->cur_master->cached_mode.vdisplay);

	return _dce_dsc_setup_helper(sde_enc, params->affected_displays,
			topology);
}

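/*
 * Kickoff-time VDC entry point; mirrors _dce_dsc_setup but programs
 * the VDC blocks inline instead of through a per-index helper.
 */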
static int _dce_vdc_setup(struct sde_encoder_virt *sde_enc,
		struct sde_encoder_kickoff_params *params)
{
	struct drm_connector *drm_conn;
	struct sde_kms *sde_kms;
	struct sde_encoder_phys *enc_master;
	struct sde_hw_vdc *hw_vdc[MAX_CHANNELS_PER_ENC];
	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	struct msm_display_vdc_info *vdc = NULL;
	enum sde_rm_topology_name topology;
	const struct sde_rect *roi;
	struct sde_hw_ctl *hw_ctl;
	struct sde_hw_intf_cfg_v1 cfg;
	enum sde_3d_blend_mode mode_3d;
	bool half_panel_partial_update, merge_3d;
	bool disable_merge_3d = false;
	int this_frame_slices;
	int intf_ip_w, enc_ip_w;
	const struct sde_rm_topology_def *def;
	int num_intf, num_vdc, num_lm;
	int i;
	int ret = 0;

	if (!sde_enc || !params || !sde_enc->phys_encs[0] ||
			!sde_enc->phys_encs[0]->connector)
		return -EINVAL;

	drm_conn = sde_enc->phys_encs[0]->connector;

	topology = sde_connector_get_topology_name(drm_conn);
	if (topology == SDE_RM_TOPOLOGY_NONE) {
		SDE_ERROR_DCE(sde_enc, "topology not set yet\n");
		return -EINVAL;
	}

	SDE_DEBUG_DCE(sde_enc, "topology:%d\n", topology);
	SDE_EVT32(DRMID(&sde_enc->base), topology,
			sde_enc->cur_conn_roi.x, sde_enc->cur_conn_roi.y,
			sde_enc->cur_conn_roi.w, sde_enc->cur_conn_roi.h,
			sde_enc->prv_conn_roi.x, sde_enc->prv_conn_roi.y,
			sde_enc->prv_conn_roi.w, sde_enc->prv_conn_roi.h,
			sde_enc->cur_master->cached_mode.hdisplay,
			sde_enc->cur_master->cached_mode.vdisplay);

	if (sde_kms_rect_is_equal(&sde_enc->cur_conn_roi,
			&sde_enc->prv_conn_roi))
		return ret;

	enc_master = sde_enc->cur_master;
	roi = &sde_enc->cur_conn_roi;
	hw_ctl = enc_master->hw_ctl;
	vdc = &sde_enc->mode_info.comp_info.vdc_info;

	sde_kms = sde_encoder_get_kms(&sde_enc->base);

	def = sde_rm_topology_get_topology_def(&sde_kms->rm, topology);
	if (IS_ERR_OR_NULL(def))
		return -EINVAL;

	num_vdc = def->num_comp_enc;
	num_intf = def->num_intf;
	mode_3d = (topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_VDC) ?
			BLEND_3D_H_ROW_INT : BLEND_3D_NONE;
	num_lm = def->num_lm;

	/*
	 * If this encoder is driving more than one VDC encoder, they
	 * operate in tandem and the same pic dimension needs to be used
	 * by each of them (pp-split is assumed to be not supported).
	 */
	_dce_vdc_update_pic_dim(vdc, roi->w, roi->h);
	merge_3d = (mode_3d != BLEND_3D_NONE) ? true : false;
	half_panel_partial_update = _dce_check_half_panel_update(num_lm,
			params->affected_displays);

	if (half_panel_partial_update && merge_3d)
		disable_merge_3d = true;

	this_frame_slices = roi->w / vdc->slice_width;
	intf_ip_w = this_frame_slices * vdc->slice_width;

	sde_vdc_populate_config(vdc, intf_ip_w, vdc->traffic_mode);

	enc_ip_w = intf_ip_w;

	SDE_DEBUG_DCE(sde_enc, "pic_w: %d pic_h: %d\n", roi->w, roi->h);

	for (i = 0; i < num_vdc; i++) {
		bool active = !!((1 << i) & params->affected_displays);

		/*
		 * if half_panel partial update, vdc should be bound to
		 * the pp that is driving the update; otherwise, when both
		 * layer mixers are driving the update, vdc should be
		 * bound to the left side pp
		 */
		if (merge_3d && half_panel_partial_update)
			hw_pp[i] = (active) ? sde_enc->hw_pp[0] :
					sde_enc->hw_pp[1];
		else
			hw_pp[i] = sde_enc->hw_pp[i];

		hw_vdc[i] = sde_enc->hw_vdc[i];

		if (!hw_vdc[i]) {
			SDE_ERROR_DCE(sde_enc, "invalid params for VDC\n");
			SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h,
					i, active);
			return -EINVAL;
		}

		_dce_vdc_pipe_cfg(hw_vdc[i], hw_pp[i],
				vdc, mode_3d, disable_merge_3d, active);

		memset(&cfg, 0, sizeof(cfg));
		cfg.vdc[cfg.vdc_count++] = hw_vdc[i]->idx;

		if (hw_ctl->ops.update_intf_cfg)
			hw_ctl->ops.update_intf_cfg(hw_ctl, &cfg, active);

		if (hw_ctl->ops.update_bitmask)
			hw_ctl->ops.update_bitmask(hw_ctl, SDE_HW_FLUSH_VDC,
					hw_vdc[i]->idx, active);

		SDE_DEBUG_DCE(sde_enc,
				"update_intf_cfg hw_ctl[%d], vdc:%d, %s\n",
				hw_ctl->idx, cfg.vdc[0],
				active ? "enabled" : "disabled");

		if (mode_3d) {
			memset(&cfg, 0, sizeof(cfg));
			cfg.merge_3d[cfg.merge_3d_count++] =
					hw_pp[i]->merge_3d->idx;

			if (hw_ctl->ops.update_intf_cfg)
				hw_ctl->ops.update_intf_cfg(hw_ctl, &cfg,
						!disable_merge_3d);

			if (hw_ctl->ops.update_bitmask)
				hw_ctl->ops.update_bitmask(hw_ctl,
						SDE_HW_FLUSH_MERGE_3D,
						hw_pp[i]->merge_3d->idx,
						true);

			SDE_DEBUG("mode_3d %s, on CTL_%d PP-%d merge3d:%d\n",
					disable_merge_3d ?
					"disabled" : "enabled",
					hw_ctl->idx - CTL_0,
					hw_pp[i]->idx - PINGPONG_0,
					hw_pp[i]->merge_3d ?
					hw_pp[i]->merge_3d->idx - MERGE_3D_0 :
					-1);
		}
	}

	return 0;
}

static void _dce_dsc_disable(struct sde_encoder_virt *sde_enc)
{
	int i;
	struct sde_hw_pingpong *hw_pp = NULL;
	struct sde_hw_pingpong *hw_dsc_pp = NULL;
	struct sde_hw_dsc *hw_dsc = NULL;
	struct sde_hw_ctl *hw_ctl = NULL;
	struct sde_hw_intf_cfg_v1 cfg;

	if (!sde_enc || !sde_enc->phys_encs[0]) {
		SDE_ERROR("invalid params %d %d\n",
			!sde_enc, sde_enc ? !sde_enc->phys_encs[0] : -1);
		return;
	}

	/*
	 * Connector can be null if the first virt modeset after suspend
	 * is called with dynamic clock or dms enabled.
	 */
	if (!sde_enc->phys_encs[0]->connector)
		return;

	if (sde_enc->cur_master)
		hw_ctl = sde_enc->cur_master->hw_ctl;

	memset(&cfg, 0, sizeof(cfg));

	/* Disable DSC for all the pp's present in this topology */
	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		hw_pp = sde_enc->hw_pp[i];
		hw_dsc = sde_enc->hw_dsc[i];
		hw_dsc_pp = sde_enc->hw_dsc_pp[i];

		_dce_dsc_pipe_cfg(hw_dsc, hw_pp, NULL,
				0, 0, hw_dsc_pp,
				BLEND_3D_NONE, false, false, false);

		if (hw_dsc) {
			sde_enc->dirty_dsc_ids[i] = hw_dsc->idx;
			cfg.dsc[cfg.dsc_count++] = hw_dsc->idx;
		}
	}

	/* Clear the DSC ACTIVE config for this CTL */
	if (hw_ctl && hw_ctl->ops.update_intf_cfg)
		hw_ctl->ops.update_intf_cfg(hw_ctl, &cfg, false);

	/*
	 * Since pending flushes from the previous commit get cleared
	 * sometime after this point, setting DSC flush bits now
	 * will have no effect. Therefore dirty_dsc_ids tracks which
	 * DSC blocks must be flushed for the next trigger.
	 */
}

static void _dce_vdc_disable(struct sde_encoder_virt *sde_enc)
{
	int i;
	struct sde_hw_pingpong *hw_pp = NULL;
	struct sde_hw_vdc *hw_vdc = NULL;
	struct sde_hw_ctl *hw_ctl = NULL;
	struct sde_hw_intf_cfg_v1 cfg;

	if (!sde_enc || !sde_enc->phys_encs[0] ||
			!sde_enc->phys_encs[0]->connector) {
		SDE_ERROR("invalid params %d %d\n",
			!sde_enc, sde_enc ? !sde_enc->phys_encs[0] : -1);
		return;
	}

	if (sde_enc->cur_master)
		hw_ctl = sde_enc->cur_master->hw_ctl;

	memset(&cfg, 0, sizeof(cfg));

	/* Disable VDC for all the pp's present in this topology */
	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		hw_pp = sde_enc->hw_pp[i];
		hw_vdc = sde_enc->hw_vdc[i];

		_dce_vdc_pipe_cfg(hw_vdc, hw_pp, NULL,
				BLEND_3D_NONE, false, false);

		if (hw_vdc) {
			sde_enc->dirty_vdc_ids[i] = hw_vdc->idx;
			cfg.vdc[cfg.vdc_count++] = hw_vdc->idx;
		}
	}

	/* Clear the VDC ACTIVE config for this CTL */
	if (hw_ctl && hw_ctl->ops.update_intf_cfg)
		hw_ctl->ops.update_intf_cfg(hw_ctl, &cfg, false);

	/*
	 * Since pending flushes from the previous commit get cleared
	 * sometime after this point, setting VDC flush bits now
	 * will have no effect. Therefore dirty_vdc_ids tracks which
	 * VDC blocks must be flushed for the next trigger.
	 */
}

bool _dce_dsc_is_dirty(struct sde_encoder_virt *sde_enc)
{
	int i;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		/*
		 * dirty_dsc_ids is set during DSC disable to
		 * indicate which DSC blocks need to be flushed
		 */
		if (sde_enc->dirty_dsc_ids[i])
			return true;
	}

	return false;
}

bool _dce_vdc_is_dirty(struct sde_encoder_virt *sde_enc)
{
	int i;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		/*
		 * dirty_vdc_ids is set during VDC disable to
		 * indicate which VDC blocks need to be flushed
		 */
		if (sde_enc->dirty_vdc_ids[i])
			return true;
	}

	return false;
}

static void _dce_helper_flush_dsc(struct sde_encoder_virt *sde_enc)
{
	int i;
	struct sde_hw_ctl *hw_ctl = NULL;
	enum sde_dsc dsc_idx;

	if (sde_enc->cur_master)
		hw_ctl = sde_enc->cur_master->hw_ctl;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		dsc_idx = sde_enc->dirty_dsc_ids[i];
		if (dsc_idx && hw_ctl && hw_ctl->ops.update_bitmask)
			hw_ctl->ops.update_bitmask(hw_ctl, SDE_HW_FLUSH_DSC,
					dsc_idx, 1);

		sde_enc->dirty_dsc_ids[i] = DSC_NONE;
	}
}

void _dce_helper_flush_vdc(struct sde_encoder_virt *sde_enc)
{
	int i;
	struct sde_hw_ctl *hw_ctl = NULL;
	enum sde_vdc vdc_idx;

	if (sde_enc->cur_master)
		hw_ctl = sde_enc->cur_master->hw_ctl;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		vdc_idx = sde_enc->dirty_vdc_ids[i];
		if (vdc_idx && hw_ctl && hw_ctl->ops.update_bitmask)
			hw_ctl->ops.update_bitmask(hw_ctl, SDE_HW_FLUSH_VDC,
					vdc_idx, 1);

		sde_enc->dirty_vdc_ids[i] = VDC_NONE;
	}
}

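/*
 * Push source vs. compressed bpp to the CRTC for bandwidth and clock
 * scaling. bits_per_pixel here follows the DSC PPS convention of four
 * fractional bits, so ">> 4" yields the integer bpp: e.g. a stored
 * value of 128 means a target of 8 bpp.
 */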
void sde_encoder_dce_set_bpp(struct msm_mode_info mode_info,
		struct drm_crtc *crtc)
{
	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
	enum msm_display_compression_type comp_type;
	int src_bpp, target_bpp;

	if (!sde_crtc) {
		SDE_DEBUG("invalid sde_crtc\n");
		return;
	}

	comp_type = mode_info.comp_info.comp_type;

	/*
	 * In cases where DSC or VDC compression type is not found, set
	 * src and target bpp to get compression ratio 8/8 (default).
	 */
	if (comp_type == MSM_DISPLAY_COMPRESSION_DSC) {
		struct msm_display_dsc_info dsc_info =
				mode_info.comp_info.dsc_info;

		src_bpp = msm_get_src_bpc(dsc_info.chroma_format,
				dsc_info.config.bits_per_component);
		target_bpp = dsc_info.config.bits_per_pixel >> 4;
	} else if (comp_type == MSM_DISPLAY_COMPRESSION_VDC) {
		struct msm_display_vdc_info vdc_info =
				mode_info.comp_info.vdc_info;

		src_bpp = msm_get_src_bpc(vdc_info.chroma_format,
				vdc_info.bits_per_component);
		target_bpp = vdc_info.bits_per_pixel >> 4;
	} else {
		src_bpp = 8;
		target_bpp = 8;
	}

	sde_crtc_set_bpp(sde_crtc, src_bpp, target_bpp);

	SDE_DEBUG("sde_crtc src_bpp = %d, target_bpp = %d\n",
			sde_crtc->src_bpp, sde_crtc->target_bpp);
}

void sde_encoder_dce_disable(struct sde_encoder_virt *sde_enc)
{
	enum msm_display_compression_type comp_type;

	if (!sde_enc)
		return;

	comp_type = sde_enc->mode_info.comp_info.comp_type;

	if (comp_type == MSM_DISPLAY_COMPRESSION_DSC)
		_dce_dsc_disable(sde_enc);
	else if (comp_type == MSM_DISPLAY_COMPRESSION_VDC)
		_dce_vdc_disable(sde_enc);
}

int sde_encoder_dce_flush(struct sde_encoder_virt *sde_enc)
{
	int rc = 0;

	if (!sde_enc)
		return -EINVAL;

	if (_dce_dsc_is_dirty(sde_enc))
		_dce_helper_flush_dsc(sde_enc);
	else if (_dce_vdc_is_dirty(sde_enc))
		_dce_helper_flush_vdc(sde_enc);

	return rc;
}

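/*
 * Dispatch compression setup by type. A minimal sketch of the
 * expected call order from the virtual encoder over one commit
 * cycle (the caller annotations are illustrative assumptions):
 *
 *	sde_encoder_dce_setup(sde_enc, params);  // prepare for kickoff
 *	sde_encoder_dce_flush(sde_enc);          // flush dirty DSC/VDC ids
 *	sde_encoder_dce_disable(sde_enc);        // on encoder disable
 */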
int sde_encoder_dce_setup(struct sde_encoder_virt *sde_enc,
		struct sde_encoder_kickoff_params *params)
{
	enum msm_display_compression_type comp_type;
	int rc = 0;

	if (!sde_enc)
		return -EINVAL;

	comp_type = sde_enc->mode_info.comp_info.comp_type;

	if (comp_type == MSM_DISPLAY_COMPRESSION_DSC)
		rc = _dce_dsc_setup(sde_enc, params);
	else if (comp_type == MSM_DISPLAY_COMPRESSION_VDC)
		rc = _dce_vdc_setup(sde_enc, params);

	return rc;
}