Bladeren bron

Merge remote-tracking branch 'quic/display-kernel.lnx.4.19' into display-kernel.lnx.1.0

Merge commit to pull display-kernel.lnx.1.0 to tip
* quic/display-kernel.lnx.4.19:
  disp: msm: sde: wait for lp2 and pm_suspend frame trigger
  disp: msm: sde: update max display resolution with scale config
  drm: msm: add support for ltm off event
  disp: msm: sde: switch to WD vsync on unexpected panel jitter
  disp: msm: sde: add null check for pingpong hw block
  disp: msm: dsi: add null check for panel and proper ctrl iteration
  disp: msm: sde: add pending flush for merge3d
  disp: msm: dsi: Set OLED regulator mode when exit LP1
  disp: msm: config AB/IBB power when AOD mode enter/exit
  disp: msm: Set the dsi panel type
  disp: msm: only set nolp command when panel in LP1/LP2 mode
  disp: msm: sde: Init ltm_buf_busy list head before adding new node
  disp: msm: update topology based on clock requirement
  makefile: add Bengal config support for display techpack
  drm/msm/sde: Fix UBSan warnings in sde
  disp: msm: sde: reserve primary CTL and LM for primary connector
  disp: msm: sde: fix inline rotator downscale ratio check
  disp: msm: add changes for bengal target compilation
  clk: qcom: mdss: remove recalculation of vco rate for 10nm pll
  disp: msm: sde: avoid plane update if crtc inactive
  drm: msm: dp: Fix DSC and FEC handling
  disp: msm: dp: Detect failure cases when setting mst topology manager
  disp: msm: dp: Fix dp mst vcpi slot management for inactive crtcs
  disp: msm: dp: Fix incorrect NULL pointer check
  disp: msm: sde: increase rsc min_threshold time
  disp: msm: sde: fix panel mode check during SUI validations
  disp: msm: dsi: refine the logic for mode filling and calculation
  drm/msm/dsi-staging: update dsi debug bus error message
  disp: drm: dp: wait for a graceful dp_mst disable
  disp: msm: sde: ensure input handler unregistration in command mode
  disp: msm: dp: fix locking in hotplug processing
  disp: msm: dsi: enable multi mode support for video mode panel
  disp: msm: sde: fix panel mode check during secure transition
  disp: msm: sde: Get connector roi state even PU disabled
  disp: msm: sde: Add check to avoid list double add
  disp: msm: sde: revert to previous smmu state upon failure to switch
  disp: msm: dp: fix for dp link layer cts failures
  disp: msm: dsi: Enable ESD check after POMS done
  disp: msm: dp: remove session lock for power state update
  disp: msm: dp: clear scrambler bypass for test pattern 4
  disp: pll: update dynamic dsi clock sequence
  drm: msm: dp: turn off audio before dp lane status toggling
  disp: msm: add length check for debugfs_ctrl
  drm:msm:dsi: Add check for max controller count
  drm: sde: Remove feature support for IGC/3D LUT for virtual planes
  drm: msm: dp: change voltage swing levels for lito/kona

Change-Id: Ie2f7020274c75192228a9b367132b1a53d1301c8
Signed-off-by: Alisha Thapaliya <[email protected]>
Alisha Thapaliya 5 jaren geleden
bovenliggende
commit
388e19fcd4
51 gewijzigde bestanden met toevoegingen van 1073 en 412 verwijderingen
  1. 8 0
      Makefile
  2. 16 0
      config/bengaldisp.conf
  3. 15 0
      config/bengaldispconf.h
  4. 6 1
      msm/dp/dp_audio.c
  5. 1 1
      msm/dp/dp_catalog.c
  6. 1 1
      msm/dp/dp_catalog_v420.c
  7. 18 11
      msm/dp/dp_ctrl.c
  8. 1 1
      msm/dp/dp_ctrl.h
  9. 84 23
      msm/dp/dp_display.c
  10. 13 5
      msm/dp/dp_drm.c
  11. 13 0
      msm/dp/dp_drm.h
  12. 1 1
      msm/dp/dp_link.c
  13. 22 5
      msm/dp/dp_mst_drm.c
  14. 46 30
      msm/dp/dp_panel.c
  15. 2 0
      msm/dsi/dsi_ctrl_hw.h
  16. 1 1
      msm/dsi/dsi_ctrl_hw_cmn.c
  17. 74 105
      msm/dsi/dsi_display.c
  18. 1 1
      msm/dsi/dsi_display.h
  19. 5 1
      msm/dsi/dsi_drm.c
  20. 70 3
      msm/dsi/dsi_panel.c
  21. 14 0
      msm/dsi/dsi_panel.h
  22. 48 0
      msm/dsi/dsi_pwr.c
  23. 18 0
      msm/dsi/dsi_pwr.h
  24. 2 1
      msm/msm_atomic.c
  25. 27 12
      msm/msm_drv.c
  26. 4 0
      msm/msm_drv.h
  27. 4 0
      msm/msm_kms.h
  28. 37 2
      msm/sde/sde_color_processing.c
  29. 8 0
      msm/sde/sde_color_processing.h
  30. 5 5
      msm/sde/sde_core_perf.c
  31. 18 10
      msm/sde/sde_crtc.c
  32. 10 6
      msm/sde/sde_crtc.h
  33. 43 8
      msm/sde/sde_encoder.c
  34. 10 1
      msm/sde/sde_encoder.h
  35. 32 0
      msm/sde/sde_encoder_phys.h
  36. 130 25
      msm/sde/sde_encoder_phys_cmd.c
  37. 4 3
      msm/sde/sde_hw_catalog.c
  38. 9 6
      msm/sde/sde_hw_color_proc_v4.c
  39. 1 0
      msm/sde/sde_hw_ctl.c
  40. 18 8
      msm/sde/sde_hw_reg_dma_v1_color_proc.c
  41. 21 5
      msm/sde/sde_hw_sspp.c
  42. 108 23
      msm/sde/sde_kms.c
  43. 2 0
      msm/sde/sde_kms.h
  44. 10 2
      msm/sde/sde_plane.c
  45. 17 11
      msm/sde/sde_rm.c
  46. 19 4
      msm/sde/sde_wb.h
  47. 3 0
      msm/sde_dbg.c
  48. 4 3
      msm/sde_rsc.c
  49. 7 2
      msm/sde_rsc_hw.c
  50. 5 44
      pll/dsi_pll_10nm.c
  51. 37 41
      pll/dsi_pll_7nm.c

+ 8 - 0
Makefile

@@ -17,6 +17,14 @@ ifeq ($(CONFIG_ARCH_LITO), y)
 LINUXINCLUDE    += -include $(srctree)/techpack/display/config/saipdispconf.h
 endif
 
+ifeq ($(CONFIG_ARCH_BENGAL), y)
+include $(srctree)/techpack/display/config/bengaldisp.conf
+endif
+
+ifeq ($(CONFIG_ARCH_BENGAL), y)
+LINUXINCLUDE    += -include $(srctree)/techpack/display/config/bengaldispconf.h
+endif
+
 obj-$(CONFIG_DRM_MSM) += msm/
 obj-$(CONFIG_MSM_SDE_ROTATOR) += rotator/
 obj-$(CONFIG_QCOM_MDSS_PLL) += pll/

+ 16 - 0
config/bengaldisp.conf

@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (c) 2019, The Linux Foundation. All rights reserved.
+
+export CONFIG_DRM_MSM=y
+export CONFIG_DRM_MSM_SDE=y
+export CONFIG_SYNC_FILE=y
+export CONFIG_DRM_MSM_DSI=y
+export CONFIG_DRM_MSM_DP=n
+export CONFIG_QCOM_MDSS_DP_PLL=n
+export CONFIG_DSI_PARSER=y
+export CONFIG_DRM_SDE_WB=n
+export CONFIG_DRM_MSM_REGISTER_LOGGING=y
+export CONFIG_QCOM_MDSS_PLL=y
+export CONFIG_MSM_SDE_ROTATOR=y
+export CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+export CONFIG_DRM_SDE_RSC=n

+ 15 - 0
config/bengaldispconf.h

@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define CONFIG_DRM_MSM 1
+#define CONFIG_DRM_MSM_SDE 1
+#define CONFIG_SYNC_FILE 1
+#define CONFIG_DRM_MSM_DSI 1
+#define CONFIG_DSI_PARSER 1
+#define CONFIG_DRM_MSM_REGISTER_LOGGING 1
+#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1
+#define CONFIG_QCOM_MDSS_PLL 1
+#define CONFIG_MSM_SDE_ROTATOR 1
+#define CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG 1

+ 6 - 1
msm/dp/dp_audio.c

@@ -377,7 +377,7 @@ static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
 	}
 
 	dp_audio = ext_data->intf_data;
-	if (!ext_data) {
+	if (!dp_audio) {
 		DP_ERR("invalid intf data\n");
 		return ERR_PTR(-EINVAL);
 	}
@@ -424,6 +424,11 @@ static int dp_audio_get_edid_blk(struct platform_device *pdev,
 	struct dp_audio_private *audio;
 	struct sde_edid_ctrl *edid;
 
+	if (!blk) {
+		DP_ERR("invalid input\n");
+		return -EINVAL;
+	}
+
 	audio = dp_audio_get_data(pdev);
 	if (IS_ERR(audio)) {
 		rc = PTR_ERR(audio);

+ 1 - 1
msm/dp/dp_catalog.c

@@ -1824,7 +1824,7 @@ static void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog_ctrl *ctrl,
 		dp_write(DP_MAINLINK_CTRL, value);
 		break;
 	case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3:
-		dp_write(DP_MAINLINK_CTRL, 0x11);
+		dp_write(DP_MAINLINK_CTRL, 0x01);
 		dp_write(DP_STATE_CTRL, 0x8);
 		break;
 	default:

+ 1 - 1
msm/dp/dp_catalog_v420.c

@@ -65,7 +65,7 @@ static u8 const dp_pre_emp_hbr_rbr[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = {
 static u8 const dp_swing_hbr_rbr[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = {
 	{0x08, 0x0F, 0x16, 0x1F}, /* sw0, 0.4v */
 	{0x11, 0x1E, 0x1F, 0xFF}, /* sw1, 0.6v */
-	{0x1A, 0x1F, 0xFF, 0xFF}, /* sw1, 0.8v */
+	{0x16, 0x1F, 0xFF, 0xFF}, /* sw1, 0.8v */
 	{0x1F, 0xFF, 0xFF, 0xFF}  /* sw1, 1.2v */
 };
 

+ 18 - 11
msm/dp/dp_ctrl.c

@@ -69,6 +69,7 @@ struct dp_ctrl_private {
 	bool power_on;
 	bool mst_mode;
 	bool fec_mode;
+	bool dsc_mode;
 
 	atomic_t aborted;
 
@@ -575,7 +576,6 @@ end:
 static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl)
 {
 	int ret = 0;
-	const unsigned int fec_cfg_dpcd = 0x120;
 
 	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
 		goto end;
@@ -588,7 +588,8 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl)
 	ctrl->catalog->reset(ctrl->catalog);
 
 	if (ctrl->fec_mode)
-		drm_dp_dpcd_writeb(ctrl->aux->drm_aux, fec_cfg_dpcd, 0x01);
+		drm_dp_dpcd_writeb(ctrl->aux->drm_aux, DP_FEC_CONFIGURATION,
+				0x01);
 
 	ret = dp_ctrl_link_train(ctrl);
 
@@ -926,7 +927,7 @@ static void dp_ctrl_process_phy_test_request(struct dp_ctrl *dp_ctrl)
 	ctrl->aux->init(ctrl->aux, ctrl->parser->aux_cfg);
 
 	ret = ctrl->dp_ctrl.on(&ctrl->dp_ctrl, ctrl->mst_mode,
-					ctrl->fec_mode, false);
+			ctrl->fec_mode, ctrl->dsc_mode, false);
 	if (ret)
 		DP_ERR("failed to enable DP controller\n");
 
@@ -1138,19 +1139,18 @@ static void dp_ctrl_fec_dsc_setup(struct dp_ctrl_private *ctrl)
 	u8 fec_sts = 0;
 	int rlen;
 	u32 dsc_enable;
-	const unsigned int fec_sts_dpcd = 0x280;
 
-	if (ctrl->stream_count || !ctrl->fec_mode)
+	if (!ctrl->fec_mode)
 		return;
 
 	ctrl->catalog->fec_config(ctrl->catalog, ctrl->fec_mode);
 
 	/* wait for controller to start fec sequence */
 	usleep_range(900, 1000);
-	drm_dp_dpcd_readb(ctrl->aux->drm_aux, fec_sts_dpcd, &fec_sts);
+	drm_dp_dpcd_readb(ctrl->aux->drm_aux, DP_FEC_STATUS, &fec_sts);
 	DP_DEBUG("sink fec status:%d\n", fec_sts);
 
-	dsc_enable = ctrl->fec_mode ? 1 : 0;
+	dsc_enable = ctrl->dsc_mode ? 1 : 0;
 	rlen = drm_dp_dpcd_writeb(ctrl->aux->drm_aux, DP_DSC_ENABLE,
 			dsc_enable);
 	if (rlen < 1)
@@ -1191,13 +1191,14 @@ static int dp_ctrl_stream_on(struct dp_ctrl *dp_ctrl, struct dp_panel *panel)
 
 	dp_ctrl_wait4video_ready(ctrl);
 
-	dp_ctrl_fec_dsc_setup(ctrl);
-
 	ctrl->stream_count++;
 
 	link_ready = ctrl->catalog->mainlink_ready(ctrl->catalog);
 	DP_DEBUG("mainlink %s\n", link_ready ? "READY" : "NOT READY");
 
+	/* wait for link training completion before fec config as per spec */
+	dp_ctrl_fec_dsc_setup(ctrl);
+
 	return rc;
 }
 
@@ -1266,7 +1267,7 @@ static void dp_ctrl_stream_off(struct dp_ctrl *dp_ctrl, struct dp_panel *panel)
 }
 
 static int dp_ctrl_on(struct dp_ctrl *dp_ctrl, bool mst_mode,
-				bool fec_mode, bool shallow)
+		bool fec_mode, bool dsc_mode, bool shallow)
 {
 	int rc = 0;
 	struct dp_ctrl_private *ctrl;
@@ -1285,7 +1286,11 @@ static int dp_ctrl_on(struct dp_ctrl *dp_ctrl, bool mst_mode,
 		goto end;
 
 	ctrl->mst_mode = mst_mode;
-	ctrl->fec_mode = fec_mode;
+	if (fec_mode) {
+		ctrl->fec_mode = fec_mode;
+		ctrl->dsc_mode = dsc_mode;
+	}
+
 	rate = ctrl->panel->link_info.rate;
 
 	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
@@ -1323,6 +1328,7 @@ static void dp_ctrl_off(struct dp_ctrl *dp_ctrl)
 	if (!ctrl->power_on)
 		return;
 
+	ctrl->catalog->fec_config(ctrl->catalog, false);
 	dp_ctrl_configure_source_link_params(ctrl, false);
 	ctrl->catalog->reset(ctrl->catalog);
 
@@ -1333,6 +1339,7 @@ static void dp_ctrl_off(struct dp_ctrl *dp_ctrl)
 
 	ctrl->mst_mode = false;
 	ctrl->fec_mode = false;
+	ctrl->dsc_mode = false;
 	ctrl->power_on = false;
 	memset(&ctrl->mst_ch_info, 0, sizeof(ctrl->mst_ch_info));
 	DP_DEBUG("DP off done\n");

+ 1 - 1
msm/dp/dp_ctrl.h

@@ -17,7 +17,7 @@ struct dp_ctrl {
 	int (*init)(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
 	void (*deinit)(struct dp_ctrl *dp_ctrl);
 	int (*on)(struct dp_ctrl *dp_ctrl, bool mst_mode, bool fec_en,
-			bool shallow);
+			bool dsc_en, bool shallow);
 	void (*off)(struct dp_ctrl *dp_ctrl);
 	void (*abort)(struct dp_ctrl *dp_ctrl);
 	void (*isr)(struct dp_ctrl *dp_ctrl);

+ 84 - 23
msm/dp/dp_display.c

@@ -45,10 +45,12 @@ enum dp_display_states {
 	DP_STATE_INITIALIZED            = BIT(1),
 	DP_STATE_READY                  = BIT(2),
 	DP_STATE_CONNECTED              = BIT(3),
-	DP_STATE_ENABLED                = BIT(4),
-	DP_STATE_SUSPENDED              = BIT(5),
-	DP_STATE_ABORTED                = BIT(6),
-	DP_STATE_HDCP_ABORTED           = BIT(7),
+	DP_STATE_CONNECT_NOTIFIED       = BIT(4),
+	DP_STATE_DISCONNECT_NOTIFIED    = BIT(5),
+	DP_STATE_ENABLED                = BIT(6),
+	DP_STATE_SUSPENDED              = BIT(7),
+	DP_STATE_ABORTED                = BIT(8),
+	DP_STATE_HDCP_ABORTED           = BIT(9),
 };
 
 static char *dp_display_state_name(enum dp_display_states state)
@@ -74,6 +76,14 @@ static char *dp_display_state_name(enum dp_display_states state)
 		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
 			"CONNECTED");
 
+	if (state & DP_STATE_CONNECT_NOTIFIED)
+		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
+			"CONNECT_NOTIFIED");
+
+	if (state & DP_STATE_DISCONNECT_NOTIFIED)
+		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
+			"DISCONNECT_NOTIFIED");
+
 	if (state & DP_STATE_ENABLED)
 		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
 			"ENABLED");
@@ -653,6 +663,14 @@ static void dp_display_send_hpd_event(struct dp_display_private *dp)
 	envp[4] = NULL;
 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
 			envp);
+
+	if (connector->status == connector_status_connected) {
+		dp_display_state_add(DP_STATE_CONNECT_NOTIFIED);
+		dp_display_state_remove(DP_STATE_DISCONNECT_NOTIFIED);
+	} else {
+		dp_display_state_add(DP_STATE_DISCONNECT_NOTIFIED);
+		dp_display_state_remove(DP_STATE_CONNECT_NOTIFIED);
+	}
 }
 
 static int dp_display_send_hpd_notification(struct dp_display_private *dp)
@@ -833,8 +851,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
 	if (dp_display_state_is(DP_STATE_CONNECTED)) {
 		DP_DEBUG("dp already connected, skipping hpd high\n");
 		mutex_unlock(&dp->session_lock);
-		rc = -EISCONN;
-		goto end;
+		return -EISCONN;
 	}
 
 	dp_display_state_add(DP_STATE_CONNECTED);
@@ -867,7 +884,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
 	dp_display_process_mst_hpd_high(dp, false);
 
 	rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active,
-				dp->panel->fec_en, false);
+			dp->panel->fec_en, dp->panel->dsc_en, false);
 	if (rc) {
 		dp_display_state_remove(DP_STATE_CONNECTED);
 		goto end;
@@ -911,7 +928,9 @@ static int dp_display_process_hpd_low(struct dp_display_private *dp)
 
 	dp_display_process_mst_hpd_low(dp);
 
-	if (dp_display_state_is(DP_STATE_ENABLED) && !dp->mst.mst_active)
+	if ((dp_display_state_is(DP_STATE_CONNECT_NOTIFIED) ||
+			dp_display_state_is(DP_STATE_ENABLED)) &&
+			!dp->mst.mst_active)
 		rc = dp_display_send_hpd_notification(dp);
 
 	mutex_lock(&dp->session_lock);
@@ -991,6 +1010,30 @@ static void dp_display_stream_disable(struct dp_display_private *dp,
 	dp->active_stream_cnt--;
 }
 
+static void dp_audio_enable(struct dp_display_private *dp, bool enable)
+{
+	struct dp_panel *dp_panel;
+	int idx;
+
+	for (idx = DP_STREAM_0; idx < DP_STREAM_MAX; idx++) {
+		if (!dp->active_panels[idx])
+			continue;
+		dp_panel = dp->active_panels[idx];
+
+		if (dp_panel->audio_supported) {
+			if (enable) {
+				dp_panel->audio->bw_code =
+					dp->link->link_params.bw_code;
+				dp_panel->audio->lane_count =
+					dp->link->link_params.lane_count;
+				dp_panel->audio->on(dp->panel->audio);
+			} else {
+				dp_panel->audio->off(dp_panel->audio);
+			}
+		}
+	}
+}
+
 static void dp_display_clean(struct dp_display_private *dp)
 {
 	int idx;
@@ -1082,10 +1125,8 @@ static int dp_display_usbpd_disconnect_cb(struct device *dev)
 
 	dp_display_state_remove(DP_STATE_CONFIGURED);
 
-	mutex_lock(&dp->session_lock);
 	if (dp->debug->psm_enabled && dp_display_state_is(DP_STATE_READY))
 		dp->link->psm_config(dp->link, &dp->panel->link_info, true);
-	mutex_unlock(&dp->session_lock);
 
 	dp_display_disconnect_sync(dp);
 
@@ -1178,19 +1219,33 @@ static void dp_display_attention_work(struct work_struct *work)
 		goto mst_attention;
 	}
 
-	if (dp->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
-		dp->ctrl->process_phy_test_request(dp->ctrl);
-		goto mst_attention;
-	}
+	if (dp->link->sink_request & (DP_TEST_LINK_PHY_TEST_PATTERN |
+		DP_TEST_LINK_TRAINING | DP_LINK_STATUS_UPDATED)) {
 
-	if (dp->link->sink_request & DP_TEST_LINK_TRAINING) {
-		dp->link->send_test_response(dp->link);
-		dp->ctrl->link_maintenance(dp->ctrl);
-		goto mst_attention;
+		mutex_lock(&dp->session_lock);
+		dp_audio_enable(dp, false);
+		mutex_unlock(&dp->session_lock);
+
+		if (dp->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
+			dp->ctrl->process_phy_test_request(dp->ctrl);
+
+		if (dp->link->sink_request & DP_TEST_LINK_TRAINING) {
+			dp->link->send_test_response(dp->link);
+			dp->ctrl->link_maintenance(dp->ctrl);
+		}
+
+		if (dp->link->sink_request & DP_LINK_STATUS_UPDATED)
+			dp->ctrl->link_maintenance(dp->ctrl);
+
+		mutex_lock(&dp->session_lock);
+		dp_audio_enable(dp, true);
+		mutex_unlock(&dp->session_lock);
+
+		if (dp->link->sink_request & (DP_TEST_LINK_PHY_TEST_PATTERN |
+			DP_TEST_LINK_TRAINING))
+			goto mst_attention;
 	}
 
-	if (dp->link->sink_request & DP_LINK_STATUS_UPDATED)
-		dp->ctrl->link_maintenance(dp->ctrl);
 cp_irq:
 	if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->cp_irq)
 		dp->hdcp.ops->cp_irq(dp->hdcp.data);
@@ -1608,7 +1663,8 @@ static int dp_display_prepare(struct dp_display *dp_display, void *panel)
 	 * So, we execute in shallow mode here to do only minimal
 	 * and required things.
 	 */
-	rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active, dp_panel->fec_en, true);
+	rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active, dp_panel->fec_en,
+			dp_panel->dsc_en, true);
 	if (rc)
 		goto end;
 
@@ -2004,6 +2060,7 @@ static enum drm_mode_status dp_display_validate_mode(
 	struct dp_display_mode dp_mode;
 	bool dsc_en;
 	u32 num_lm = 0;
+	int rc = 0;
 
 	if (!dp_display || !mode || !panel ||
 			!avail_res || !avail_res->max_mixer_width) {
@@ -2049,8 +2106,12 @@ static enum drm_mode_status dp_display_validate_mode(
 		goto end;
 	}
 
-	num_lm = (avail_res->max_mixer_width <= mode->hdisplay) ?
-			2 : 1;
+	rc = msm_get_mixer_count(dp->priv, mode, avail_res, &num_lm);
+	if (rc) {
+		DP_ERR("error getting mixer count. rc:%d\n", rc);
+		goto end;
+	}
+
 	if (num_lm > avail_res->num_lm ||
 			(num_lm == 2 && !avail_res->num_3dmux)) {
 		DP_MST_DEBUG("num_lm:%d, req lm:%d 3dmux:%d\n", num_lm,

+ 13 - 5
msm/dp/dp_drm.c

@@ -376,8 +376,6 @@ int dp_connector_get_mode_info(struct drm_connector *connector,
 		struct msm_mode_info *mode_info,
 		void *display, const struct msm_resource_caps_info *avail_res)
 {
-	const u32 dual_lm = 2;
-	const u32 single_lm = 1;
 	const u32 single_intf = 1;
 	const u32 no_enc = 0;
 	struct msm_display_topology *topology;
@@ -385,9 +383,12 @@ int dp_connector_get_mode_info(struct drm_connector *connector,
 	struct dp_panel *dp_panel;
 	struct dp_display_mode dp_mode;
 	struct dp_display *dp_disp = display;
+	struct msm_drm_private *priv;
+	int rc = 0;
 
 	if (!drm_mode || !mode_info || !avail_res ||
-			!avail_res->max_mixer_width || !connector || !display) {
+			!avail_res->max_mixer_width || !connector || !display ||
+			!connector->dev || !connector->dev->dev_private) {
 		DP_ERR("invalid params\n");
 		return -EINVAL;
 	}
@@ -396,10 +397,17 @@ int dp_connector_get_mode_info(struct drm_connector *connector,
 
 	sde_conn = to_sde_connector(connector);
 	dp_panel = sde_conn->drv_panel;
+	priv = connector->dev->dev_private;
 
 	topology = &mode_info->topology;
-	topology->num_lm = (avail_res->max_mixer_width < drm_mode->hdisplay) ?
-							dual_lm : single_lm;
+
+	rc = msm_get_mixer_count(priv, drm_mode, avail_res,
+			&topology->num_lm);
+	if (rc) {
+		DP_ERR("error getting mixer count. rc:%d\n", rc);
+		return rc;
+	}
+
 	topology->num_enc = no_enc;
 	topology->num_intf = single_intf;
 

+ 13 - 0
msm/dp/dp_drm.h

@@ -184,6 +184,19 @@ static inline int dp_connector_config_hdr(struct drm_connector *connector,
 	return 0;
 }
 
+int dp_connector_atomic_check(struct drm_connector *connector,
+	void *display,
+	struct drm_connector_state *c_state)
+{
+	return 0;
+}
+
+int dp_connector_set_colorspace(struct drm_connector *connector,
+	void *display)
+{
+	return 0;
+}
+
 static inline int dp_connector_post_init(struct drm_connector *connector,
 		void *display)
 {

+ 1 - 1
msm/dp/dp_link.c

@@ -1122,7 +1122,7 @@ static int dp_link_process_link_status_update(struct dp_link_private *link)
 		return -EINVAL;
 
 	DP_DEBUG("channel_eq_done = %d, clock_recovery_done = %d\n",
-			drm_dp_clock_recovery_ok(link->link_status,
+			drm_dp_channel_eq_ok(link->link_status,
 			link->dp_link.link_params.lane_count),
 			drm_dp_clock_recovery_ok(link->link_status,
 			link->dp_link.link_params.lane_count));

+ 22 - 5
msm/dp/dp_mst_drm.c

@@ -403,10 +403,16 @@ static int dp_mst_sim_topology_mgr_set_mst(
 		struct drm_dp_mst_topology_mgr *mgr,
 		bool mst_state)
 {
+	int rc;
 	struct dp_mst_private *mst = container_of(mgr,
 			struct dp_mst_private, mst_mgr);
 
-	drm_dp_mst_topology_mgr_set_mst(mgr, mst_state);
+	rc = drm_dp_mst_topology_mgr_set_mst(mgr, mst_state);
+	if (rc < 0) {
+		DRM_ERROR("unable to set mst topology mgr, rc: %d\n", rc);
+		return rc;
+	}
+
 	if (mst_state)
 		queue_work(system_long_wq, &mst->simulator.probe_work);
 
@@ -1451,7 +1457,7 @@ mode_set:
 
 	crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
 
-	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+	if (drm_atomic_crtc_needs_modeset(crtc_state) && crtc_state->active) {
 		c_conn = to_sde_connector(connector);
 
 		dp_display->convert_to_dp_mode(dp_display, c_conn->drv_panel,
@@ -1945,9 +1951,12 @@ static void dp_mst_display_hpd(void *dp_display, bool hpd_status,
 	mst->mst_session_state = hpd_status;
 	mutex_unlock(&mst->mst_lock);
 
-	if (!hpd_status)
+	if (!hpd_status) {
 		rc = mst->mst_fw_cbs->topology_mgr_set_mst(&mst->mst_mgr,
 				hpd_status);
+		if (rc < 0)
+			goto fail;
+	}
 
 	if (info && !info->mst_protocol) {
 		if (hpd_status) {
@@ -1959,13 +1968,21 @@ static void dp_mst_display_hpd(void *dp_display, bool hpd_status,
 		mst->mst_fw_cbs = &drm_dp_mst_fw_helper_ops;
 	}
 
-	if (hpd_status)
+	if (hpd_status) {
 		rc = mst->mst_fw_cbs->topology_mgr_set_mst(&mst->mst_mgr,
 				hpd_status);
+		if (rc < 0)
+			goto fail;
+	}
 
 	dp_mst_hpd_event_notify(mst, hpd_status);
 
-	DP_MST_INFO_LOG("mst display hpd:%d, rc:%d\n", hpd_status, rc);
+	DP_MST_INFO_LOG("mst display hpd success. hpd:%d, rc:%d\n", hpd_status,
+			rc);
+	return;
+fail:
+	DRM_ERROR("mst display hpd failed. hpd: %d, rc: %d\n",
+			hpd_status, rc);
 }
 
 static void dp_mst_display_hpd_irq(void *dp_display,

+ 46 - 30
msm/dp/dp_panel.c

@@ -2019,14 +2019,7 @@ end:
 
 static void dp_panel_decode_dsc_dpcd(struct dp_panel *dp_panel)
 {
-	s64 fec_overhead_fp = drm_fixp_from_fraction(1, 1);
-
-	if (!dp_panel->dsc_feature_enable || !dp_panel->fec_feature_enable) {
-		DP_DEBUG("source dsc is not supported\n");
-		return;
-	}
-
-	if (dp_panel->dsc_dpcd[0] && dp_panel->fec_dpcd) {
+	if (dp_panel->dsc_dpcd[0]) {
 		dp_panel->sink_dsc_caps.dsc_capable = true;
 		dp_panel->sink_dsc_caps.version = dp_panel->dsc_dpcd[1];
 		dp_panel->sink_dsc_caps.block_pred_en =
@@ -2039,21 +2032,13 @@ static void dp_panel_decode_dsc_dpcd(struct dp_panel *dp_panel)
 		dp_panel->dsc_en = false;
 	}
 
-	dp_panel->fec_en = dp_panel->dsc_en;
 	dp_panel->widebus_en = dp_panel->dsc_en;
-
-	/* fec_overhead = 1.00 / 0.97582 */
-	if (dp_panel->fec_en)
-		fec_overhead_fp = drm_fixp_from_fraction(100000, 97582);
-
-	dp_panel->fec_overhead_fp = fec_overhead_fp;
 }
 
 static void dp_panel_read_sink_dsc_caps(struct dp_panel *dp_panel)
 {
 	int rlen;
 	struct dp_panel_private *panel;
-	const int fec_cap = 0x90;
 	int dpcd_rev;
 
 	if (!dp_panel) {
@@ -2061,14 +2046,9 @@ static void dp_panel_read_sink_dsc_caps(struct dp_panel *dp_panel)
 		return;
 	}
 
-	dp_panel->dsc_en = false;
-	dp_panel->fec_en = false;
-
 	dpcd_rev = dp_panel->dpcd[DP_DPCD_REV];
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	dp_panel->fec_overhead_fp = 0;
 	if (panel->parser->dsc_feature_enable && dpcd_rev >= 0x14) {
 		rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_DSC_SUPPORT,
 			dp_panel->dsc_dpcd, (DP_RECEIVER_DSC_CAP_SIZE + 1));
@@ -2081,17 +2061,38 @@ static void dp_panel_read_sink_dsc_caps(struct dp_panel *dp_panel)
 			DUMP_PREFIX_NONE, 8, 1, dp_panel->dsc_dpcd, rlen,
 			false);
 
-		rlen = drm_dp_dpcd_read(panel->aux->drm_aux, fec_cap,
-			&dp_panel->fec_dpcd, 1);
-		if (rlen < 1) {
-			DP_ERR("fec dpcd read failed, rlen=%d\n", rlen);
-			return;
-		}
-
 		dp_panel_decode_dsc_dpcd(dp_panel);
 	}
 }
 
+static void dp_panel_read_sink_fec_caps(struct dp_panel *dp_panel)
+{
+	int rlen;
+	struct dp_panel_private *panel;
+	s64 fec_overhead_fp = drm_fixp_from_fraction(1, 1);
+
+	if (!dp_panel) {
+		DP_ERR("invalid input\n");
+		return;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	rlen = drm_dp_dpcd_readb(panel->aux->drm_aux, DP_FEC_CAPABILITY,
+			&dp_panel->fec_dpcd);
+	if (rlen < 1) {
+		DP_ERR("fec capability read failed, rlen=%d\n", rlen);
+		return;
+	}
+
+	dp_panel->fec_en = dp_panel->fec_dpcd & DP_FEC_CAPABLE;
+	if (dp_panel->fec_en)
+		fec_overhead_fp = drm_fixp_from_fraction(100000, 97582);
+
+	dp_panel->fec_overhead_fp = fec_overhead_fp;
+
+	return;
+}
+
 static int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
 	struct drm_connector *connector, bool multi_func)
 {
@@ -2148,7 +2149,18 @@ static int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
 	dp_panel->dsc_feature_enable = panel->parser->dsc_feature_enable;
 	dp_panel->fec_feature_enable = panel->parser->fec_feature_enable;
 
-	dp_panel_read_sink_dsc_caps(dp_panel);
+	dp_panel->fec_en = false;
+	dp_panel->dsc_en = false;
+
+	if (dp_panel->fec_feature_enable) {
+		dp_panel_read_sink_fec_caps(dp_panel);
+
+		if (dp_panel->dsc_feature_enable && dp_panel->fec_en)
+			dp_panel_read_sink_dsc_caps(dp_panel);
+	}
+
+	DP_INFO("fec_en=%d, dsc_en=%d, widebus_en=%d\n", dp_panel->fec_en,
+			dp_panel->dsc_en, dp_panel->widebus_en);
 end:
 	return rc;
 }
@@ -2157,9 +2169,13 @@ static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
 		u32 mode_edid_bpp, u32 mode_pclk_khz)
 {
 	struct drm_dp_link *link_info;
-	const u32 max_supported_bpp = 30, min_supported_bpp = 18;
+	const u32 max_supported_bpp = 30;
+	u32 min_supported_bpp = 18;
 	u32 bpp = 0, data_rate_khz = 0;
 
+	if (dp_panel->dsc_en)
+		min_supported_bpp = 24;
+
 	bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
 
 	link_info = &dp_panel->link_info;

+ 2 - 0
msm/dsi/dsi_ctrl_hw.h

@@ -17,6 +17,8 @@
 		fmt, c ? c->index : -1,	##__VA_ARGS__)
 #define DSI_CTRL_HW_ERR(c, fmt, ...)	DRM_DEV_ERROR(NULL, "[msm-dsi-error]: DSI_%d: "\
 		fmt, c ? c->index : -1,	##__VA_ARGS__)
+#define DSI_CTRL_HW_INFO(c, fmt, ...)	DRM_DEV_INFO(NULL, "[msm-dsi-info]: DSI_%d: "\
+		fmt, c ? c->index : -1,	##__VA_ARGS__)
 
 /**
  * Modifier flag for command transmission. If this flag is set, command

+ 1 - 1
msm/dsi/dsi_ctrl_hw_cmn.c

@@ -509,7 +509,7 @@ void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl, u32 *entries, u32 size)
 		/* make sure that debug test point is enabled */
 		wmb();
 		reg = DSI_R32(ctrl, DSI_DEBUG_BUS_STATUS);
-		DSI_CTRL_HW_ERR(ctrl, "debug bus ctrl: 0x%x status:0x%x\n",
+		DSI_CTRL_HW_INFO(ctrl, "debug bus ctrl: 0x%x status:0x%x\n",
 				entries[i], reg);
 	}
 }

+ 74 - 105
msm/dsi/dsi_display.c

@@ -1060,10 +1060,22 @@ int dsi_display_set_power(struct drm_connector *connector,
 	case SDE_MODE_DPMS_LP2:
 		rc = dsi_panel_set_lp2(display->panel);
 		break;
-	default:
-		rc = dsi_panel_set_nolp(display->panel);
+	case SDE_MODE_DPMS_ON:
+		if ((display->panel->power_mode == SDE_MODE_DPMS_LP1) ||
+			(display->panel->power_mode == SDE_MODE_DPMS_LP2))
+			rc = dsi_panel_set_nolp(display->panel);
 		break;
+	case SDE_MODE_DPMS_OFF:
+	default:
+		return rc;
 	}
+
+	DSI_DEBUG("Power mode transition from %d to %d %s",
+			display->panel->power_mode, power_mode,
+			rc ? "failed" : "successful");
+	if (!rc)
+		display->panel->power_mode = power_mode;
+
 	return rc;
 }
 
@@ -3839,7 +3851,7 @@ static int dsi_display_update_dsi_bitrate(struct dsi_display *display,
 
 	display->config.bit_clk_rate_hz = bit_clk_rate;
 
-	for (i = 0; i < display->ctrl_count; i++) {
+	display_for_each_ctrl(i, display) {
 		struct dsi_display_ctrl *dsi_disp_ctrl = &display->ctrl[i];
 		struct dsi_ctrl *ctrl = dsi_disp_ctrl->ctrl;
 		u32 num_of_lanes = 0, bpp;
@@ -3967,8 +3979,7 @@ static int _dsi_display_dyn_update_clks(struct dsi_display *display,
 		goto exit;
 	}
 
-	for (i = 0; (i < display->ctrl_count) &&
-	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+	display_for_each_ctrl(i, display) {
 		ctrl = &display->ctrl[i];
 		if (!ctrl->ctrl)
 			continue;
@@ -3986,8 +3997,7 @@ static int _dsi_display_dyn_update_clks(struct dsi_display *display,
 		}
 	}
 
-	for (i = 0; (i < display->ctrl_count) &&
-	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+	display_for_each_ctrl(i, display) {
 		ctrl = &display->ctrl[i];
 		if (ctrl == m_ctrl)
 			continue;
@@ -3996,8 +4006,7 @@ static int _dsi_display_dyn_update_clks(struct dsi_display *display,
 	dsi_phy_dynamic_refresh_trigger(m_ctrl->phy, true);
 
 	/* wait for dynamic refresh done */
-	for (i = 0; (i < display->ctrl_count) &&
-	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+	display_for_each_ctrl(i, display) {
 		ctrl = &display->ctrl[i];
 		rc = dsi_ctrl_wait4dynamic_refresh_done(ctrl->ctrl);
 		if (rc) {
@@ -4009,8 +4018,7 @@ static int _dsi_display_dyn_update_clks(struct dsi_display *display,
 		}
 	}
 
-	for (i = 0; (i < display->ctrl_count) &&
-	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+	display_for_each_ctrl(i, display) {
 		ctrl = &display->ctrl[i];
 		dsi_phy_dynamic_refresh_clear(ctrl->phy);
 	}
@@ -4025,8 +4033,7 @@ static int _dsi_display_dyn_update_clks(struct dsi_display *display,
 	return rc;
 
 recover_pix_clk:
-	for (i = 0; (i < display->ctrl_count) &&
-	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+	display_for_each_ctrl(i, display) {
 		ctrl = &display->ctrl[i];
 		if (!ctrl->ctrl)
 			continue;
@@ -4035,8 +4042,7 @@ recover_pix_clk:
 	}
 
 recover_byte_clk:
-	for (i = 0; (i < display->ctrl_count) &&
-	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+	display_for_each_ctrl(i, display) {
 		ctrl = &display->ctrl[i];
 		if (!ctrl->ctrl)
 			continue;
@@ -4070,7 +4076,7 @@ static int dsi_display_dynamic_clk_switch_vid(struct dsi_display *display,
 	dsi_display_mask_ctrl_error_interrupts(display, mask, true);
 
 	/* update the phy timings based on new mode */
-	for (i = 0; i < display->ctrl_count; i++) {
+	display_for_each_ctrl(i, display) {
 		ctrl = &display->ctrl[i];
 		dsi_phy_update_phy_timings(ctrl->phy, &display->config);
 	}
@@ -4090,7 +4096,7 @@ static int dsi_display_dynamic_clk_switch_vid(struct dsi_display *display,
 	_dsi_display_calc_pipe_delay(display, &delay, mode);
 
 	/* configure dynamic refresh ctrl registers */
-	for (i = 0; i < display->ctrl_count; i++) {
+	display_for_each_ctrl(i, display) {
 		ctrl = &display->ctrl[i];
 		if (!ctrl->phy)
 			continue;
@@ -5756,55 +5762,17 @@ error:
 	return rc;
 }
 
-static int dsi_display_get_mode_count_no_lock(struct dsi_display *display,
-			u32 *count)
-{
-	struct dsi_dfps_capabilities dfps_caps;
-	struct dsi_dyn_clk_caps *dyn_clk_caps;
-	int num_dfps_rates, num_bit_clks, rc = 0;
-
-	if (!display || !display->panel) {
-		DSI_ERR("invalid display:%d panel:%d\n", display != NULL,
-				display ? display->panel != NULL : 0);
-		return -EINVAL;
-	}
-
-	*count = display->panel->num_timing_nodes;
-
-	rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
-	if (rc) {
-		DSI_ERR("[%s] failed to get dfps caps from panel\n",
-				display->name);
-		return rc;
-	}
-
-	num_dfps_rates = !dfps_caps.dfps_support ? 1 : dfps_caps.dfps_list_len;
-
-	dyn_clk_caps = &(display->panel->dyn_clk_caps);
-
-	num_bit_clks = !dyn_clk_caps->dyn_clk_support ? 1 :
-					dyn_clk_caps->bit_clk_list_len;
-
-	/* Inflate num_of_modes by fps and bit clks in dfps */
-	*count = display->panel->num_timing_nodes *
-				num_dfps_rates * num_bit_clks;
-
-	return 0;
-}
-
 int dsi_display_get_mode_count(struct dsi_display *display,
 			u32 *count)
 {
-	int rc;
-
 	if (!display || !display->panel) {
 		DSI_ERR("invalid display:%d panel:%d\n", display != NULL,
-				display ? display->panel != NULL : 0);
+			display ? display->panel != NULL : 0);
 		return -EINVAL;
 	}
 
 	mutex_lock(&display->display_lock);
-	rc = dsi_display_get_mode_count_no_lock(display, count);
+	*count = display->panel->num_display_modes;
 	mutex_unlock(&display->display_lock);
 
 	return 0;
@@ -5837,7 +5805,7 @@ static void _dsi_display_populate_bit_clks(struct dsi_display *display,
 	if (cfg->data_lanes & DSI_DATA_LANE_3)
 		lanes++;
 
-	dsi_display_get_mode_count_no_lock(display, &total_modes);
+	total_modes = display->panel->num_display_modes;
 
 	for (i = start; i < end; i++) {
 		src = &display->modes[i];
@@ -5889,8 +5857,8 @@ int dsi_display_get_modes(struct dsi_display *display,
 	struct dsi_dfps_capabilities dfps_caps;
 	struct dsi_display_ctrl *ctrl;
 	struct dsi_host_common_cfg *host = &display->panel->host_config;
-	bool is_split_link;
-	u32 num_dfps_rates, panel_mode_count, total_mode_count;
+	bool is_split_link, is_cmd_mode;
+	u32 num_dfps_rates, timing_mode_count, display_mode_count;
 	u32 sublinks_count, mode_idx, array_idx = 0;
 	struct dsi_dyn_clk_caps *dyn_clk_caps;
 	int i, start, end, rc = -EINVAL;
@@ -5908,11 +5876,9 @@ int dsi_display_get_modes(struct dsi_display *display,
 	if (display->modes)
 		goto exit;
 
-	rc = dsi_display_get_mode_count_no_lock(display, &total_mode_count);
-	if (rc)
-		goto error;
+	display_mode_count = display->panel->num_display_modes;
 
-	display->modes = kcalloc(total_mode_count, sizeof(*display->modes),
+	display->modes = kcalloc(display_mode_count, sizeof(*display->modes),
 			GFP_KERNEL);
 	if (!display->modes) {
 		rc = -ENOMEM;
@@ -5930,20 +5896,20 @@ int dsi_display_get_modes(struct dsi_display *display,
 
 	num_dfps_rates = !dfps_caps.dfps_support ? 1 : dfps_caps.dfps_list_len;
 
-	panel_mode_count = display->panel->num_timing_nodes;
+	timing_mode_count = display->panel->num_timing_nodes;
 
-	for (mode_idx = 0; mode_idx < panel_mode_count; mode_idx++) {
-		struct dsi_display_mode panel_mode;
+	for (mode_idx = 0; mode_idx < timing_mode_count; mode_idx++) {
+		struct dsi_display_mode display_mode;
 		int topology_override = NO_OVERRIDE;
 		u32 frame_threshold_us = ctrl->ctrl->frame_threshold_time_us;
 
 		if (display->cmdline_timing == mode_idx)
 			topology_override = display->cmdline_topology;
 
-		memset(&panel_mode, 0, sizeof(panel_mode));
+		memset(&display_mode, 0, sizeof(display_mode));
 
 		rc = dsi_panel_get_mode(display->panel, mode_idx,
-						&panel_mode,
+						&display_mode,
 						topology_override);
 		if (rc) {
 			DSI_ERR("[%s] failed to get mode idx %d from panel\n",
@@ -5951,38 +5917,43 @@ int dsi_display_get_modes(struct dsi_display *display,
 			goto error;
 		}
 
+		is_cmd_mode = (display_mode.panel_mode == DSI_OP_CMD_MODE);
+
 		/* Calculate dsi frame transfer time */
-		if (display->panel->panel_mode == DSI_OP_CMD_MODE) {
+		if (is_cmd_mode) {
 			dsi_panel_calc_dsi_transfer_time(
 					&display->panel->host_config,
-					&panel_mode, frame_threshold_us);
-			panel_mode.priv_info->dsi_transfer_time_us =
-				panel_mode.timing.dsi_transfer_time_us;
-			panel_mode.priv_info->min_dsi_clk_hz =
-				panel_mode.timing.min_dsi_clk_hz;
+					&display_mode, frame_threshold_us);
+			display_mode.priv_info->dsi_transfer_time_us =
+				display_mode.timing.dsi_transfer_time_us;
+			display_mode.priv_info->min_dsi_clk_hz =
+				display_mode.timing.min_dsi_clk_hz;
 
-			panel_mode.priv_info->mdp_transfer_time_us =
-				panel_mode.priv_info->dsi_transfer_time_us;
-			panel_mode.timing.mdp_transfer_time_us =
-				panel_mode.timing.dsi_transfer_time_us;
+			display_mode.priv_info->mdp_transfer_time_us =
+				display_mode.priv_info->dsi_transfer_time_us;
+			display_mode.timing.mdp_transfer_time_us =
+				display_mode.timing.dsi_transfer_time_us;
 		}
 
 		is_split_link = host->split_link.split_link_enabled;
 		sublinks_count = host->split_link.num_sublinks;
 		if (is_split_link && sublinks_count > 1) {
-			panel_mode.timing.h_active *= sublinks_count;
-			panel_mode.timing.h_front_porch *= sublinks_count;
-			panel_mode.timing.h_sync_width *= sublinks_count;
-			panel_mode.timing.h_back_porch *= sublinks_count;
-			panel_mode.timing.h_skew *= sublinks_count;
-			panel_mode.pixel_clk_khz *= sublinks_count;
+			display_mode.timing.h_active *= sublinks_count;
+			display_mode.timing.h_front_porch *= sublinks_count;
+			display_mode.timing.h_sync_width *= sublinks_count;
+			display_mode.timing.h_back_porch *= sublinks_count;
+			display_mode.timing.h_skew *= sublinks_count;
+			display_mode.pixel_clk_khz *= sublinks_count;
 		} else {
-			panel_mode.timing.h_active *= display->ctrl_count;
-			panel_mode.timing.h_front_porch *= display->ctrl_count;
-			panel_mode.timing.h_sync_width *= display->ctrl_count;
-			panel_mode.timing.h_back_porch *= display->ctrl_count;
-			panel_mode.timing.h_skew *= display->ctrl_count;
-			panel_mode.pixel_clk_khz *= display->ctrl_count;
+			display_mode.timing.h_active *= display->ctrl_count;
+			display_mode.timing.h_front_porch *=
+						display->ctrl_count;
+			display_mode.timing.h_sync_width *=
+						display->ctrl_count;
+			display_mode.timing.h_back_porch *=
+						display->ctrl_count;
+			display_mode.timing.h_skew *= display->ctrl_count;
+			display_mode.pixel_clk_khz *= display->ctrl_count;
 		}
 
 		start = array_idx;
@@ -5997,10 +5968,10 @@ int dsi_display_get_modes(struct dsi_display *display,
 				goto error;
 			}
 
-			memcpy(sub_mode, &panel_mode, sizeof(panel_mode));
+			memcpy(sub_mode, &display_mode, sizeof(display_mode));
 			array_idx++;
 
-			if (!dfps_caps.dfps_support)
+			if (!dfps_caps.dfps_support || is_cmd_mode)
 				continue;
 
 			curr_refresh_rate = sub_mode->timing.refresh_rate;
@@ -6038,16 +6009,14 @@ int dsi_display_get_panel_vfp(void *dsi_display,
 	struct dsi_display *display = (struct dsi_display *)dsi_display;
 	struct dsi_host_common_cfg *host;
 
-	if (!display)
+	if (!display || !display->panel)
 		return -EINVAL;
 
-	rc = dsi_display_get_mode_count(display, &count);
-	if (rc)
-		return rc;
-
 	mutex_lock(&display->display_lock);
 
-	if (display->panel && display->panel->cur_mode)
+	count = display->panel->num_display_modes;
+
+	if (display->panel->cur_mode)
 		refresh_rate = display->panel->cur_mode->timing.refresh_rate;
 
 	dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
@@ -6089,9 +6058,9 @@ int dsi_display_get_default_lms(void *dsi_display, u32 *num_lm)
 
 	*num_lm = 0;
 
-	rc = dsi_display_get_mode_count(display, &count);
-	if (rc)
-		return rc;
+	mutex_lock(&display->display_lock);
+	count = display->panel->num_display_modes;
+	mutex_unlock(&display->display_lock);
 
 	if (!display->modes) {
 		struct dsi_display_mode *m;
@@ -6124,9 +6093,9 @@ int dsi_display_find_mode(struct dsi_display *display,
 
 	*out_mode = NULL;
 
-	rc = dsi_display_get_mode_count(display, &count);
-	if (rc)
-		return rc;
+	mutex_lock(&display->display_lock);
+	count = display->panel->num_display_modes;
+	mutex_unlock(&display->display_lock);
 
 	if (!display->modes) {
 		struct dsi_display_mode *m;

+ 1 - 1
msm/dsi/dsi_display.h

@@ -359,7 +359,7 @@ int dsi_display_get_mode_count(struct dsi_display *display, u32 *count);
  * dsi_display_get_modes() - get modes supported by display
  * @display:            Handle to display.
  * @modes:              Output param, list of DSI modes. Number of modes matches
- *                      count returned by dsi_display_get_mode_count
+ *                      count got from display->panel->num_display_modes;
  *
  * Return: error code.
  */

+ 5 - 1
msm/dsi/dsi_drm.c

@@ -242,8 +242,12 @@ static void dsi_bridge_enable(struct drm_bridge *bridge)
 		DSI_ERR("[%d] DSI display post enabled failed, rc=%d\n",
 		       c_bridge->id, rc);
 
-	if (display && display->drm_conn)
+	if (display && display->drm_conn) {
 		sde_connector_helper_bridge_enable(display->drm_conn);
+		if (c_bridge->dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_POMS)
+			sde_connector_schedule_status_work(display->drm_conn,
+				true);
+	}
 }
 
 static void dsi_bridge_disable(struct drm_bridge *bridge)

+ 70 - 3
msm/dsi/dsi_panel.c

@@ -3212,6 +3212,7 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
 {
 	struct dsi_panel *panel;
 	struct dsi_parser_utils *utils;
+	const char *panel_physical_type;
 	int rc = 0;
 
 	panel = kzalloc(sizeof(*panel), GFP_KERNEL);
@@ -3230,6 +3231,14 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
 	if (!panel->name)
 		panel->name = DSI_PANEL_DEFAULT_LABEL;
 
+	/*
+	 * Set panel type to LCD as default.
+	 */
+	panel->panel_type = DSI_DISPLAY_PANEL_TYPE_LCD;
+	panel_physical_type = utils->get_property(utils->data,
+				"qcom,mdss-dsi-panel-physical-type", NULL);
+	if (panel_physical_type && !strcmp(panel_physical_type, "oled"))
+		panel->panel_type = DSI_DISPLAY_PANEL_TYPE_OLED;
 	rc = dsi_panel_parse_host_config(panel);
 	if (rc) {
 		DSI_ERR("failed to parse host configuration, rc=%d\n",
@@ -3306,6 +3315,7 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
 	if (rc)
 		DSI_DEBUG("failed to parse esd config, rc=%d\n", rc);
 
+	panel->power_mode = SDE_MODE_DPMS_OFF;
 	drm_panel_init(&panel->drm_panel);
 	panel->drm_panel.dev = &panel->mipi_device.dev;
 	panel->mipi_device.dev.of_node = of_node;
@@ -3446,8 +3456,11 @@ int dsi_panel_get_mode_count(struct dsi_panel *panel)
 {
 	const u32 SINGLE_MODE_SUPPORT = 1;
 	struct dsi_parser_utils *utils;
-	struct device_node *timings_np;
+	struct device_node *timings_np, *child_np;
+	int num_dfps_rates, num_bit_clks;
+	int num_video_modes = 0, num_cmd_modes = 0;
 	int count, rc = 0;
+	void *utils_data = NULL;
 
 	if (!panel) {
 		DSI_ERR("invalid params\n");
@@ -3474,12 +3487,37 @@ int dsi_panel_get_mode_count(struct dsi_panel *panel)
 		goto error;
 	}
 
-	/* No multiresolution support is available for video mode panels */
+	/* No multiresolution support is available for video mode panels.
+	 * Multi-mode is supported for video mode panels when POMS is enabled.
+	 */
 	if (panel->panel_mode != DSI_OP_CMD_MODE &&
-		!panel->host_config.ext_bridge_mode)
+		!panel->host_config.ext_bridge_mode &&
+		!panel->panel_mode_switch_enabled)
 		count = SINGLE_MODE_SUPPORT;
 
 	panel->num_timing_nodes = count;
+	dsi_for_each_child_node(timings_np, child_np) {
+		utils_data = child_np;
+		if (utils->read_bool(utils->data, "qcom,mdss-dsi-video-mode"))
+			num_video_modes++;
+		else if (utils->read_bool(utils->data,
+					"qcom,mdss-dsi-cmd-mode"))
+			num_cmd_modes++;
+		else if (panel->panel_mode == DSI_OP_VIDEO_MODE)
+			num_video_modes++;
+		else if (panel->panel_mode == DSI_OP_CMD_MODE)
+			num_cmd_modes++;
+	}
+
+	num_dfps_rates = !panel->dfps_caps.dfps_support ? 1 :
+					panel->dfps_caps.dfps_list_len;
+
+	num_bit_clks = !panel->dyn_clk_caps.dyn_clk_support ? 1 :
+					panel->dyn_clk_caps.bit_clk_list_len;
+
+	/* Inflate num_of_modes by fps and bit clks in dfps */
+	panel->num_display_modes = (num_cmd_modes * num_bit_clks) +
+			(num_video_modes * num_bit_clks * num_dfps_rates);
 
 error:
 	return rc;
@@ -3851,6 +3889,17 @@ int dsi_panel_set_lp1(struct dsi_panel *panel)
 	if (!panel->panel_initialized)
 		goto exit;
 
+	/*
+	 * Consider LP1->LP2->LP1.
+	 * If the panel is already in LP mode, there is no need to
+	 * set the regulator mode again.
+	 * IBB and AB power mode would be set at the same time
+	 * in PMIC driver, so we only call ibb setting that is enough.
+	 */
+	if (dsi_panel_is_type_oled(panel) &&
+		panel->power_mode != SDE_MODE_DPMS_LP2)
+		dsi_pwr_panel_regulator_mode_set(&panel->power_info,
+			"ibb", REGULATOR_MODE_IDLE);
 	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_LP1);
 	if (rc)
 		DSI_ERR("[%s] failed to send DSI_CMD_SET_LP1 cmd, rc=%d\n",
@@ -3895,6 +3944,14 @@ int dsi_panel_set_nolp(struct dsi_panel *panel)
 	if (!panel->panel_initialized)
 		goto exit;
 
+	/*
+	 * Consider about LP1->LP2->NOLP.
+	 */
+	if (dsi_panel_is_type_oled(panel) &&
+	    (panel->power_mode == SDE_MODE_DPMS_LP1 ||
+	     panel->power_mode == SDE_MODE_DPMS_LP2))
+		dsi_pwr_panel_regulator_mode_set(&panel->power_info,
+			"ibb", REGULATOR_MODE_NORMAL);
 	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_NOLP);
 	if (rc)
 		DSI_ERR("[%s] failed to send DSI_CMD_SET_NOLP cmd, rc=%d\n",
@@ -4295,6 +4352,15 @@ int dsi_panel_disable(struct dsi_panel *panel)
 
 	/* Avoid sending panel off commands when ESD recovery is underway */
 	if (!atomic_read(&panel->esd_recovery_pending)) {
+		/*
+		 * Need to set IBB/AB regulator mode to STANDBY,
+		 * if panel is going off from AOD mode.
+		 */
+		if (dsi_panel_is_type_oled(panel) &&
+			(panel->power_mode == SDE_MODE_DPMS_LP1 ||
+			panel->power_mode == SDE_MODE_DPMS_LP2))
+			dsi_pwr_panel_regulator_mode_set(&panel->power_info,
+				"ibb", REGULATOR_MODE_STANDBY);
 		rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_OFF);
 		if (rc) {
 			/*
@@ -4309,6 +4375,7 @@ int dsi_panel_disable(struct dsi_panel *panel)
 		}
 	}
 	panel->panel_initialized = false;
+	panel->power_mode = SDE_MODE_DPMS_OFF;
 
 	mutex_unlock(&panel->panel_lock);
 	return rc;

+ 14 - 0
msm/dsi/dsi_panel.h

@@ -62,6 +62,12 @@ enum dsi_dms_mode {
 	DSI_DMS_MODE_RES_SWITCH_IMMEDIATE,
 };
 
+enum dsi_panel_physical_type {
+	DSI_DISPLAY_PANEL_TYPE_LCD = 0,
+	DSI_DISPLAY_PANEL_TYPE_OLED,
+	DSI_DISPLAY_PANEL_TYPE_MAX,
+};
+
 struct dsi_dfps_capabilities {
 	enum dsi_dfps_type type;
 	u32 min_refresh_rate;
@@ -171,6 +177,7 @@ struct dsi_panel {
 
 	struct dsi_display_mode *cur_mode;
 	u32 num_timing_nodes;
+	u32 num_display_modes;
 
 	struct dsi_regulator_info power_info;
 	struct dsi_backlight_config bl_config;
@@ -197,6 +204,8 @@ struct dsi_panel {
 	bool sync_broadcast_en;
 
 	int panel_test_gpio;
+	int power_mode;
+	enum dsi_panel_physical_type panel_type;
 };
 
 static inline bool dsi_panel_ulps_feature_enabled(struct dsi_panel *panel)
@@ -219,6 +228,11 @@ static inline void dsi_panel_release_panel_lock(struct dsi_panel *panel)
 	mutex_unlock(&panel->panel_lock);
 }
 
+static inline bool dsi_panel_is_type_oled(struct dsi_panel *panel)
+{
+	return (panel->panel_type == DSI_DISPLAY_PANEL_TYPE_OLED);
+}
+
 struct dsi_panel *dsi_panel_get(struct device *parent,
 				struct device_node *of_node,
 				struct device_node *parser_node,

+ 48 - 0
msm/dsi/dsi_pwr.c

@@ -378,3 +378,51 @@ int dsi_pwr_enable_regulator(struct dsi_regulator_info *regs, bool enable)
 
 	return rc;
 }
+
+/*
+ * dsi_pwr_panel_regulator_mode_set()
+ * set the AB/IBB regulator mode for OLED panel
+ * AOD mode entry and exit
+ * @regs:	Pointer to set of regulators to search for the named supply.
+ * @reg_name:	Name of panel power we want to set.
+ * @regulator_mode:	Regulator mode values, like:
+ *	REGULATOR_MODE_INVALID
+ *	REGULATOR_MODE_FAST
+ *	REGULATOR_MODE_NORMAL
+ *	REGULATOR_MODE_IDLE
+ *	REGULATOR_MODE_STANDBY
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_pwr_panel_regulator_mode_set(struct dsi_regulator_info *regs,
+						const char *reg_name,
+						int regulator_mode)
+{
+	int i = 0, rc = 0;
+	struct dsi_vreg *vreg;
+
+	if (regs->count == 0)
+		return -EINVAL;
+
+	if (!regs->vregs)
+		return -EINVAL;
+
+	for (i = 0; i < regs->count; i++) {
+		vreg = &regs->vregs[i];
+		if (!strcmp(vreg->vreg_name, reg_name)) {
+			rc = regulator_set_mode(vreg->vreg,
+							regulator_mode);
+			if (rc)
+				DSI_ERR("Regulator %s set mode %d failed\n",
+					vreg->vreg_name, rc);
+			break;
+		}
+	}
+
+	if (i >= regs->count) {
+		DSI_ERR("Regulator %s was not found\n", reg_name);
+		return -EINVAL;
+	}
+
+	return rc;
+}

+ 18 - 0
msm/dsi/dsi_pwr.h

@@ -85,4 +85,22 @@ int dsi_pwr_get_dt_vreg_data(struct device *dev,
  * return: error code in case of failure or 0 for success.
  */
 int dsi_pwr_enable_regulator(struct dsi_regulator_info *regs, bool enable);
+
+/**
+ * dsi_pwr_panel_regulator_mode_set()
+ * set regulator mode for OLED panel
+ * @regs:       Pointer to set of regulators to search for the named supply.
+ * @reg_name:		Panel regulator name
+ * @regulator_mode:	Regulator mode values, like:
+ *                  REGULATOR_MODE_INVALID
+ *                  REGULATOR_MODE_FAST
+ *                  REGULATOR_MODE_NORMAL
+ *                  REGULATOR_MODE_IDLE
+ *                  REGULATOR_MODE_STANDBY
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_pwr_panel_regulator_mode_set(struct dsi_regulator_info *regs,
+						const char *reg_name,
+						int regulator_mode);
 #endif /* _DSI_PWR_H_ */

+ 2 - 1
msm/msm_atomic.c

@@ -535,7 +535,8 @@ static void complete_commit(struct msm_commit *c)
 
 	msm_atomic_helper_commit_modeset_disables(dev, state);
 
-	drm_atomic_helper_commit_planes(dev, state, 0);
+	drm_atomic_helper_commit_planes(dev, state,
+				DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
 	msm_atomic_helper_commit_modeset_enables(dev, state);
 

+ 27 - 12
msm/msm_drv.c

@@ -1938,6 +1938,33 @@ msm_gem_smmu_address_space_get(struct drm_device *dev,
 	return funcs->get_address_space(priv->kms, domain);
 }
 
+int msm_get_mixer_count(struct msm_drm_private *priv,
+		const struct drm_display_mode *mode,
+		const struct msm_resource_caps_info *res, u32 *num_lm)
+{
+	struct msm_kms *kms;
+	const struct msm_kms_funcs *funcs;
+
+	if (!priv) {
+		DRM_ERROR("invalid drm private struct\n");
+		return -EINVAL;
+	}
+
+	kms = priv->kms;
+	if (!kms) {
+		DRM_ERROR("invalid msm kms struct\n");
+		return -EINVAL;
+	}
+
+	funcs = kms->funcs;
+	if (!funcs || !funcs->get_mixer_count) {
+		DRM_ERROR("invalid function pointers\n");
+		return -EINVAL;
+	}
+
+	return funcs->get_mixer_count(priv->kms, mode, res, num_lm);
+}
+
 static int msm_drm_bind(struct device *dev)
 {
 	return msm_drm_init(dev, &msm_driver);
@@ -2024,16 +2051,6 @@ static struct platform_driver msm_platform_driver = {
 	},
 };
 
-#ifdef CONFIG_QCOM_KGSL
-void __init adreno_register(void)
-{
-}
-
-void __exit adreno_unregister(void)
-{
-}
-#endif
-
 static int __init msm_drm_register(void)
 {
 	if (!modeset)
@@ -2044,7 +2061,6 @@ static int __init msm_drm_register(void)
 	msm_dsi_register();
 	msm_edp_register();
 	msm_hdmi_register();
-	adreno_register();
 	return platform_driver_register(&msm_platform_driver);
 }
 
@@ -2053,7 +2069,6 @@ static void __exit msm_drm_unregister(void)
 	DBG("fini");
 	platform_driver_unregister(&msm_platform_driver);
 	msm_hdmi_unregister();
-	adreno_unregister();
 	msm_edp_unregister();
 	msm_dsi_unregister();
 	msm_smmu_driver_cleanup();

+ 4 - 0
msm/msm_drv.h

@@ -1029,4 +1029,8 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
 	return remaining_jiffies;
 }
 
+int msm_get_mixer_count(struct msm_drm_private *priv,
+		const struct drm_display_mode *mode,
+		const struct msm_resource_caps_info *res, u32 *num_lm);
+
 #endif /* __MSM_DRV_H__ */

+ 4 - 0
msm/msm_kms.h

@@ -122,6 +122,10 @@ struct msm_kms_funcs {
 	int (*cont_splash_config)(struct msm_kms *kms);
 	/* check for continuous splash status */
 	bool (*check_for_splash)(struct msm_kms *kms);
+	/* topology information */
+	int (*get_mixer_count)(const struct msm_kms *kms,
+			const struct drm_display_mode *mode,
+			const struct msm_resource_caps_info *res, u32 *num_lm);
 };
 
 struct msm_kms {

+ 37 - 2
msm/sde/sde_color_processing.c

@@ -2787,8 +2787,9 @@ static void _sde_cp_crtc_queue_ltm_buffer(struct sde_crtc *sde_crtc, void *cfg)
 	struct drm_msm_ltm_buffer *buf;
 	struct drm_msm_ltm_stats_data *ltm_data = NULL;
 	u32 i;
-	bool found = false;
+	bool found = false, already = false;
 	unsigned long irq_flags;
+	struct sde_ltm_buffer *buffer = NULL, *n = NULL;
 
 	if (!sde_crtc || !cfg) {
 		DRM_ERROR("invalid parameters sde_crtc %pK cfg %pK\n", sde_crtc,
@@ -2818,7 +2819,13 @@ static void _sde_cp_crtc_queue_ltm_buffer(struct sde_crtc *sde_crtc, void *cfg)
 				 sde_crtc->ltm_buffers[i]->offset);
 			ltm_data->status_flag = 0;
 
-			list_add_tail(&sde_crtc->ltm_buffers[i]->node,
+			list_for_each_entry_safe(buffer, n,
+					&sde_crtc->ltm_buf_free, node) {
+				if (buffer->drm_fb_id == buf->fd)
+					already =  true;
+			}
+			if (!already)
+				list_add_tail(&sde_crtc->ltm_buffers[i]->node,
 					&sde_crtc->ltm_buf_free);
 			found = true;
 		}
@@ -2892,6 +2899,7 @@ static void _sde_cp_crtc_enable_ltm_hist(struct sde_crtc *sde_crtc,
 			sde_crtc->ltm_hist_en = true;
 		hw_dspp->ops.setup_ltm_hist_ctrl(hw_dspp, hw_cfg,
 			true, addr);
+		SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 	}
 	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
 }
@@ -2902,15 +2910,30 @@ static void _sde_cp_crtc_disable_ltm_hist(struct sde_crtc *sde_crtc,
 {
 	unsigned long irq_flags;
 	u32 i = 0;
+	bool notify = false;
+	u8 hist_off = 1;
+	struct drm_event event;
 
 	spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
+	notify = sde_crtc->ltm_hist_en;
 	sde_crtc->ltm_hist_en = false;
 	INIT_LIST_HEAD(&sde_crtc->ltm_buf_free);
 	INIT_LIST_HEAD(&sde_crtc->ltm_buf_busy);
 	for (i = 0; i < sde_crtc->ltm_buffer_cnt; i++)
 		list_add(&sde_crtc->ltm_buffers[i]->node,
 			&sde_crtc->ltm_buf_free);
+	hw_dspp->ops.setup_ltm_hist_ctrl(hw_dspp, NULL,
+			false, 0);
 	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
+	event.type = DRM_EVENT_LTM_OFF;
+	event.length = sizeof(hist_off);
+	if (notify) {
+		SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
+		msm_mode_object_event_notify(&sde_crtc->base.base,
+				sde_crtc->base.dev, &event,
+				(u8 *)&hist_off);
+	}
+
 }
 
 static void sde_cp_ltm_hist_interrupt_cb(void *arg, int irq_idx)
@@ -2970,6 +2993,7 @@ static void sde_cp_ltm_hist_interrupt_cb(void *arg, int irq_idx)
 		}
 
 		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
+		DRM_DEBUG_DRIVER("LTM histogram is disabled\n");
 		return;
 	}
 
@@ -3010,6 +3034,7 @@ static void sde_cp_ltm_hist_interrupt_cb(void *arg, int irq_idx)
 
 	list_del_init(&busy_buf->node);
 	list_del_init(&free_buf->node);
+	INIT_LIST_HEAD(&sde_crtc->ltm_buf_busy);
 	list_add_tail(&free_buf->node, &sde_crtc->ltm_buf_busy);
 
 	ltm_data = (struct drm_msm_ltm_stats_data *)
@@ -3019,6 +3044,7 @@ static void sde_cp_ltm_hist_interrupt_cb(void *arg, int irq_idx)
 
 	hw_lm = sde_crtc->mixers[0].hw_lm;
 	if (!hw_lm) {
+		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
 		DRM_ERROR("invalid layer mixer\n");
 		return;
 	}
@@ -3085,6 +3111,7 @@ static void sde_cp_notify_ltm_hist(struct drm_crtc *crtc, void *arg)
 		/* histogram is disabled, no need to notify user space */
 		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
 		mutex_unlock(&sde_crtc->ltm_buffer_lock);
+		DRM_DEBUG_DRIVER("ltm histogram is disabled\n");
 		return;
 	}
 
@@ -3093,6 +3120,8 @@ static void sde_cp_notify_ltm_hist(struct drm_crtc *crtc, void *arg)
 	payload.offset = buf->offset;
 	event.length = sizeof(struct drm_msm_ltm_buffer);
 	event.type = DRM_EVENT_LTM_HIST;
+	DRM_DEBUG_DRIVER("notify with LTM hist event drm_fb_id %d\n",
+				buf->drm_fb_id);
 	msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
 					(u8 *)&payload);
 	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
@@ -3305,3 +3334,9 @@ static void _sde_cp_crtc_update_ltm_roi(struct sde_crtc *sde_crtc,
 
 	sde_crtc->ltm_cfg = *cfg_param;
 }
+
+int sde_cp_ltm_off_event_handler(struct drm_crtc *crtc_drm, bool en,
+	struct sde_irq_callback *hist_irq)
+{
+	return 0;
+}

+ 8 - 0
msm/sde/sde_color_processing.h

@@ -182,4 +182,12 @@ int sde_cp_ltm_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
 int sde_cp_ltm_wb_pb_interrupt(struct drm_crtc *crtc_drm, bool en,
 	struct sde_irq_callback *hist_irq);
 
+/**
+ * sde_cp_ltm_off_event_handler: API to enable/disable LTM off notification
+ * @crtc_drm: Pointer to crtc.
+ * @en: Variable to enable/disable notification.
+ * @hist_irq: Pointer to irq callback
+ */
+int sde_cp_ltm_off_event_handler(struct drm_crtc *crtc_drm, bool en,
+	struct sde_irq_callback *hist_irq);
 #endif /*_SDE_COLOR_PROCESSING_H */

+ 5 - 5
msm/sde/sde_core_perf.c

@@ -456,7 +456,7 @@ static inline bool _sde_core_perf_is_wb(struct drm_crtc *crtc)
 {
 	enum sde_intf_mode if_mode = INTF_MODE_NONE;
 
-	if_mode = sde_crtc_get_intf_mode(crtc);
+	if_mode = sde_crtc_get_intf_mode(crtc, crtc->state);
 	if (if_mode == INTF_MODE_WB_BLOCK ||
 		if_mode == INTF_MODE_WB_LINE)
 		return true;
@@ -678,7 +678,7 @@ void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
 	sde_cstate = to_sde_crtc_state(crtc->state);
 
 	/* only do this for command mode rt client (non-rsc client) */
-	if ((sde_crtc_get_intf_mode(crtc) != INTF_MODE_CMD) &&
+	if ((sde_crtc_get_intf_mode(crtc, crtc->state) != INTF_MODE_CMD) &&
 		(sde_crtc_get_client_type(crtc) != RT_RSC_CLIENT))
 		return;
 
@@ -686,11 +686,11 @@ void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
 	 * If video interface present, cmd panel bandwidth cannot be
 	 * released.
 	 */
-	if (sde_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
+	if (sde_crtc_get_intf_mode(crtc, crtc->state) == INTF_MODE_CMD)
 		drm_for_each_crtc(tmp_crtc, crtc->dev) {
 			if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
-				sde_crtc_get_intf_mode(tmp_crtc) ==
-						INTF_MODE_VIDEO)
+				sde_crtc_get_intf_mode(tmp_crtc,
+					tmp_crtc->state) == INTF_MODE_VIDEO)
 				return;
 		}
 

+ 18 - 10
msm/sde/sde_crtc.c

@@ -66,6 +66,7 @@ static struct sde_crtc_custom_events custom_events[] = {
 	{DRM_EVENT_SDE_POWER, sde_crtc_pm_event_handler},
 	{DRM_EVENT_LTM_HIST, sde_cp_ltm_hist_interrupt},
 	{DRM_EVENT_LTM_WB_PB, sde_cp_ltm_wb_pb_interrupt},
+	{DRM_EVENT_LTM_OFF, sde_cp_ltm_off_event_handler},
 };
 
 /* default input fence timeout, in ms */
@@ -729,9 +730,6 @@ static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc,
 			return -EINVAL;
 		}
 
-		if (!mode_info.roi_caps.enabled)
-			continue;
-
 		sde_conn = to_sde_connector(conn_state->connector);
 		sde_conn_state = to_sde_connector_state(conn_state);
 
@@ -741,6 +739,9 @@ static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc,
 						&sde_conn_state->property_state,
 						CONNECTOR_PROP_ROI_V1);
 
+		if (!mode_info.roi_caps.enabled)
+			continue;
+
 		/*
 		 * current driver only supports same connector and crtc size,
 		 * but if support for different sizes is added, driver needs
@@ -1781,6 +1782,8 @@ int sde_crtc_get_secure_transition_ops(struct drm_crtc *crtc,
 		return -EINVAL;
 
 	smmu_state = &sde_kms->smmu_state;
+	smmu_state->prev_state = smmu_state->state;
+
 	sde_crtc = to_sde_crtc(crtc);
 	secure_level = sde_crtc_get_secure_level(crtc, crtc->state);
 	catalog = sde_kms->catalog;
@@ -1791,7 +1794,8 @@ int sde_crtc_get_secure_transition_ops(struct drm_crtc *crtc,
 	 */
 	drm_for_each_encoder_mask(encoder, crtc->dev,
 			crtc->state->encoder_mask) {
-		post_commit |= sde_encoder_check_curr_mode(encoder,
+		if (sde_encoder_is_dsi_display(encoder))
+			post_commit |= sde_encoder_check_curr_mode(encoder,
 						MSM_DISPLAY_VIDEO_MODE);
 	}
 
@@ -2224,17 +2228,18 @@ end:
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc)
+enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc,
+		struct drm_crtc_state *cstate)
 {
 	struct drm_encoder *encoder;
 
-	if (!crtc || !crtc->dev) {
+	if (!crtc || !crtc->dev || !cstate) {
 		SDE_ERROR("invalid crtc\n");
 		return INTF_MODE_NONE;
 	}
 
 	drm_for_each_encoder_mask(encoder, crtc->dev,
-			crtc->state->encoder_mask) {
+			cstate->encoder_mask) {
 		/* continue if copy encoder is encountered */
 		if (sde_encoder_in_clone_mode(encoder))
 			continue;
@@ -4272,9 +4277,11 @@ static int _sde_crtc_check_secure_state_smmu_translation(struct drm_crtc *crtc,
 	struct drm_encoder *encoder;
 	int is_video_mode = false;
 
-	drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask)
-		is_video_mode |= sde_encoder_check_curr_mode(encoder,
+	drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
+		if (sde_encoder_is_dsi_display(encoder))
+			is_video_mode |= sde_encoder_check_curr_mode(encoder,
 						MSM_DISPLAY_VIDEO_MODE);
+	}
 
 	/*
 	 * In video mode check for null commit before transition
@@ -5584,7 +5591,8 @@ static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v)
 
 	seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
 	seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
-	seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
+	seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc,
+				crtc->state));
 	seq_printf(s, "core_clk_rate: %llu\n",
 			sde_crtc->cur_perf.core_clk_rate);
 	for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC;

+ 10 - 6
msm/sde/sde_crtc.h

@@ -593,9 +593,11 @@ int sde_crtc_register_custom_event(struct sde_kms *kms,
 
 /**
  * sde_crtc_get_intf_mode - get interface mode of the given crtc
- * @crtc: Pointert to crtc
+ * @crtc: Pointer to DRM crtc
+ * @cstate: Pointer to DRM crtc_state
  */
-enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc);
+enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc,
+		struct drm_crtc_state *cstate);
 
 /**
  * sde_crtc_get_fps_mode - get frame rate of the given crtc
@@ -621,14 +623,16 @@ static inline enum sde_crtc_client_type sde_crtc_get_client_type(
 
 /**
  * sde_crtc_is_rt_client - check if real-time client or not
- * @crtc: Pointer to crtc
+ * @crtc: Pointer to DRM crtc
+ * @cstate: Pointer to DRM crtc_state
  */
-static inline bool sde_crtc_is_rt_client(struct drm_crtc *crtc)
+static inline bool sde_crtc_is_rt_client(struct drm_crtc *crtc,
+		struct drm_crtc_state *cstate)
 {
-	if (!crtc || !crtc->state)
+	if (!crtc || !cstate)
 		return true;
 
-	return (sde_crtc_get_intf_mode(crtc) != INTF_MODE_WB_LINE);
+	return (sde_crtc_get_intf_mode(crtc, cstate) != INTF_MODE_WB_LINE);
 }
 
 /**

+ 43 - 8
msm/sde/sde_encoder.c

@@ -456,6 +456,14 @@ bool sde_encoder_is_primary_display(struct drm_encoder *drm_enc)
 		SDE_CONNECTOR_PRIMARY);
 }
 
+bool sde_encoder_is_dsi_display(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+
+	return sde_enc &&
+		(sde_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI);
+}
+
 int sde_encoder_in_cont_splash(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
@@ -769,8 +777,8 @@ void sde_encoder_helper_update_intf_cfg(
 	struct sde_hw_intf_cfg_v1 *intf_cfg;
 	enum sde_3d_blend_mode mode_3d;
 
-	if (!phys_enc) {
-		SDE_ERROR("invalid arg, encoder %d\n", !phys_enc);
+	if (!phys_enc || !phys_enc->hw_pp) {
+		SDE_ERROR("invalid args, encoder %d\n", !phys_enc);
 		return;
 	}
 
@@ -1761,6 +1769,9 @@ static void _sde_encoder_update_vsync_source(struct sde_encoder_virt *sde_enc,
 		else
 			vsync_source = sde_enc->te_source;
 
+		SDE_EVT32(DRMID(&sde_enc->base), vsync_source, is_dummy,
+				disp_info->is_te_using_watchdog_timer);
+
 		for (i = 0; i < sde_enc->num_phys_encs; i++) {
 			phys = sde_enc->phys_encs[i];
 
@@ -1817,7 +1828,8 @@ static void _sde_encoder_dsc_disable(struct sde_encoder_virt *sde_enc)
 	 */
 }
 
-static int _sde_encoder_switch_to_watchdog_vsync(struct drm_encoder *drm_enc)
+int sde_encoder_helper_switch_vsync(struct drm_encoder *drm_enc,
+	 bool watchdog_te)
 {
 	struct sde_encoder_virt *sde_enc;
 	struct msm_display_info disp_info;
@@ -1832,7 +1844,7 @@ static int _sde_encoder_switch_to_watchdog_vsync(struct drm_encoder *drm_enc)
 	sde_encoder_control_te(drm_enc, false);
 
 	memcpy(&disp_info, &sde_enc->disp_info, sizeof(disp_info));
-	disp_info.is_te_using_watchdog_timer = true;
+	disp_info.is_te_using_watchdog_timer = watchdog_te;
 	_sde_encoder_update_vsync_source(sde_enc, &disp_info, false);
 
 	sde_encoder_control_te(drm_enc, true);
@@ -1893,7 +1905,7 @@ static int _sde_encoder_rsc_client_update_vsync_wait(
 			 * by generating the vsync from watchdog timer.
 			 */
 			if (crtc->base.id == wait_vblank_crtc_id)
-				_sde_encoder_switch_to_watchdog_vsync(drm_enc);
+				sde_encoder_helper_switch_vsync(drm_enc, true);
 		}
 	}
 
@@ -3336,7 +3348,8 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
 	/* wait for idle */
 	sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
 
-	if (sde_enc->input_handler)
+	if (sde_enc->input_handler &&
+		sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE))
 		input_unregister_handler(sde_enc->input_handler);
 
 	/*
@@ -5687,6 +5700,28 @@ int sde_encoder_wait_for_event(struct drm_encoder *drm_enc,
 	return ret;
 }
 
+void sde_encoder_helper_get_jitter_bounds_ns(struct drm_encoder *drm_enc,
+		u64 *l_bound, u64 *u_bound)
+{
+	struct sde_encoder_virt *sde_enc;
+	u64 jitter_ns, frametime_ns;
+	struct msm_mode_info *info;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	info = &sde_enc->mode_info;
+
+	frametime_ns = (1 * 1000000000) / info->frame_rate;
+	jitter_ns =  (info->jitter_numer * frametime_ns) /
+				(info->jitter_denom * 100);
+	*l_bound = frametime_ns - jitter_ns;
+	*u_bound = frametime_ns + jitter_ns;
+}
+
 u32 sde_encoder_get_fps(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc;
@@ -5984,12 +6019,12 @@ int sde_encoder_display_failure_notification(struct drm_encoder *enc,
 		kthread_flush_work(&sde_enc->esd_trigger_work);
 	}
 
-	/**
+	/*
 	 * panel may stop generating te signal (vsync) during esd failure. rsc
 	 * hardware may hang without vsync. Avoid rsc hang by generating the
 	 * vsync from watchdog timer instead of panel.
 	 */
-	_sde_encoder_switch_to_watchdog_vsync(enc);
+	sde_encoder_helper_switch_vsync(enc, true);
 
 	if (!skip_pre_kickoff)
 		sde_encoder_wait_for_event(enc, MSM_ENC_TX_COMPLETE);

+ 10 - 1
msm/sde/sde_encoder.h

@@ -24,6 +24,7 @@
 #include "msm_prop.h"
 #include "sde_hw_mdss.h"
 #include "sde_kms.h"
+#include "sde_connector.h"
 
 #define MAX_CHANNELS_PER_ENC 2
 
@@ -51,7 +52,7 @@ struct sde_encoder_hw_resources {
 	enum sde_intf_mode wbs[WB_MAX];
 	bool needs_cdm;
 	u32 display_num_of_h_tiles;
-	bool display_type;
+	enum sde_connector_display display_type;
 	struct msm_display_topology topology;
 };
 
@@ -319,6 +320,14 @@ bool sde_encoder_in_clone_mode(struct drm_encoder *enc);
  */
 bool sde_encoder_is_primary_display(struct drm_encoder *enc);
 
+/**
+ * sde_encoder_is_dsi_display - checks if underlying display is DSI
+ *     display or not.
+ * @drm_enc:    Pointer to drm encoder structure
+ * @Return:     true if it is primary display. false if secondary display
+ */
+bool sde_encoder_is_dsi_display(struct drm_encoder *enc);
+
 /**
  * sde_encoder_control_idle_pc - control enable/disable of idle power collapse
  * @drm_enc:    Pointer to drm encoder structure

+ 32 - 0
msm/sde/sde_encoder_phys.h

@@ -25,6 +25,7 @@
 #define KICKOFF_TIMEOUT_MS		84
 #define KICKOFF_TIMEOUT_JIFFIES		msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
 
+#define MAX_TE_PROFILE_COUNT		5
 /**
  * enum sde_enc_split_role - Role this physical encoder will play in a
  *	split-panel configuration, where one panel is master, and others slaves.
@@ -361,6 +362,17 @@ struct sde_encoder_phys_cmd_autorefresh {
 	wait_queue_head_t kickoff_wq;
 };
 
+/**
+ * struct sde_encoder_phys_cmd_te_timestamp - list node to keep track of
+ *     rd_ptr/TE timestamp
+ * @list: list node
+ * @timestamp: TE timestamp
+ */
+struct sde_encoder_phys_cmd_te_timestamp {
+	struct list_head list;
+	ktime_t timestamp;
+};
+
 /**
  * struct sde_encoder_phys_cmd - sub-class of sde_encoder_phys to handle command
  *	mode specific operations
@@ -371,6 +383,8 @@ struct sde_encoder_phys_cmd_autorefresh {
  * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
  * @pending_vblank_wq: Wait queue for blocking until VBLANK received
  * @wr_ptr_wait_success: log wr_ptr_wait success for release fence trigger
+ * @te_timestamp_list: List head for the TE timestamp list
+ * @te_timestamp: Array of size MAX_TE_PROFILE_COUNT te_timestamp_list elements
  */
 struct sde_encoder_phys_cmd {
 	struct sde_encoder_phys base;
@@ -380,6 +394,9 @@ struct sde_encoder_phys_cmd {
 	atomic_t pending_vblank_cnt;
 	wait_queue_head_t pending_vblank_wq;
 	bool wr_ptr_wait_success;
+	struct list_head te_timestamp_list;
+	struct sde_encoder_phys_cmd_te_timestamp
+			te_timestamp[MAX_TE_PROFILE_COUNT];
 };
 
 /**
@@ -558,6 +575,21 @@ int sde_encoder_helper_wait_event_timeout(
 		int32_t hw_id,
 		struct sde_encoder_wait_info *info);
 
+/*
+ * sde_encoder_get_fps - get the allowed panel jitter in nanoseconds
+ * @encoder: Pointer to drm encoder object
+ */
+void sde_encoder_helper_get_jitter_bounds_ns(struct drm_encoder *encoder,
+			u64 *l_bound, u64 *u_bound);
+
+/**
+ * sde_encoder_helper_switch_vsync - switch vsync source to WD or default
+ * @drm_enc:     Pointer to drm encoder structure
+ * @watchdog_te: switch vsync source to watchdog TE
+ */
+int sde_encoder_helper_switch_vsync(struct drm_encoder *drm_enc,
+		bool watchdog_te);
+
 /**
  * sde_encoder_helper_hw_reset - issue ctl hw reset
  *	This helper function may be optionally specified by physical

+ 130 - 25
msm/sde/sde_encoder_phys_cmd.c

@@ -230,6 +230,8 @@ static void sde_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
 	u32 scheduler_status = INVALID_CTL_STATUS;
 	struct sde_hw_ctl *ctl;
 	struct sde_hw_pp_vsync_info info[MAX_CHANNELS_PER_ENC] = {{0}};
+	struct sde_encoder_phys_cmd_te_timestamp *te_timestamp;
+	unsigned long lock_flags;
 
 	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_intf)
 		return;
@@ -241,6 +243,16 @@ static void sde_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
 	if (ctl && ctl->ops.get_scheduler_status)
 		scheduler_status = ctl->ops.get_scheduler_status(ctl);
 
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	te_timestamp = list_first_entry_or_null(&cmd_enc->te_timestamp_list,
+				struct sde_encoder_phys_cmd_te_timestamp, list);
+	if (te_timestamp) {
+		list_del_init(&te_timestamp->list);
+		te_timestamp->timestamp = ktime_get();
+		list_add_tail(&te_timestamp->list, &cmd_enc->te_timestamp_list);
+	}
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
 	sde_encoder_helper_get_pp_line_count(phys_enc->parent, info);
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
 		info[0].pp_idx, info[0].intf_idx,
@@ -1337,11 +1349,54 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff(
 	return ret;
 }
 
+static bool _sde_encoder_phys_cmd_needs_vsync_change(
+		struct sde_encoder_phys *phys_enc, ktime_t profile_timestamp)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+	struct sde_encoder_phys_cmd_te_timestamp *cur;
+	struct sde_encoder_phys_cmd_te_timestamp *prev = NULL;
+	ktime_t time_diff;
+	u64 l_bound = 0, u_bound = 0;
+	bool ret = false;
+	unsigned long lock_flags;
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	sde_encoder_helper_get_jitter_bounds_ns(phys_enc->parent,
+							&l_bound, &u_bound);
+	if (!l_bound || !u_bound) {
+		SDE_ERROR_CMDENC(cmd_enc, "invalid vsync jitter bounds\n");
+		return false;
+	}
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	list_for_each_entry_reverse(cur, &cmd_enc->te_timestamp_list, list) {
+		if (prev && ktime_after(cur->timestamp, profile_timestamp)) {
+			time_diff = ktime_sub(prev->timestamp, cur->timestamp);
+			if ((time_diff < l_bound) || (time_diff > u_bound)) {
+				ret = true;
+				break;
+			}
+		}
+		prev = cur;
+	}
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	if (ret) {
+		SDE_DEBUG_CMDENC(cmd_enc,
+		    "time_diff:%llu, prev:%llu, cur:%llu, jitter:%llu/%llu\n",
+			time_diff, prev->timestamp, cur->timestamp,
+			l_bound, u_bound);
+		SDE_EVT32(DRMID(phys_enc->parent),
+			(u32) (l_bound / 1000), (u32) (u_bound / 1000),
+			(u32) (time_diff / 1000), SDE_EVTLOG_ERROR);
+	}
+
+	return ret;
+}
+
 static int _sde_encoder_phys_cmd_wait_for_wr_ptr(
 		struct sde_encoder_phys *phys_enc)
 {
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
 	struct sde_encoder_wait_info wait_info = {0};
 	int ret;
 	bool frame_pending = true;
@@ -1369,29 +1424,9 @@ static int _sde_encoder_phys_cmd_wait_for_wr_ptr(
 		if (ctl && ctl->ops.get_start_state)
 			frame_pending = ctl->ops.get_start_state(ctl);
 
-		if (frame_pending)
-			SDE_ERROR_CMDENC(cmd_enc,
-				"wr_ptrt start interrupt wait failed\n");
-		else
-			ret = 0;
-
-		/*
-		 * Signaling the retire fence at wr_ptr timeout
-		 * to allow the next commit and avoid device freeze.
-		 * As wr_ptr timeout can occurs due to no read ptr,
-		 * updating pending_rd_ptr_cnt here may not cover all
-		 * cases. Hence signaling the retire fence.
-		 */
-		if (sde_encoder_phys_cmd_is_master(phys_enc) &&
-			atomic_add_unless(&phys_enc->pending_retire_fence_cnt,
-				-1, 0))
-			phys_enc->parent_ops.handle_frame_done(
-				phys_enc->parent, phys_enc,
-				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
+		ret = frame_pending ? ret : 0;
 	}
 
-	cmd_enc->wr_ptr_wait_success = (ret == 0) ? true : false;
-
 	return ret;
 }
 
@@ -1406,6 +1441,13 @@ static int sde_encoder_phys_cmd_wait_for_tx_complete(
 
 	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
 
+	if (!atomic_read(&phys_enc->pending_kickoff_cnt)) {
+		SDE_EVT32(DRMID(phys_enc->parent),
+			phys_enc->intf_idx - INTF_0,
+			phys_enc->enable_state);
+		return 0;
+	}
+
 	rc = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
 	if (rc) {
 		SDE_EVT32(DRMID(phys_enc->parent),
@@ -1416,11 +1458,60 @@ static int sde_encoder_phys_cmd_wait_for_tx_complete(
 	return rc;
 }
 
+static int _sde_encoder_phys_cmd_handle_wr_ptr_timeout(
+		struct sde_encoder_phys *phys_enc,
+		ktime_t profile_timestamp)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	bool switch_te;
+	int ret = -ETIMEDOUT;
+
+	switch_te = _sde_encoder_phys_cmd_needs_vsync_change(
+				phys_enc, profile_timestamp);
+
+	SDE_EVT32(DRMID(phys_enc->parent), switch_te, SDE_EVTLOG_FUNC_ENTRY);
+
+	if (switch_te) {
+		SDE_DEBUG_CMDENC(cmd_enc,
+				"wr_ptr_irq wait failed, retry with WD TE\n");
+
+		/* switch to watchdog TE and wait again */
+		sde_encoder_helper_switch_vsync(phys_enc->parent, true);
+
+		ret = _sde_encoder_phys_cmd_wait_for_wr_ptr(phys_enc);
+
+		/* switch back to default TE */
+		sde_encoder_helper_switch_vsync(phys_enc->parent, false);
+	}
+
+	/*
+	 * Signaling the retire fence at wr_ptr timeout
+	 * to allow the next commit and avoid device freeze.
+	 */
+	if (ret == -ETIMEDOUT) {
+		SDE_ERROR_CMDENC(cmd_enc,
+			"wr_ptr_irq wait failed, switch_te:%d\n", switch_te);
+		SDE_EVT32(DRMID(phys_enc->parent), switch_te, SDE_EVTLOG_ERROR);
+
+		if (sde_encoder_phys_cmd_is_master(phys_enc) &&
+		  atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0))
+			phys_enc->parent_ops.handle_frame_done(
+				phys_enc->parent, phys_enc,
+				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
+	}
+
+	cmd_enc->wr_ptr_wait_success = (ret == 0) ? true : false;
+
+	return ret;
+}
+
 static int sde_encoder_phys_cmd_wait_for_commit_done(
 		struct sde_encoder_phys *phys_enc)
 {
 	int rc = 0, i, pending_cnt;
 	struct sde_encoder_phys_cmd *cmd_enc;
+	ktime_t profile_timestamp = ktime_get();
 
 	if (!phys_enc)
 		return -EINVAL;
@@ -1430,8 +1521,18 @@ static int sde_encoder_phys_cmd_wait_for_commit_done(
 	/* only required for master controller */
 	if (sde_encoder_phys_cmd_is_master(phys_enc)) {
 		rc = _sde_encoder_phys_cmd_wait_for_wr_ptr(phys_enc);
-		if (rc == -ETIMEDOUT)
-			goto wait_for_idle;
+		if (rc == -ETIMEDOUT) {
+			/*
+			 * Profile all the TE received after profile_timestamp
+			 * and if the jitter is more, switch to watchdog TE
+			 * and wait for wr_ptr again. Finally move back to
+			 * default TE.
+			 */
+			rc = _sde_encoder_phys_cmd_handle_wr_ptr_timeout(
+					phys_enc, profile_timestamp);
+			if (rc == -ETIMEDOUT)
+				goto wait_for_idle;
+		}
 
 		if (cmd_enc->autorefresh.cfg.enable)
 			rc = _sde_encoder_phys_cmd_wait_for_autorefresh_done(
@@ -1753,6 +1854,10 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
 	init_waitqueue_head(&cmd_enc->pending_vblank_wq);
 	atomic_set(&cmd_enc->autorefresh.kickoff_cnt, 0);
 	init_waitqueue_head(&cmd_enc->autorefresh.kickoff_wq);
+	INIT_LIST_HEAD(&cmd_enc->te_timestamp_list);
+	for (i = 0; i < MAX_TE_PROFILE_COUNT; i++)
+		list_add(&cmd_enc->te_timestamp[i].list,
+				&cmd_enc->te_timestamp_list);
 
 	SDE_DEBUG_CMDENC(cmd_enc, "created\n");
 

+ 4 - 3
msm/sde/sde_hw_catalog.c

@@ -4127,14 +4127,15 @@ static int _sde_hardware_post_caps(struct sde_mdss_cfg *sde_cfg,
 		sde_cfg->max_display_width = sde_cfg->max_sspp_linewidth *
 			max_horz_deci;
 	else
-		sde_cfg->max_display_width = sde_cfg->max_mixer_width *
-			sde_cfg->max_lm_per_display;
+		sde_cfg->max_display_width = sde_cfg->max_sspp_linewidth *
+			MAX_DOWNSCALE_RATIO;
 
 	if (max_vert_deci)
 		sde_cfg->max_display_height =
 			MAX_DISPLAY_HEIGHT_WITH_DECIMATION * max_vert_deci;
 	else
-		sde_cfg->max_display_height = MAX_DISPLAY_HEIGHT;
+		sde_cfg->max_display_height = MAX_DISPLAY_HEIGHT_WITH_DECIMATION
+			* MAX_DOWNSCALE_RATIO;
 
 	sde_cfg->min_display_height = MIN_DISPLAY_HEIGHT;
 	sde_cfg->min_display_width = MIN_DISPLAY_WIDTH;

+ 9 - 6
msm/sde/sde_hw_color_proc_v4.c

@@ -150,7 +150,7 @@ void sde_setup_dspp_igcv3(struct sde_hw_dspp *ctx, void *cfg)
 	struct drm_msm_igc_lut *lut_cfg;
 	struct sde_hw_cp_cfg *hw_cfg = cfg;
 	int i = 0, j = 0;
-	u32 *addr = NULL;
+	u32 *addr[IGC_TBL_NUM];
 	u32 offset = 0;
 
 	if (!ctx || !cfg) {
@@ -172,17 +172,20 @@ void sde_setup_dspp_igcv3(struct sde_hw_dspp *ctx, void *cfg)
 
 	lut_cfg = hw_cfg->payload;
 
+	addr[0] = lut_cfg->c0;
+	addr[1] = lut_cfg->c1;
+	addr[2] = lut_cfg->c2;
+
 	for (i = 0; i < IGC_TBL_NUM; i++) {
-		addr = lut_cfg->c0 + (i * ARRAY_SIZE(lut_cfg->c0));
 		offset = IGC_C0_OFF + (i * sizeof(u32));
 
 		for (j = 0; j < IGC_TBL_LEN; j++) {
-			addr[j] &= IGC_DATA_MASK;
-			addr[j] |= IGC_DSPP_SEL_MASK(ctx->idx - 1);
+			addr[i][j] &= IGC_DATA_MASK;
+			addr[i][j] |= IGC_DSPP_SEL_MASK(ctx->idx - 1);
 			if (j == 0)
-				addr[j] |= IGC_INDEX_UPDATE;
+				addr[i][j] |= IGC_INDEX_UPDATE;
 			/* IGC lut registers are part of DSPP Top HW block */
-			SDE_REG_WRITE(&ctx->hw_top, offset, addr[j]);
+			SDE_REG_WRITE(&ctx->hw_top, offset, addr[i][j]);
 		}
 	}
 

+ 1 - 0
msm/sde/sde_hw_ctl.c

@@ -1041,6 +1041,7 @@ static int sde_hw_ctl_reset_post_disable(struct sde_hw_ctl *ctx,
 		ctx->flush.pending_merge_3d_flush_mask =
 			BIT(merge_3d_idx - MERGE_3D_0);
 		merge_3d_active &= ~BIT(merge_3d_idx - MERGE_3D_0);
+		UPDATE_MASK(ctx->flush.pending_flush_mask, MERGE_3D_IDX, 1);
 		SDE_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);
 	}
 

+ 18 - 8
msm/sde/sde_hw_reg_dma_v1_color_proc.c

@@ -830,6 +830,7 @@ void reg_dmav1_setup_dspp_gcv18(struct sde_hw_dspp *ctx, void *cfg)
 	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
 	int rc, i = 0;
 	u32 reg;
+	u32 *addr[GC_TBL_NUM];
 	u32 num_of_mixers, blk = 0;
 
 	rc = reg_dma_dspp_check(ctx, cfg, GC);
@@ -870,6 +871,9 @@ void reg_dmav1_setup_dspp_gcv18(struct sde_hw_dspp *ctx, void *cfg)
 		return;
 	}
 
+	addr[0] = lut_cfg->c0;
+	addr[1] = lut_cfg->c1;
+	addr[2] = lut_cfg->c2;
 	for (i = 0; i < GC_TBL_NUM; i++) {
 		reg = 0;
 		REG_DMA_SETUP_OPS(dma_write_cfg,
@@ -885,7 +889,7 @@ void reg_dmav1_setup_dspp_gcv18(struct sde_hw_dspp *ctx, void *cfg)
 		REG_DMA_SETUP_OPS(dma_write_cfg,
 			ctx->cap->sblk->gc.base + GC_C0_OFF +
 			(i * sizeof(u32) * 2),
-			lut_cfg->c0 + (ARRAY_SIZE(lut_cfg->c0) * i),
+			addr[i],
 			PGC_TBL_LEN * sizeof(u32),
 			REG_BLK_WRITE_INC, 0, 0, 0);
 		rc = dma_ops->setup_payload(&dma_write_cfg);
@@ -981,7 +985,7 @@ void reg_dmav1_setup_dspp_igcv31(struct sde_hw_dspp *ctx, void *cfg)
 	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
 	struct sde_hw_dspp *dspp_list[DSPP_MAX];
 	int rc, i = 0, j = 0;
-	u32 *addr = NULL;
+	u32 *addr[IGC_TBL_NUM];
 	u32 offset = 0;
 	u32 reg;
 	u32 index, num_of_mixers, dspp_sel, blk = 0;
@@ -1038,18 +1042,20 @@ void reg_dmav1_setup_dspp_igcv31(struct sde_hw_dspp *ctx, void *cfg)
 	for (index = 0; index < num_of_mixers; index++)
 		dspp_sel &= IGC_DSPP_SEL_MASK(dspp_list[index]->idx - 1);
 
+	addr[0] = lut_cfg->c0;
+	addr[1] = lut_cfg->c1;
+	addr[2] = lut_cfg->c2;
 	for (i = 0; i < IGC_TBL_NUM; i++) {
-		addr = lut_cfg->c0 + (i * ARRAY_SIZE(lut_cfg->c0));
 		offset = IGC_C0_OFF + (i * sizeof(u32));
 
 		for (j = 0; j < IGC_TBL_LEN; j++) {
-			addr[j] &= IGC_DATA_MASK;
-			addr[j] |= dspp_sel;
+			addr[i][j] &= IGC_DATA_MASK;
+			addr[i][j] |= dspp_sel;
 			if (j == 0)
-				addr[j] |= IGC_INDEX_UPDATE;
+				addr[i][j] |= IGC_INDEX_UPDATE;
 		}
 
-		REG_DMA_SETUP_OPS(dma_write_cfg, offset, addr,
+		REG_DMA_SETUP_OPS(dma_write_cfg, offset, addr[i],
 			IGC_TBL_LEN * sizeof(u32),
 			REG_BLK_WRITE_INC, 0, 0, 0);
 		rc = dma_ops->setup_payload(&dma_write_cfg);
@@ -2231,6 +2237,7 @@ static int reg_dmav1_setup_vig_igc_common(struct sde_hw_reg_dma_ops *dma_ops,
 	u32 lut_sel = 0, lut_enable = 0;
 	u32 *data = NULL, *data_ptr = NULL;
 	u32 igc_base = ctx->cap->sblk->igc_blk[0].base - REG_DMA_VIG_SWI_DIFF;
+	u32 *addr[IGC_TBL_NUM];
 
 	if (hw_cfg->len != sizeof(struct drm_msm_igc_lut)) {
 		DRM_ERROR("invalid size of payload len %d exp %zd\n",
@@ -2248,6 +2255,9 @@ static int reg_dmav1_setup_vig_igc_common(struct sde_hw_reg_dma_ops *dma_ops,
 	if (lut_enable)
 		lut_sel = (~lut_sel) && BIT(0);
 
+	addr[0] = igc_lut->c0;
+	addr[1] = igc_lut->c1;
+	addr[2] = igc_lut->c2;
 	for (i = 0; i < IGC_TBL_NUM; i++) {
 		/* write 0 to the index register */
 		index = 0;
@@ -2260,7 +2270,7 @@ static int reg_dmav1_setup_vig_igc_common(struct sde_hw_reg_dma_ops *dma_ops,
 		}
 
 		offset = igc_base + 0x1B4 + i * sizeof(u32);
-		data_ptr = igc_lut->c0 + (ARRAY_SIZE(igc_lut->c0) * i);
+		data_ptr = addr[i];
 		for (j = 0; j < VIG_1D_LUT_IGC_LEN; j++)
 			data[j] = (data_ptr[2 * j] & mask) |
 				(data_ptr[2 * j + 1] & mask) << 16;

+ 21 - 5
msm/sde/sde_hw_sspp.c

@@ -1023,10 +1023,16 @@ static void sde_hw_sspp_setup_uidle(struct sde_hw_pipe *ctx,
 }
 
 static void _setup_layer_ops_colorproc(struct sde_hw_pipe *c,
-		unsigned long features)
+		unsigned long features, bool is_virtual_pipe)
 {
 	int ret = 0;
 
+	if (is_virtual_pipe) {
+		features &=
+			~(BIT(SDE_SSPP_VIG_IGC) | BIT(SDE_SSPP_VIG_GAMUT));
+		c->cap->features = features;
+	}
+
 	if (test_bit(SDE_SSPP_HSIC, &features)) {
 		if (c->cap->sblk->hsic_blk.version ==
 			(SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
@@ -1181,7 +1187,8 @@ static void sde_hw_sspp_setup_dgm_csc(struct sde_hw_pipe *ctx,
 }
 
 static void _setup_layer_ops(struct sde_hw_pipe *c,
-		unsigned long features, unsigned long perf_features)
+		unsigned long features, unsigned long perf_features,
+		bool is_virtual_pipe)
 {
 	int ret;
 
@@ -1247,7 +1254,7 @@ static void _setup_layer_ops(struct sde_hw_pipe *c,
 	if (test_bit(SDE_PERF_SSPP_UIDLE, &perf_features))
 		c->ops.setup_uidle = sde_hw_sspp_setup_uidle;
 
-	_setup_layer_ops_colorproc(c, features);
+	_setup_layer_ops_colorproc(c, features, is_virtual_pipe);
 
 	if (test_bit(SDE_SSPP_DGM_INVERSE_PMA, &features))
 		c->ops.setup_inverse_pma = sde_hw_sspp_setup_dgm_inverse_pma;
@@ -1264,6 +1271,7 @@ static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
 		struct sde_hw_blk_reg_map *b)
 {
 	int i;
+	struct sde_sspp_cfg *cfg;
 
 	if ((sspp < SSPP_MAX) && catalog && addr && b) {
 		for (i = 0; i < catalog->sspp_count; i++) {
@@ -1273,7 +1281,14 @@ static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
 				b->length = catalog->sspp[i].len;
 				b->hwversion = catalog->hwversion;
 				b->log_mask = SDE_DBG_MASK_SSPP;
-				return &catalog->sspp[i];
+
+				/* Only shallow copy is needed */
+				cfg =  kmemdup(&catalog->sspp[i], sizeof(*cfg),
+					GFP_KERNEL);
+				if (!cfg)
+					return ERR_PTR(-ENOMEM);
+
+				return cfg;
 			}
 		}
 	}
@@ -1313,7 +1328,7 @@ struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
 	hw_pipe->idx = idx;
 	hw_pipe->cap = cfg;
 	_setup_layer_ops(hw_pipe, hw_pipe->cap->features,
-		hw_pipe->cap->perf_features);
+		hw_pipe->cap->perf_features, is_virtual_pipe);
 
 	if (hw_pipe->ops.get_scaler_ver) {
 		sde_init_scaler_blk(&hw_pipe->cap->sblk->scaler_blk,
@@ -1353,6 +1368,7 @@ void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx)
 	if (ctx) {
 		sde_hw_blk_destroy(&ctx->base);
 		reg_dmav1_deinit_sspp_ops(ctx->idx);
+		kfree(ctx->cap);
 	}
 	kfree(ctx);
 }

+ 108 - 23
msm/sde/sde_kms.c

@@ -19,6 +19,7 @@
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 
 #include <drm/drm_crtc.h>
+#include <drm/drm_fixed.h>
 #include <linux/debugfs.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -528,12 +529,24 @@ static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
 end:
 	smmu_state->sui_misr_state = NONE;
 	smmu_state->transition_type = NONE;
-	smmu_state->transition_error = ret ? true : false;
+	smmu_state->transition_error = false;
 
-	SDE_DEBUG("crtc %d: old_state %d, new_state %d, sec_lvl %d, ret %d\n",
-			DRMID(crtc), old_smmu_state, smmu_state->state,
-			smmu_state->secure_level, ret);
-	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->transition_type,
+	/*
+	 * If switch failed, toggling secure_level is enough since
+	 * there are only two secure levels - secure/non-secure
+	 */
+	if (ret) {
+		smmu_state->transition_error = true;
+		smmu_state->state = smmu_state->prev_state;
+		smmu_state->secure_level = !smmu_state->secure_level;
+	}
+
+	SDE_DEBUG(
+		"crtc %d: old_state %d, req_state %d, new_state %d, sec_lvl %d, ret %d\n",
+			DRMID(crtc), smmu_state->prev_state, old_smmu_state,
+			smmu_state->state, smmu_state->secure_level, ret);
+	SDE_EVT32(DRMID(crtc), smmu_state->prev_state,
+			smmu_state->state, smmu_state->transition_type,
 			smmu_state->transition_error, smmu_state->secure_level,
 			smmu_state->sui_misr_state, ret, SDE_EVTLOG_FUNC_EXIT);
 
@@ -2456,6 +2469,52 @@ static bool sde_kms_check_for_splash(struct msm_kms *kms)
 	return sde_kms->splash_data.num_splash_displays;
 }
 
+static int sde_kms_get_mixer_count(const struct msm_kms *kms,
+		const struct drm_display_mode *mode,
+		const struct msm_resource_caps_info *res, u32 *num_lm)
+{
+	struct sde_kms *sde_kms;
+	s64 mode_clock_hz = 0;
+	s64 max_mdp_clock_hz = 0;
+	s64 mdp_fudge_factor = 0;
+	s64 temp = 0;
+	s64 htotal_fp = 0;
+	s64 vtotal_fp = 0;
+	s64 vrefresh_fp = 0;
+
+	if (!num_lm) {
+		SDE_ERROR("invalid num_lm pointer\n");
+		return -EINVAL;
+	}
+
+	*num_lm = 1;
+	if (!kms || !mode || !res) {
+		SDE_ERROR("invalid input args\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(kms);
+
+	max_mdp_clock_hz = drm_fixp_from_fraction(
+			sde_kms->perf.max_core_clk_rate, 1);
+	mdp_fudge_factor = drm_fixp_from_fraction(105, 100); /* 1.05 */
+	htotal_fp = drm_fixp_from_fraction(mode->htotal, 1);
+	vtotal_fp = drm_fixp_from_fraction(mode->vtotal, 1);
+	vrefresh_fp = drm_fixp_from_fraction(mode->vrefresh, 1);
+
+	temp = drm_fixp_mul(htotal_fp, vtotal_fp);
+	temp = drm_fixp_mul(temp, vrefresh_fp);
+	mode_clock_hz = drm_fixp_mul(temp, mdp_fudge_factor);
+	if (mode_clock_hz > max_mdp_clock_hz ||
+			mode->hdisplay > res->max_mixer_width)
+		*num_lm = 2;
+	SDE_DEBUG("[%s] h=%d, v=%d, fps=%d, max_mdp_clk_hz=%llu, num_lm=%d\n",
+			mode->name, mode->htotal, mode->vtotal, mode->vrefresh,
+			sde_kms->perf.max_core_clk_rate, *num_lm);
+
+	return 0;
+}
+
 static void _sde_kms_null_commit(struct drm_device *dev,
 		struct drm_encoder *enc)
 {
@@ -2528,6 +2587,45 @@ end:
 	drm_modeset_acquire_fini(&ctx);
 }
 
+static void _sde_kms_pm_suspend_idle_helper(struct sde_kms *sde_kms,
+	struct device *dev)
+{
+	int i, ret;
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct drm_connector *conn;
+	struct drm_connector_list_iter conn_iter;
+	struct msm_drm_private *priv = sde_kms->dev->dev_private;
+
+	drm_connector_list_iter_begin(ddev, &conn_iter);
+	drm_for_each_connector_iter(conn, &conn_iter) {
+		uint64_t lp;
+
+		lp = sde_connector_get_lp(conn);
+		if (lp != SDE_MODE_DPMS_LP2)
+			continue;
+
+		ret = sde_encoder_wait_for_event(conn->encoder,
+						MSM_ENC_TX_COMPLETE);
+		if (ret && ret != -EWOULDBLOCK)
+			SDE_ERROR(
+				"[conn: %d] wait for commit done returned %d\n",
+				conn->base.id, ret);
+		else if (!ret)
+			sde_encoder_idle_request(conn->encoder);
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	for (i = 0; i < priv->num_crtcs; i++) {
+		if (priv->disp_thread[i].thread)
+			kthread_flush_worker(
+				&priv->disp_thread[i].worker);
+		if (priv->event_thread[i].thread)
+			kthread_flush_worker(
+				&priv->event_thread[i].worker);
+	}
+	kthread_flush_worker(&priv->pp_event_worker);
+}
+
 static int sde_kms_pm_suspend(struct device *dev)
 {
 	struct drm_device *ddev;
@@ -2548,6 +2646,7 @@ static int sde_kms_pm_suspend(struct device *dev)
 
 	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
 	SDE_EVT32(0);
+	pm_runtime_put_noidle(dev);
 
 	/* disable hot-plug polling */
 	drm_kms_helper_poll_disable(ddev);
@@ -2630,6 +2729,7 @@ retry:
 	if (num_crtcs == 0) {
 		DRM_DEBUG("all crtcs are already in the off state\n");
 		sde_kms->suspend_block = true;
+		_sde_kms_pm_suspend_idle_helper(sde_kms, dev);
 		goto unlock;
 	}
 
@@ -2641,25 +2741,8 @@ retry:
 	}
 
 	sde_kms->suspend_block = true;
+	_sde_kms_pm_suspend_idle_helper(sde_kms, dev);
 
-	drm_connector_list_iter_begin(ddev, &conn_iter);
-	drm_for_each_connector_iter(conn, &conn_iter) {
-		uint64_t lp;
-
-		lp = sde_connector_get_lp(conn);
-		if (lp != SDE_MODE_DPMS_LP2)
-			continue;
-
-		ret = sde_encoder_wait_for_event(conn->encoder,
-						MSM_ENC_TX_COMPLETE);
-		if (ret && ret != -EWOULDBLOCK)
-			SDE_ERROR(
-				"[enc: %d] wait for commit done returned %d\n",
-				conn->encoder->base.id, ret);
-		else if (!ret)
-			sde_encoder_idle_request(conn->encoder);
-	}
-	drm_connector_list_iter_end(&conn_iter);
 unlock:
 	if (state) {
 		drm_atomic_state_put(state);
@@ -2672,6 +2755,7 @@ unlock:
 	}
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
+	pm_runtime_get_noresume(dev);
 
 	return ret;
 }
@@ -2766,6 +2850,7 @@ static const struct msm_kms_funcs kms_funcs = {
 	.get_address_space_device = _sde_kms_get_address_space_device,
 	.postopen = _sde_kms_post_open,
 	.check_for_splash = sde_kms_check_for_splash,
+	.get_mixer_count = sde_kms_get_mixer_count,
 };
 
 /* the caller api needs to turn on clock before calling it */

+ 2 - 0
msm/sde/sde_kms.h

@@ -187,12 +187,14 @@ enum frame_trigger_mode_type {
 /**
  * struct sde_kms_smmu_state_data: stores the smmu state and transition type
  * @state: current state of smmu context banks
+ * @prev_state: previous state of smmu context banks
  * @secure_level: secure level cached from crtc
  * @transition_type: transition request type
  * @transition_error: whether there is error while transitioning the state
  */
 struct sde_kms_smmu_state_data {
 	uint32_t state;
+	uint32_t prev_state;
 	uint32_t secure_level;
 	uint32_t transition_type;
 	uint32_t transition_error;

+ 10 - 2
msm/sde/sde_plane.c

@@ -2406,6 +2406,12 @@ static int _sde_atomic_check_decimation_scaler(struct drm_plane_state *state,
 	uint32_t max_upscale, max_linewidth;
 	bool inline_rotation, rt_client;
 	struct drm_crtc *crtc;
+	struct drm_crtc_state *new_cstate;
+
+	if (!state || !state->state || !state->crtc) {
+		SDE_ERROR_PLANE(psde, "invalid arguments\n");
+		return -EINVAL;
+	}
 
 	deci_w = sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
 	deci_h = sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
@@ -2427,7 +2433,9 @@ static int _sde_atomic_check_decimation_scaler(struct drm_plane_state *state,
 	max_linewidth = psde->pipe_sblk->maxlinewidth;
 
 	crtc = state->crtc;
-	rt_client = sde_crtc_is_rt_client(crtc);
+	new_cstate = drm_atomic_get_new_crtc_state(state->state, crtc);
+
+	rt_client = sde_crtc_is_rt_client(crtc, new_cstate);
 
 	max_downscale_denom = 1;
 	/* inline rotation RT clients have a different max downscaling limit */
@@ -3208,7 +3216,7 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
 		return 0;
 	pstate->pending = true;
 
-	psde->is_rt_pipe = sde_crtc_is_rt_client(crtc);
+	psde->is_rt_pipe = sde_crtc_is_rt_client(crtc, crtc->state);
 	_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
 
 	_sde_plane_update_properties(plane, crtc, fb);

+ 17 - 11
msm/sde/sde_rm.c

@@ -929,19 +929,24 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
 {
 	const struct sde_lm_cfg *lm_cfg = to_sde_hw_mixer(lm->hw)->cap;
 	const struct sde_pingpong_cfg *pp_cfg;
-	bool ret;
-	u32 display_pref, cwb_pref;
+	bool ret, is_conn_primary, is_conn_secondary;
+	u32 lm_primary_pref, lm_secondary_pref, cwb_pref;
 
 	*dspp = NULL;
 	*ds = NULL;
 	*pp = NULL;
-	display_pref = lm_cfg->features & BIT(SDE_DISP_PRIMARY_PREF) ||
-			lm_cfg->features & BIT(SDE_DISP_SECONDARY_PREF);
+
+	lm_primary_pref = lm_cfg->features & BIT(SDE_DISP_PRIMARY_PREF);
+	lm_secondary_pref = lm_cfg->features & BIT(SDE_DISP_SECONDARY_PREF);
 	cwb_pref = lm_cfg->features & BIT(SDE_DISP_CWB_PREF);
+	is_conn_primary = (reqs->hw_res.display_type ==
+				 SDE_CONNECTOR_PRIMARY) ? true : false;
+	is_conn_secondary = (reqs->hw_res.display_type ==
+				 SDE_CONNECTOR_SECONDARY) ? true : false;
 
-	SDE_DEBUG("check lm %d: dspp %d ds %d pp %d disp_pref: %d cwb_pref%d\n",
-		lm_cfg->id, lm_cfg->dspp, lm_cfg->ds,
-		lm_cfg->pingpong, display_pref, cwb_pref);
+	SDE_DEBUG("check lm %d: dspp %d ds %d pp %d features %d disp type %d\n",
+		 lm_cfg->id, lm_cfg->dspp, lm_cfg->ds, lm_cfg->pingpong,
+		 lm_cfg->features, (int)reqs->hw_res.display_type);
 
 	/* Check if this layer mixer is a peer of the proposed primary LM */
 	if (primary_lm) {
@@ -956,7 +961,7 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
 	}
 
 	/* bypass rest of the checks if LM for primary display is found */
-	if (!display_pref) {
+	if (!lm_primary_pref && !lm_secondary_pref) {
 		/* Check lm for valid requirements */
 		ret = _sde_rm_check_lm(rm, rsvp, reqs, lm_cfg, lm,
 				dspp, ds, pp);
@@ -971,10 +976,11 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
 			SDE_DEBUG("fail: cwb supported lm not allocated\n");
 			return false;
 		}
-	} else if (!(reqs->hw_res.display_type && display_pref)) {
+	} else if ((!is_conn_primary && lm_primary_pref) ||
+			(!is_conn_secondary && lm_secondary_pref)) {
 		SDE_DEBUG(
-			"display preference is not met. display_type: %d display_pref: %d\n",
-			(int)reqs->hw_res.display_type, (int)display_pref);
+			"display preference is not met. display_type: %d lm_features: %x\n",
+			(int)reqs->hw_res.display_type, lm_cfg->features);
 		return false;
 	}
 

+ 19 - 4
msm/sde/sde_wb.h

@@ -288,9 +288,7 @@ int sde_wb_config(struct drm_device *drm_dev, void *data,
 }
 static inline
 int sde_wb_connector_post_init(struct drm_connector *connector,
-		void *info,
-		void *display,
-		struct msm_mode_info *mode_info)
+					 void *display)
 {
 	return 0;
 }
@@ -318,7 +316,8 @@ int sde_wb_connector_set_property(struct drm_connector *connector,
 	return 0;
 }
 static inline
-int sde_wb_get_info(struct msm_display_info *info, void *display)
+int sde_wb_get_info(struct drm_connector *connector,
+		struct msm_display_info *info, void *display)
 {
 	return 0;
 }
@@ -341,7 +340,23 @@ int sde_wb_connector_state_get_output_roi(struct drm_connector_state *state,
 {
 	return 0;
 }
+static inline
+int sde_wb_connector_set_info_blob(struct drm_connector *connector,
+		void *info,
+		void *display,
+		struct msm_mode_info *mode_info)
+{
+	return 0;
+}
 
+static inline
+int sde_wb_get_mode_info(struct drm_connector *connector,
+		const struct drm_display_mode *drm_mode,
+		struct msm_mode_info *mode_info,
+		void *display, const struct msm_resource_caps_info *avail_res)
+{
+	return 0;
+}
 #endif
 #endif /* __SDE_WB_H__ */
 

+ 3 - 0
msm/sde_dbg.c

@@ -3754,6 +3754,9 @@ static ssize_t sde_dbg_ctrl_read(struct file *file, char __user *buff,
 	pr_debug("%s: ctrl:0x%x len:0x%zx\n",
 		__func__, sde_dbg_base.debugfs_ctrl, len);
 
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
 	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
 		pr_err("error copying the buffer! count:0x%zx\n", count);
 		return -EFAULT;

+ 4 - 3
msm/sde_rsc.c

@@ -37,10 +37,11 @@
 /**
  * rsc_min_threshold will be set to MIN_THRESHOLD_OVERHEAD_TIME which
  * takes into account back off time + overhead from RSC/RSC_WRAPPER. The
- * overhead buffer time is required to be greater than 14. For measure,
- * this value assumes 18.
+ * overhead buffer time is required to be greater than 14. Program it
+ * with a higher value (3.3 ms), so it has sufficient time to complete
+ * the sequence in rare cases.
  */
-#define MIN_THRESHOLD_OVERHEAD_TIME	18
+#define MIN_THRESHOLD_OVERHEAD_TIME	64
 
 #define DEFAULT_PANEL_FPS		60
 #define DEFAULT_PANEL_JITTER_NUMERATOR	2

+ 7 - 2
msm/sde_rsc_hw.c

@@ -872,14 +872,19 @@ int rsc_hw_tcs_wait(struct sde_rsc_priv *rsc)
 	}
 
 	/* check for sequence running status before exiting */
-	for (count = MAX_CHECK_LOOPS; count > 0; count--) {
+	for (count = (MAX_CHECK_LOOPS / 4); count > 0; count--) {
 		seq_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
 				rsc->debug_mode) & BIT(1);
 		if (!seq_status) {
 			rc = 0;
 			break;
 		}
-		usleep_range(1, 2);
+
+		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+						0x1, rsc->debug_mode);
+		usleep_range(3, 4);
+		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+						0x0, rsc->debug_mode);
 	}
 
 	return rc;

+ 5 - 44
pll/dsi_pll_10nm.c

@@ -1189,13 +1189,6 @@ static unsigned long vco_10nm_recalc_rate(struct clk_hw *hw,
 	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 	struct mdss_pll_resources *pll = vco->priv;
 	int rc;
-	u64 ref_clk = vco->ref_clk_rate;
-	u64 vco_rate;
-	u64 multiplier;
-	u32 frac;
-	u32 dec;
-	u32 outdiv;
-	u64 pll_freq, tmp64;
 
 	if (!vco->priv)
 		pr_err("vco priv is null\n");
@@ -1206,12 +1199,10 @@ static unsigned long vco_10nm_recalc_rate(struct clk_hw *hw,
 	}
 
 	/*
-	 * Calculate the vco rate from HW registers only for handoff cases.
-	 * For other cases where a vco_10nm_set_rate() has already been
-	 * called, just return the rate that was set earlier. This is due
-	 * to the fact that recalculating VCO rate requires us to read the
-	 * correct value of the pll_out_div divider clock, which is only set
-	 * afterwards.
+	 * In the case when vco arte is set, the recalculation function should
+	 * return the current rate as to avoid trying to set the vco rate
+	 * again. However durng handoff, recalculation should set the flag
+	 * according to the status of PLL.
 	 */
 	if (pll->vco_current_rate != 0) {
 		pr_debug("returning vco rate = %lld\n", pll->vco_current_rate);
@@ -1228,40 +1219,10 @@ static unsigned long vco_10nm_recalc_rate(struct clk_hw *hw,
 	if (!dsi_pll_10nm_lock_status(pll))
 		pll->handoff_resources = true;
 
-	dec = MDSS_PLL_REG_R(pll->pll_base, PLL_DECIMAL_DIV_START_1);
-	dec &= 0xFF;
-
-	frac = MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_LOW_1);
-	frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_MID_1) &
-		  0xFF) <<
-		8);
-	frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_HIGH_1) &
-		  0x3) <<
-		16);
-
-	/* OUTDIV_1:0 field is (log(outdiv, 2)) */
-	outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE);
-	outdiv &= 0x3;
-	outdiv = 1 << outdiv;
-
-	/*
-	 * TODO:
-	 *	1. Assumes prescaler is disabled
-	 *	2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
-	 **/
-	multiplier = 1 << 18;
-	pll_freq = dec * (ref_clk * 2);
-	tmp64 = (ref_clk * 2 * frac);
-	pll_freq += div_u64(tmp64, multiplier);
-
-	vco_rate = div_u64(pll_freq, outdiv);
-
-	pr_debug("dec=0x%x, frac=0x%x, outdiv=%d, vco=%llu\n",
-		 dec, frac, outdiv, vco_rate);
 
 	(void)mdss_pll_resource_enable(pll, false);
 
-	return (unsigned long)vco_rate;
+	return rc;
 }
 
 static int pixel_clk_get_div(void *context, unsigned int reg, unsigned int *div)

+ 37 - 41
pll/dsi_pll_7nm.c

@@ -1046,7 +1046,7 @@ static void shadow_dsi_pll_dynamic_refresh_7nm(struct dsi_pll_7nm *pll,
 	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL4,
 			   (PLL_SYSTEM_MUXES + offset),
 			   (PLL_PLL_LOCKDET_RATE_1 + offset),
-			   0xc0, 0x40);
+			   0xc0, 0x10);
 	upper_addr |= (upper_8_bit(PLL_SYSTEM_MUXES + offset) << 8);
 	upper_addr |= (upper_8_bit(PLL_PLL_LOCKDET_RATE_1 + offset) << 9);
 
@@ -1077,92 +1077,88 @@ static void shadow_dsi_pll_dynamic_refresh_7nm(struct dsi_pll_7nm *pll,
 
 	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL8,
 			   (PLL_ANALOG_CONTROLS_FIVE + offset),
-			   (PLL_DSM_DIVIDER + offset), 0x01, 0);
+			   (PLL_ANALOG_CONTROLS_TWO + offset), 0x01, 0x03);
 	upper_addr |= (upper_8_bit(PLL_ANALOG_CONTROLS_FIVE + offset) << 16);
-	upper_addr |= (upper_8_bit(PLL_DSM_DIVIDER + offset) << 17);
+	upper_addr |= (upper_8_bit(PLL_ANALOG_CONTROLS_TWO + offset) << 17);
 
 	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL9,
+			   (PLL_ANALOG_CONTROLS_THREE + offset),
+			   (PLL_DSM_DIVIDER + offset),
+			   rsc->cache_pll_trim_codes[2], 0x00);
+	upper_addr |= (upper_8_bit(PLL_ANALOG_CONTROLS_THREE + offset) << 18);
+	upper_addr |= (upper_8_bit(PLL_DSM_DIVIDER + offset) << 19);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL10,
 			   (PLL_FEEDBACK_DIVIDER + offset),
 			   (PLL_CALIBRATION_SETTINGS + offset), 0x4E, 0x40);
-	upper_addr |= (upper_8_bit(PLL_FEEDBACK_DIVIDER + offset) << 18);
-	upper_addr |= (upper_8_bit(PLL_CALIBRATION_SETTINGS + offset) << 19);
+	upper_addr |= (upper_8_bit(PLL_FEEDBACK_DIVIDER + offset) << 20);
+	upper_addr |= (upper_8_bit(PLL_CALIBRATION_SETTINGS + offset) << 21);
 
-	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL10,
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL11,
 			   (PLL_BAND_SEL_CAL_SETTINGS_THREE + offset),
 			   (PLL_FREQ_DETECT_SETTINGS_ONE + offset), 0xBA, 0x0C);
 	upper_addr |= (upper_8_bit(PLL_BAND_SEL_CAL_SETTINGS_THREE + offset)
-		       << 20);
+		       << 22);
 	upper_addr |= (upper_8_bit(PLL_FREQ_DETECT_SETTINGS_ONE + offset)
-		       << 21);
+		       << 23);
 
-	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL11,
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL12,
 			   (PLL_OUTDIV + offset),
 			   (PLL_CORE_OVERRIDE + offset), 0, 0);
-	upper_addr |= (upper_8_bit(PLL_OUTDIV + offset) << 22);
-	upper_addr |= (upper_8_bit(PLL_CORE_OVERRIDE + offset) << 23);
+	upper_addr |= (upper_8_bit(PLL_OUTDIV + offset) << 24);
+	upper_addr |= (upper_8_bit(PLL_CORE_OVERRIDE + offset) << 25);
 
-	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL12,
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL13,
 			   (PLL_PLL_DIGITAL_TIMERS_TWO + offset),
 			   (PLL_PLL_PROP_GAIN_RATE_1 + offset),
 			    0x08, reg->pll_prop_gain_rate);
-	upper_addr |= (upper_8_bit(PLL_PLL_DIGITAL_TIMERS_TWO + offset) << 24);
-	upper_addr |= (upper_8_bit(PLL_PLL_PROP_GAIN_RATE_1 + offset) << 25);
+	upper_addr |= (upper_8_bit(PLL_PLL_DIGITAL_TIMERS_TWO + offset) << 26);
+	upper_addr |= (upper_8_bit(PLL_PLL_PROP_GAIN_RATE_1 + offset) << 27);
 
-	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL13,
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL14,
 			   (PLL_PLL_BAND_SEL_RATE_1 + offset),
 			   (PLL_PLL_INT_GAIN_IFILT_BAND_1 + offset),
 			    0xC0, 0x82);
-	upper_addr |= (upper_8_bit(PLL_PLL_BAND_SEL_RATE_1 + offset) << 26);
+	upper_addr |= (upper_8_bit(PLL_PLL_BAND_SEL_RATE_1 + offset) << 28);
 	upper_addr |= (upper_8_bit(PLL_PLL_INT_GAIN_IFILT_BAND_1 + offset)
-		       << 27);
+		       << 29);
 
-	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL14,
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL15,
 			   (PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 + offset),
 			   (PLL_PLL_LOCK_OVERRIDE + offset),
 			    0x4c, 0x80);
 	upper_addr |= (upper_8_bit(PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 + offset)
-		       << 28);
-	upper_addr |= (upper_8_bit(PLL_PLL_LOCK_OVERRIDE + offset) << 29);
+		       << 30);
+	upper_addr |= (upper_8_bit(PLL_PLL_LOCK_OVERRIDE + offset) << 31);
 
-	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL15,
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL16,
 			   (PLL_PFILT + offset),
 			   (PLL_IFILT + offset),
-			    0x2f, 0x3f);
-	upper_addr |= (upper_8_bit(PLL_PFILT + offset) << 30);
-	upper_addr |= (upper_8_bit(PLL_IFILT + offset) << 31);
-
-	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL16,
-			   (PLL_FREQ_TUNE_ACCUM_INIT_HIGH + offset),
-			   (PLL_FREQ_TUNE_ACCUM_INIT_MID + offset),
-			   rsc->cache_pll_trim_codes[0], rsc->cache_pll_trim_codes[1] );
-	upper_addr2 |= (upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_HIGH + offset) << 0);
-	upper_addr2 |= (upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_MID + offset) << 1);
+			    0x29, 0x3f);
+	upper_addr2 |= (upper_8_bit(PLL_PFILT + offset) << 0);
+	upper_addr2 |= (upper_8_bit(PLL_IFILT + offset) << 1);
 
 	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL17,
-			   (PLL_PLL_BAND_SEL_RATE_1 + offset),
-			   ( PLL_PLL_BAND_SEL_RATE_1+ offset),
-			   rsc->cache_pll_trim_codes[2], rsc->cache_pll_trim_codes[2]);
-	upper_addr2 |= (upper_8_bit(PLL_PLL_BAND_SEL_RATE_1 + offset) << 0);
-	upper_addr2 |= (upper_8_bit(PLL_PLL_BAND_SEL_RATE_1 + offset) << 1);
-
-	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL18,
 			   (PLL_SYSTEM_MUXES + offset),
 			   (PLL_CALIBRATION_SETTINGS + offset),
-			    0xc0, 0x40);
+			    0xe0, 0x44);
 	upper_addr2 |= (upper_8_bit(PLL_BAND_SEL_CAL + offset) << 2);
 	upper_addr2 |= (upper_8_bit(PLL_CALIBRATION_SETTINGS + offset) << 3);
+
 	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
-	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL18,
 			   PHY_CMN_CTRL_2, PHY_CMN_CLK_CFG0, 0x40, data);
+
 	if (rsc->slave)
 		MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
 				   DSI_DYNAMIC_REFRESH_PLL_CTRL10,
 				   PHY_CMN_CLK_CFG0, PHY_CMN_CTRL_0,
 				   data, 0x7f);
 
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
 	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL28,
 			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
-
 	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL29,
 			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);