
Merge remote-tracking branch 'origin/display-kernel.lnx.5.4' into display-kernel.lnx.5.10

* origin/display-kernel.lnx.5.4:
  disp: msm: dsi: avoid TE status check based on rechecks count
  disp: msm: sde: move vm-ownership check to crtc custom events
  disp: msm: sde: remove HARD_RESET recovery event on frame-timeouts only
  disp: msm: read mdp intf line count and trigger dma accordingly
  disp: msm: sde: use drm mode set by user-mode in trusted-vm
  Revert "disp: msm: sde: use mdp scratch register to pass drm mode info"
  disp: msm: sde: request hard reset on vsync timeout
  disp: msm: sde: reset sw state on vm transition
  disp: msm: sde: add vblank mutex lock during irq unregister
  disp: msm: sde: move qsync validation to encoder atomic check
  disp: msm: sde: add check to fix null pointer dereference
  disp: msm: sde: add mutex lock to handle ppdone timeout usecases
  disp: msm: sde: cache encoder_mask for vblank_work
  disp: msm: sde: fix TUI CRTC selection for dual-display
  disp: msm: sde: bound crtcs and encoders for TUI displays
  disp: msm: dsi: avoid TE-based panel status check in Trusted-vm
  disp: msm: dsi: enable ESD trigger through debugfs in trusted-vm
  disp: msm: dsi: parse & store gpio registers used by the host & panel
  disp: msm: sde: clear pending flushes after disable commit
  disp: msm: dsi: Add support for parsing mdp_intf base address from dt
  disp: msm: sde: fix missing error handling in VM ops
  disp: msm: sde: fix invalid dual-display TVM restriction check
  Revert "disp: msm: sde: fix race between disable commit and vblank work"
  disp: msm: dp: add error handling for host init failures
  disp: msm: hdcp: avoid sink message processing when hdcp is off
  disp: msm: dp: unify hpd event for sst and mst
  disp: msm: sde: use current crtc state during idle work scheduling
  disp: msm: dsi: recount drm mode count
  disp: msm: sde: add traces for lastclose
  disp: msm: sde: fix race between disable commit and vblank work
  disp: msm: dp: send hpd notification before updating mst_active
  disp: msm: avoid setting AMC and WAKE tags on icc vote

Change-Id: I690cba7c3c59e9d02a24160bd1b3d12660211405
Alisha Thapaliya committed 4 years ago
commit f9ce840444

+ 132 - 77
msm/dp/dp_display.c

@@ -807,7 +807,7 @@ static const struct component_ops dp_display_comp_ops = {
 	.unbind = dp_display_unbind,
 };
 
-static void dp_display_send_hpd_event(struct dp_display_private *dp)
+static bool dp_display_send_hpd_event(struct dp_display_private *dp)
 {
 	struct drm_device *dev = NULL;
 	struct drm_connector *connector;
@@ -816,24 +816,18 @@ static void dp_display_send_hpd_event(struct dp_display_private *dp)
 	char *envp[5];
 	int rc = 0;
 
-	if (dp->mst.mst_active) {
-		DP_DEBUG("skip notification for mst mode\n");
-		dp_display_state_remove(DP_STATE_DISCONNECT_NOTIFIED);
-		return;
-	}
-
 	connector = dp->dp_display.base_connector;
 
 	if (!connector) {
 		DP_ERR("connector not set\n");
-		return;
+		return false;
 	}
 
 	connector->status = connector->funcs->detect(connector, false);
 	if (dp->cached_connector_status == connector->status) {
 		DP_DEBUG("connector status (%d) unchanged, skipping uevent\n",
 				dp->cached_connector_status);
-		return;
+		return false;
 	}
 
 	dp->cached_connector_status = connector->status;
@@ -842,7 +836,7 @@ static void dp_display_send_hpd_event(struct dp_display_private *dp)
 
 	if (dp->debug->skip_uevent) {
 		DP_INFO("skipping uevent\n");
-		goto update_state;
+		return false;
 	}
 
 	snprintf(name, HPD_STRING_SIZE, "name=%s", connector->name);
@@ -864,14 +858,7 @@ static void dp_display_send_hpd_event(struct dp_display_private *dp)
 	rc = kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
 	DP_INFO("uevent %s: %d\n", rc ? "failure" : "success", rc);
 
-update_state:
-	if (connector->status == connector_status_connected) {
-		dp_display_state_add(DP_STATE_CONNECT_NOTIFIED);
-		dp_display_state_remove(DP_STATE_DISCONNECT_NOTIFIED);
-	} else {
-		dp_display_state_add(DP_STATE_DISCONNECT_NOTIFIED);
-		dp_display_state_remove(DP_STATE_CONNECT_NOTIFIED);
-	}
+	return true;
 }
 
 static int dp_display_send_hpd_notification(struct dp_display_private *dp)
@@ -900,13 +887,29 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp)
 
 	dp->aux->state |= DP_STATE_NOTIFICATION_SENT;
 
-	if (!dp->mst.mst_active)
+	reinit_completion(&dp->notification_comp);
+
+	if (!dp->mst.mst_active) {
 		dp->dp_display.is_sst_connected = hpd;
-	else
+
+		if (!dp_display_send_hpd_event(dp))
+			goto skip_wait;
+	} else {
 		dp->dp_display.is_sst_connected = false;
 
-	reinit_completion(&dp->notification_comp);
-	dp_display_send_hpd_event(dp);
+		if (!dp->mst.cbs.hpd)
+			goto skip_wait;
+
+		dp->mst.cbs.hpd(&dp->dp_display, true);
+	}
+
+	if (hpd) {
+		dp_display_state_add(DP_STATE_CONNECT_NOTIFIED);
+		dp_display_state_remove(DP_STATE_DISCONNECT_NOTIFIED);
+	} else {
+		dp_display_state_add(DP_STATE_DISCONNECT_NOTIFIED);
+		dp_display_state_remove(DP_STATE_CONNECT_NOTIFIED);
+	}
 
 	/*
 	 * Skip the wait if TUI is active considering that the user mode will
@@ -942,11 +945,9 @@ static void dp_display_update_mst_state(struct dp_display_private *dp,
 	dp->panel->mst_state = state;
 }
 
-static void dp_display_process_mst_hpd_high(struct dp_display_private *dp,
-						bool mst_probe)
+static void dp_display_mst_init(struct dp_display_private *dp)
 {
 	bool is_mst_receiver;
-	struct dp_mst_hpd_info info;
 	const unsigned long clear_mstm_ctrl_timeout_us = 100000;
 	u8 old_mstm_ctrl;
 	int ret;
@@ -957,60 +958,64 @@ static void dp_display_process_mst_hpd_high(struct dp_display_private *dp,
 		return;
 	}
 
-	DP_MST_DEBUG("mst_hpd_high work. mst_probe:%d\n", mst_probe);
+	is_mst_receiver = dp->panel->read_mst_cap(dp->panel);
 
-	if (!dp->mst.mst_active) {
-		is_mst_receiver = dp->panel->read_mst_cap(dp->panel);
+	if (!is_mst_receiver) {
+		DP_MST_DEBUG("sink doesn't support mst\n");
+		return;
+	}
 
-		if (!is_mst_receiver) {
-			DP_MST_DEBUG("sink doesn't support mst\n");
-			return;
-		}
+	/* clear sink mst state */
+	drm_dp_dpcd_readb(dp->aux->drm_aux, DP_MSTM_CTRL, &old_mstm_ctrl);
+	drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0);
 
-		/* clear sink mst state */
-		drm_dp_dpcd_readb(dp->aux->drm_aux, DP_MSTM_CTRL,
-				&old_mstm_ctrl);
-		drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0);
-
-		/* add extra delay if MST state is not cleared */
-		if (old_mstm_ctrl) {
-			DP_MST_DEBUG("MSTM_CTRL is not cleared, wait %dus\n",
-					clear_mstm_ctrl_timeout_us);
-			usleep_range(clear_mstm_ctrl_timeout_us,
-				clear_mstm_ctrl_timeout_us + 1000);
-		}
+	/* add extra delay if MST state is not cleared */
+	if (old_mstm_ctrl) {
+		DP_MST_DEBUG("MSTM_CTRL is not cleared, wait %luus\n",
+				clear_mstm_ctrl_timeout_us);
+		usleep_range(clear_mstm_ctrl_timeout_us,
+			clear_mstm_ctrl_timeout_us + 1000);
+	}
 
-		ret = drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL,
-				 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
-		if (ret < 0) {
-			DP_ERR("sink mst enablement failed\n");
-			return;
-		}
+	ret = drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL,
+				DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+	if (ret < 0) {
+		DP_ERR("sink mst enablement failed\n");
+		return;
+	}
 
-		dp_display_update_mst_state(dp, true);
-	} else if (dp->mst.mst_active && mst_probe) {
-		info.mst_protocol = dp->parser->has_mst_sideband;
-		info.mst_port_cnt = dp->debug->mst_port_cnt;
-		info.edid = dp->debug->get_edid(dp->debug);
+	dp_display_update_mst_state(dp, true);
+}
+
+static void dp_display_set_mst_mgr_state(struct dp_display_private *dp,
+					bool state)
+{
+	struct dp_mst_hpd_info info = {0};
 
-		if (dp->mst.cbs.set_mgr_state)
-			dp->mst.cbs.set_mgr_state(&dp->dp_display, true, &info);
+	if (!dp->mst.mst_active)
+		return;
 
-		if (dp->mst.cbs.hpd)
-			dp->mst.cbs.hpd(&dp->dp_display, true);
+	info.mst_protocol = dp->parser->has_mst_sideband;
+	if (state) {
+		info.mst_port_cnt = dp->debug->mst_port_cnt;
+		info.edid = dp->debug->get_edid(dp->debug);
 	}
 
-	DP_MST_DEBUG("mst_hpd_high. mst_active:%d\n", dp->mst.mst_active);
+	if (dp->mst.cbs.set_mgr_state)
+		dp->mst.cbs.set_mgr_state(&dp->dp_display, state, &info);
+
+	DP_MST_DEBUG("mst_mgr_state: %d\n", state);
 }
 
-static void dp_display_host_init(struct dp_display_private *dp)
+static int dp_display_host_init(struct dp_display_private *dp)
 {
 	bool flip = false;
 	bool reset;
+	int rc = 0;
 
 	if (dp_display_state_is(DP_STATE_INITIALIZED)) {
 		dp_display_state_log("[already initialized]");
-		return;
+		return rc;
 	}
 
 	if (dp->hpd->orientation == ORIENTATION_CC2)
@@ -1018,9 +1023,21 @@ static void dp_display_host_init(struct dp_display_private *dp)
 
 	reset = dp->debug->sim_mode ? false : !dp->hpd->multi_func;
 
-	dp->power->init(dp->power, flip);
+	rc = dp->power->init(dp->power, flip);
+	if (rc) {
+		DP_WARN("Power init failed.\n");
+		SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_CASE1, dp->state);
+		return rc;
+	}
+
 	dp->hpd->host_init(dp->hpd, &dp->catalog->hpd);
-	dp->ctrl->init(dp->ctrl, flip, reset);
+	rc = dp->ctrl->init(dp->ctrl, flip, reset);
+	if (rc) {
+		DP_WARN("Ctrl init failed.\n");
+		SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_CASE2, dp->state);
+		goto error_ctrl;
+	}
+
 	enable_irq(dp->irq);
 	dp_display_abort_hdcp(dp, false);
 
@@ -1028,6 +1045,12 @@ static void dp_display_host_init(struct dp_display_private *dp)
 
 	/* log this as it results from user action of cable connection */
 	DP_INFO("[OK]\n");
+	return rc;
+
+error_ctrl:
+	dp->hpd->host_deinit(dp->hpd, &dp->catalog->hpd);
+	dp->power->deinit(dp->power);
+	return rc;
 }
 
 static void dp_display_host_ready(struct dp_display_private *dp)
@@ -1148,7 +1171,24 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
 	 */
 	if (dp_display_state_is(DP_STATE_SRC_PWRDN) &&
 			dp_display_state_is(DP_STATE_CONFIGURED)) {
-		dp_display_host_init(dp);
+		rc = dp_display_host_init(dp);
+		if (rc) {
+			DP_WARN("Host init failed\n");
+			if (!dp_display_state_is(DP_STATE_SUSPENDED)) {
+				/*
+				 * If not suspended, there is no point in
+				 * proceeding when the resource is not enabled.
+				 */
+				dp_display_state_remove(DP_STATE_CONNECTED);
+			}
+			goto end;
+		}
+
+		/*
+		 * If the device is suspended and host_init fails, there is
+		 * one more chance for host init to happen in prepare, which
+		 * is why DP_STATE_SRC_PWRDN is removed only on success.
+		 */
 		dp_display_state_remove(DP_STATE_SRC_PWRDN);
 	}
 
@@ -1174,7 +1214,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
 	dp->link->process_request(dp->link);
 	dp->panel->handle_sink_request(dp->panel);
 
-	dp_display_process_mst_hpd_high(dp, false);
+	dp_display_mst_init(dp);
 
 	rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active,
 			dp->panel->fec_en, dp->panel->dsc_en, false);
@@ -1185,7 +1225,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
 
 	dp->process_hpd_connect = false;
 
-	dp_display_process_mst_hpd_high(dp, true);
+	dp_display_set_mst_mgr_state(dp, true);
 end:
 	mutex_unlock(&dp->session_lock);
 
@@ -1234,7 +1274,6 @@ skip_notify:
 static void dp_display_process_mst_hpd_low(struct dp_display_private *dp)
 {
 	int rc = 0;
-	struct dp_mst_hpd_info info = {0};
 
 	if (dp->mst.mst_active) {
 		DP_MST_DEBUG("mst_hpd_low work\n");
@@ -1250,17 +1289,12 @@ static void dp_display_process_mst_hpd_low(struct dp_display_private *dp)
 		if (dp->mst.cbs.hpd)
 			dp->mst.cbs.hpd(&dp->dp_display, false);
 
-		dp_display_update_mst_state(dp, false);
-
 		if ((dp_display_state_is(DP_STATE_CONNECT_NOTIFIED) ||
 				dp_display_state_is(DP_STATE_ENABLED)))
 			rc = dp_display_send_hpd_notification(dp);
 
-		if (dp->mst.cbs.set_mgr_state) {
-			info.mst_protocol = dp->parser->has_mst_sideband;
-			dp->mst.cbs.set_mgr_state(&dp->dp_display, false,
-					&info);
-		}
+		dp_display_update_mst_state(dp, false);
+		dp_display_set_mst_mgr_state(dp, false);
 	}
 
 	DP_MST_DEBUG("mst_hpd_low. mst_active:%d\n", dp->mst.mst_active);
@@ -1326,7 +1360,12 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
 	dp_display_state_remove(DP_STATE_ABORTED);
 	dp_display_state_add(DP_STATE_CONFIGURED);
 
-	dp_display_host_init(dp);
+	rc = dp_display_host_init(dp);
+	if (rc) {
+		DP_ERR("Host init failed\n");
+		mutex_unlock(&dp->session_lock);
+		return rc;
+	}
 
 	/* check for hpd high */
 	if (dp->hpd->hpd_high)
@@ -2119,7 +2158,23 @@ static int dp_display_prepare(struct dp_display *dp_display, void *panel)
 	 */
 	if (dp_display_state_is(DP_STATE_SRC_PWRDN) &&
 			dp_display_state_is(DP_STATE_CONFIGURED)) {
-		dp_display_host_init(dp);
+		rc = dp_display_host_init(dp);
+		if (rc) {
+			/*
+			 * Skip all the events, as in the abort case, except
+			 * that the stream clocks should stay enabled so that
+			 * no commit failure is seen.
+			 */
+			DP_ERR("Host init failed.\n");
+			goto end;
+		}
+
+		/*
+		 * Remove the DP_STATE_SRC_PWRDN flag only on successful
+		 * host_init so that a retry remains possible, e.g.:
+		 * 1. MST stream 1 fails host init; stream 2 can then retry.
+		 * 2. The resume path fails; the sink then sends hpd_high=0
+		 *    followed by hpd_high=1.
+		 */
 		dp_display_state_remove(DP_STATE_SRC_PWRDN);
 	}
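
Net effect of the dp_display.c changes above: dp_display_send_hpd_event() now only reports whether a uevent was actually sent, and dp_display_send_hpd_notification() owns the CONNECT/DISCONNECT_NOTIFIED bookkeeping for both SST and MST. A minimal standalone sketch of that shape; the stub names (notify_sst, notify_mst, update_notified_state) and the skip_wait placement are assumptions, not driver API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver hooks; not the real dp_display API. */
static bool notify_sst(bool hpd)
{
	printf("uevent, hpd=%d\n", hpd);  /* dp_display_send_hpd_event() */
	return true;                      /* false when the uevent is skipped */
}

static bool notify_mst(bool hpd)
{
	printf("mst cb, hpd=%d\n", hpd);  /* dp->mst.cbs.hpd() */
	return true;
}

static void update_notified_state(bool hpd)
{
	/* stands in for the DP_STATE_(DIS)CONNECT_NOTIFIED bookkeeping */
	printf("%s notified\n", hpd ? "connect" : "disconnect");
}

/* Shape of dp_display_send_hpd_notification() after the patch: one
 * notified-state update, keyed off hpd, shared by the SST and MST paths. */
static void send_hpd_notification(bool mst_active, bool hpd)
{
	bool sent;

	if (!mst_active)
		sent = notify_sst(hpd);
	else
		sent = notify_mst(hpd);

	if (!sent)
		return;  /* 'goto skip_wait' in the driver (placement assumed) */

	update_notified_state(hpd);
	/* then wait on dp->notification_comp */
}

int main(void)
{
	send_hpd_notification(false, true);  /* SST connect    */
	send_hpd_notification(true, false);  /* MST disconnect */
	return 0;
}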
 

+ 0 - 3
msm/dsi/dsi_catalog.c

@@ -83,7 +83,6 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
 		ctrl->ops.config_clk_gating = NULL;
 		ctrl->ops.configure_cmddma_window = NULL;
 		ctrl->ops.reset_trig_ctrl = NULL;
-		ctrl->ops.map_mdp_regs = NULL;
 		ctrl->ops.log_line_count = NULL;
 		break;
 	case DSI_CTRL_VERSION_2_0:
@@ -102,7 +101,6 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
 		ctrl->ops.config_clk_gating = NULL;
 		ctrl->ops.configure_cmddma_window = NULL;
 		ctrl->ops.reset_trig_ctrl = NULL;
-		ctrl->ops.map_mdp_regs = NULL;
 		ctrl->ops.log_line_count = NULL;
 		break;
 	case DSI_CTRL_VERSION_2_2:
@@ -129,7 +127,6 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
 			dsi_ctrl_hw_22_configure_cmddma_window;
 		ctrl->ops.reset_trig_ctrl =
 			dsi_ctrl_hw_22_reset_trigger_controls;
-		ctrl->ops.map_mdp_regs = dsi_ctrl_hw_22_map_mdp_regs;
 		ctrl->ops.log_line_count = dsi_ctrl_hw_22_log_line_count;
 		break;
 	default:

+ 0 - 2
msm/dsi/dsi_catalog.h

@@ -283,7 +283,5 @@ void dsi_ctrl_hw_22_configure_cmddma_window(struct dsi_ctrl_hw *ctrl,
 		u32 line_no, u32 window);
 void dsi_ctrl_hw_22_reset_trigger_controls(struct dsi_ctrl_hw *ctrl,
 				       struct dsi_host_common_cfg *cfg);
-int dsi_ctrl_hw_22_map_mdp_regs(struct platform_device *pdev,
-		struct dsi_ctrl_hw *ctrl);
 u32 dsi_ctrl_hw_22_log_line_count(struct dsi_ctrl_hw *ctrl, bool cmd_mode);
 #endif /* _DSI_CATALOG_H_ */

+ 69 - 4
msm/dsi/dsi_ctrl.c

@@ -637,6 +637,7 @@ static int dsi_ctrl_init_regmap(struct platform_device *pdev,
 		}
 		ctrl->hw.mmss_misc_base = ptr;
 		ctrl->hw.disp_cc_base = NULL;
+		ctrl->hw.mdp_intf_base = NULL;
 		break;
 	case DSI_CTRL_VERSION_2_2:
 	case DSI_CTRL_VERSION_2_3:
@@ -650,6 +651,10 @@ static int dsi_ctrl_init_regmap(struct platform_device *pdev,
 		}
 		ctrl->hw.disp_cc_base = ptr;
 		ctrl->hw.mmss_misc_base = NULL;
+
+		ptr = msm_ioremap(pdev, "mdp_intf_base", ctrl->name);
+		if (!IS_ERR(ptr))
+			ctrl->hw.mdp_intf_base = ptr;
 		break;
 	default:
 		break;
@@ -1334,6 +1339,25 @@ static void dsi_configure_command_scheduling(struct dsi_ctrl *dsi_ctrl,
 			sched_line_no, window);
 }
 
+static u32 calculate_schedule_line(struct dsi_ctrl *dsi_ctrl, u32 flags)
+{
+	u32 line_no = 0x1;
+	struct dsi_mode_info *timing;
+
+	/* check if custom dma scheduling line needed */
+	if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
+		(flags & DSI_CTRL_CMD_CUSTOM_DMA_SCHED))
+		line_no = dsi_ctrl->host_config.common_config.dma_sched_line;
+
+	timing = &(dsi_ctrl->host_config.video_timing);
+
+	if (timing)
+		line_no += timing->v_back_porch + timing->v_sync_width +
+			timing->v_active;
+
+	return line_no;
+}
+
 static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
 				const struct mipi_dsi_msg *msg,
 				struct dsi_ctrl_cmd_dma_fifo_info *cmd,
@@ -2100,9 +2124,6 @@ static int dsi_ctrl_dev_probe(struct platform_device *pdev)
 		goto fail_clks;
 	}
 
-	if (dsi_ctrl->hw.ops.map_mdp_regs)
-		dsi_ctrl->hw.ops.map_mdp_regs(pdev, &dsi_ctrl->hw);
-
 	item->ctrl = dsi_ctrl;
 	sde_dbg_dsi_ctrl_register(dsi_ctrl->hw.base, dsi_ctrl->name);
 
@@ -3389,6 +3410,10 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
 {
 	int rc = 0;
 	struct dsi_ctrl_hw_ops dsi_hw_ops;
+	u32 v_total = 0, fps = 0, cur_line = 0, mem_latency_us = 100;
+	u32 line_time = 0, schedule_line = 0x1, latency_by_line = 0;
+	struct dsi_mode_info *timing;
+	unsigned long flag;
 
 	if (!dsi_ctrl) {
 		DSI_CTRL_ERR(dsi_ctrl, "Invalid params\n");
@@ -3404,6 +3429,18 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
 
 	mutex_lock(&dsi_ctrl->ctrl_lock);
 
+	timing = &(dsi_ctrl->host_config.video_timing);
+
+	if (timing &&
+		(dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE)) {
+		v_total = timing->v_sync_width + timing->v_back_porch +
+			timing->v_front_porch + timing->v_active;
+		fps = timing->refresh_rate;
+		schedule_line = calculate_schedule_line(dsi_ctrl, flags);
+		line_time = (1000000 / fps) / v_total;
+		latency_by_line = CEIL(mem_latency_us, line_time);
+	}
+
 	if (!(flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
 		dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
 		if (dsi_ctrl->enable_cmd_dma_stats) {
@@ -3426,7 +3463,35 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
 		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
 
 		/* trigger command */
-		dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
+		if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
+			dsi_hw_ops.schedule_dma_cmd &&
+			(dsi_ctrl->current_state.vid_engine_state ==
+			DSI_CTRL_ENGINE_ON)) {
+			/*
+			 * This change reads the video line count from
+			 * MDP_INTF_LINE_COUNT register and checks whether
+			 * DMA trigger happens close to the schedule line.
+			 * If it is not close to the schedule line, then DMA
+			 * command transfer is triggered.
+			 */
+			while (1) {
+				local_irq_save(flag);
+				cur_line =
+				dsi_hw_ops.log_line_count(&dsi_ctrl->hw,
+					dsi_ctrl->cmd_mode);
+				if (cur_line <
+					(schedule_line - latency_by_line) ||
+					cur_line > (schedule_line + 1)) {
+					dsi_hw_ops.trigger_command_dma(
+						&dsi_ctrl->hw);
+					local_irq_restore(flag);
+					break;
+				}
+				local_irq_restore(flag);
+				udelay(1000);
+			}
+		} else
+			dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
 
 		if (dsi_ctrl->enable_cmd_dma_stats) {
 			u32 reg = dsi_hw_ops.log_line_count(&dsi_ctrl->hw,
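
To sanity-check calculate_schedule_line() and the hold-off window in dsi_ctrl_cmd_tx_trigger() above, here is the arithmetic for an assumed 1080p video panel at 60 fps. The timings are hypothetical, the default line_no = 0x1 applies (no DSI_CTRL_CMD_CUSTOM_DMA_SCHED), and the CEIL macro is assumed to be the usual rounding-up division:

#include <stdio.h>

#define CEIL(x, y) (((x) + ((y) - 1)) / (y))  /* assumed to match the driver macro */

int main(void)
{
	/* hypothetical 1080p@60 video-mode timings, for illustration only */
	unsigned int v_sync = 4, v_bp = 8, v_fp = 8, v_active = 1080, fps = 60;
	unsigned int mem_latency_us = 100;  /* as in dsi_ctrl_cmd_tx_trigger() */

	unsigned int v_total = v_sync + v_bp + v_fp + v_active;        /* 1100  */
	unsigned int schedule_line = 1 + v_bp + v_sync + v_active;     /* 1093  */
	unsigned int line_time = (1000000 / fps) / v_total;            /* 15 us */
	unsigned int latency = CEIL(mem_latency_us, line_time);        /* 7     */

	/* the DMA trigger is held off while the line count sits in this window */
	printf("hold-off window: [%u, %u]\n",
	       schedule_line - latency, schedule_line + 1);
	return 0;
}

With these numbers the loop busy-waits only while the read line count falls in [1086, 1094]; anywhere else it triggers the DMA transfer immediately.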

+ 6 - 17
msm/dsi/dsi_ctrl_hw.h

@@ -860,14 +860,6 @@ struct dsi_ctrl_hw_ops {
 	void (*reset_trig_ctrl)(struct dsi_ctrl_hw *ctrl,
 			struct dsi_host_common_cfg *cfg);
 
-	/**
-	 * hw.ops.map_mdp_regs() - maps MDP interface line count registers.
-	 * @pdev:	Pointer to platform device.
-	 * @ctrl:	Pointer to the controller host hardware.
-	 */
-	int (*map_mdp_regs)(struct platform_device *pdev,
-			struct dsi_ctrl_hw *ctrl);
-
 	/**
 	 * hw.ops.log_line_count() - reads the MDP interface line count
 	 *			     registers.
@@ -885,13 +877,11 @@ struct dsi_ctrl_hw_ops {
  * @mmss_misc_length:       Length of mmss_misc register map.
  * @disp_cc_base:           Base address of disp_cc register map.
  * @disp_cc_length:         Length of disp_cc register map.
- * @te_rd_ptr_reg:	    Address of MDP_TEAR_INTF_TEAR_LINE_COUNT. This
- *			    register is used for testing and validating the RD
- *			    ptr value when a CMD is triggered and it succeeds.
- * @line_count_reg:	    Address of MDP_TEAR_INTF_LINE_COUNT. This
- *			    register is used for testing and validating the
- *			    line count value when a CMD is triggered and it
- *			    succeeds.
+ * @mdp_intf_base:	    Base address of mdp_intf register map. Addresses of
+ *			    MDP_TEAR_INTF_TEAR_LINE_COUNT and MDP_TEAR_INTF_LINE_COUNT
+ *			    are mapped using the base address to test and validate
+ *			    the RD ptr value and line count value respectively when
+ *			    a CMD is triggered and it succeeds.
  * @index:                  Instance ID of the controller.
  * @feature_map:            Features supported by the DSI controller.
  * @ops:                    Function pointers to the operations supported by the
@@ -912,9 +902,8 @@ struct dsi_ctrl_hw {
 	void __iomem *mmss_misc_base;
 	u32 mmss_misc_length;
 	void __iomem *disp_cc_base;
-	void __iomem *te_rd_ptr_reg;
-	void __iomem *line_count_reg;
 	u32 disp_cc_length;
+	void __iomem *mdp_intf_base;
 	u32 index;
 
 	/* features */

+ 12 - 64
msm/dsi/dsi_ctrl_hw_2_2.c

@@ -14,11 +14,10 @@
 #define DSI_DMA_SCHEDULE_CTRL 0x100
 #define DSI_DMA_SCHEDULE_CTRL2 0x0104
 
-/* MDP INTF registers to be mapped for debug feature*/
-#define MDP_INTF1_TEAR_LINE_COUNT 0xAE36298
-#define MDP_INTF2_TEAR_LINE_COUNT 0xAE37298
-#define MDP_INTF1_LINE_COUNT 0xAE360B0
-#define MDP_INTF2_LINE_COUNT 0xAE370B0
+/* offset addresses of MDP INTF base register, to be mapped for debug feature */
+#define MDP_INTF_TEAR_OFFSET 0x280
+#define MDP_INTF_TEAR_LINE_COUNT_OFFSET 0x30
+#define MDP_INTF_LINE_COUNT_OFFSET 0xB0
 
 void dsi_ctrl_hw_22_setup_lane_map(struct dsi_ctrl_hw *ctrl,
 		       struct dsi_lane_map *lane_map)
@@ -260,61 +259,6 @@ void dsi_ctrl_hw_22_reset_trigger_controls(struct dsi_ctrl_hw *ctrl,
 	ctrl->reset_trig_ctrl = false;
 }
 
-/**
- * dsi_ctrl_hw_22_map_mdp_regs() - maps MDP interface line count registers.
- * @pdev:	Pointer to platform device.
- * @ctrl:	Pointer to the controller host hardware.
- *
- * Return: 0 on success and error on failure.
- */
-int dsi_ctrl_hw_22_map_mdp_regs(struct platform_device *pdev,
-		struct dsi_ctrl_hw *ctrl)
-{
-	int rc = 0;
-	void __iomem *ptr = NULL, *ptr1 = NULL;
-
-	if (ctrl->index == 0) {
-		ptr = devm_ioremap(&pdev->dev, MDP_INTF1_TEAR_LINE_COUNT, 1);
-		if (IS_ERR_OR_NULL(ptr)) {
-			DSI_CTRL_HW_ERR(ctrl,
-				"MDP TE LINE COUNT address not found\n");
-			rc = PTR_ERR(ptr);
-			return rc;
-		}
-
-		ptr1 = devm_ioremap(&pdev->dev, MDP_INTF1_LINE_COUNT, 1);
-		if (IS_ERR_OR_NULL(ptr1)) {
-			DSI_CTRL_HW_ERR(ctrl,
-				"MDP TE LINE COUNT address not found\n");
-			rc = PTR_ERR(ptr1);
-			return rc;
-		}
-	}
-
-	if (ctrl->index == 1) {
-		ptr = devm_ioremap(&pdev->dev, MDP_INTF2_TEAR_LINE_COUNT, 1);
-		if (IS_ERR_OR_NULL(ptr)) {
-			DSI_CTRL_HW_ERR(ctrl,
-				"MDP TE LINE COUNT address not found\n");
-			rc = PTR_ERR(ptr);
-			return rc;
-		}
-
-		ptr1 = devm_ioremap(&pdev->dev, MDP_INTF2_LINE_COUNT, 1);
-		if (IS_ERR_OR_NULL(ptr1)) {
-			DSI_CTRL_HW_ERR(ctrl,
-				"MDP TE LINE COUNT address not found\n");
-			rc = PTR_ERR(ptr1);
-			return rc;
-		}
-	}
-
-	ctrl->te_rd_ptr_reg = ptr;
-	ctrl->line_count_reg = ptr1;
-
-	return rc;
-}
-
 /**
  * dsi_ctrl_hw_22_log_line_count() - reads the MDP interface line count
  *				     registers.
@@ -328,10 +272,14 @@ u32 dsi_ctrl_hw_22_log_line_count(struct dsi_ctrl_hw *ctrl, bool cmd_mode)
 
 	u32 reg = 0;
 
-	if (cmd_mode && ctrl->te_rd_ptr_reg)
-		reg = readl_relaxed(ctrl->te_rd_ptr_reg);
-	else if (ctrl->line_count_reg)
-		reg = readl_relaxed(ctrl->line_count_reg);
+	if (IS_ERR_OR_NULL(ctrl->mdp_intf_base))
+		return reg;
 
+	if (cmd_mode)
+		reg = readl_relaxed(ctrl->mdp_intf_base + MDP_INTF_TEAR_OFFSET
+					+ MDP_INTF_TEAR_LINE_COUNT_OFFSET);
+	else
+		reg = readl_relaxed(ctrl->mdp_intf_base
+					+ MDP_INTF_LINE_COUNT_OFFSET);
 	return reg;
 }
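
With the absolute MDP_INTF1/2 addresses gone, dsi_ctrl_hw_22_log_line_count() derives both registers from the single mdp_intf base mapped in dsi_ctrl_init_regmap(). A small sketch of the offset arithmetic; the base value below is hypothetical, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

#define MDP_INTF_TEAR_OFFSET            0x280
#define MDP_INTF_TEAR_LINE_COUNT_OFFSET 0x30
#define MDP_INTF_LINE_COUNT_OFFSET      0xB0

int main(void)
{
	uint64_t mdp_intf_base = 0xAE36000;  /* hypothetical DT-provided base */

	printf("tear line count: 0x%llx\n",
	       (unsigned long long)(mdp_intf_base + MDP_INTF_TEAR_OFFSET +
				    MDP_INTF_TEAR_LINE_COUNT_OFFSET));
	printf("line count:      0x%llx\n",
	       (unsigned long long)(mdp_intf_base +
				    MDP_INTF_LINE_COUNT_OFFSET));
	return 0;
}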

+ 0 - 1
msm/dsi/dsi_ctrl_hw_cmn.c

@@ -845,7 +845,6 @@ void dsi_ctrl_hw_cmn_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl)
 void dsi_ctrl_hw_cmn_trigger_command_dma(struct dsi_ctrl_hw *ctrl)
 {
 	DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
-	DSI_CTRL_HW_DBG(ctrl, "CMD DMA triggered\n");
 }
 
 /**

+ 7 - 2
msm/dsi/dsi_display.c

@@ -800,6 +800,9 @@ static int dsi_display_status_check_te(struct dsi_display *display,
 	int rc = 1, i = 0;
 	int const esd_te_timeout = msecs_to_jiffies(3*20);
 
+	if (!rechecks)
+		return rc;
+
 	dsi_display_change_te_irq_status(display, true);
 
 	for (i = 0; i < rechecks; i++) {
@@ -858,7 +861,8 @@ int dsi_display_check_status(struct drm_connector *connector, void *display,
 	if (te_check_override)
 		te_rechecks = MAX_TE_RECHECKS;
 
-	if (panel->panel_mode == DSI_OP_VIDEO_MODE)
+	if ((dsi_display->trusted_vm_env) ||
+			(panel->panel_mode == DSI_OP_VIDEO_MODE))
 		te_rechecks = 0;
 
 	ret = dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
@@ -1529,7 +1533,8 @@ static ssize_t debugfs_esd_trigger_check(struct file *file,
 
 	if (display->esd_trigger) {
 		DSI_INFO("ESD attack triggered by user\n");
-		rc = dsi_panel_trigger_esd_attack(display->panel);
+		rc = dsi_panel_trigger_esd_attack(display->panel,
+						display->trusted_vm_env);
 		if (rc) {
 			DSI_ERR("Failed to trigger ESD attack\n");
 			goto error;

+ 5 - 2
msm/dsi/dsi_drm.c

@@ -1152,7 +1152,7 @@ void dsi_conn_set_allowed_mode_switch(struct drm_connector *connector,
 	struct list_head *mode_list = &connector->modes;
 	struct dsi_display *disp = display;
 	struct dsi_panel *panel;
-	int mode_count, rc = 0;
+	int mode_count = 0, rc = 0;
 	struct dsi_display_mode_priv_info *dsi_mode_info, *cmp_dsi_mode_info;
 	bool allow_switch = false;
 
@@ -1162,7 +1162,8 @@ void dsi_conn_set_allowed_mode_switch(struct drm_connector *connector,
 	}
 
 	panel = disp->panel;
-	mode_count = panel->num_display_modes;
+	list_for_each_entry(drm_mode, &connector->modes, head)
+		mode_count++;
 
 	list_for_each_entry(drm_mode, &connector->modes, head) {
 
@@ -1180,6 +1181,8 @@ void dsi_conn_set_allowed_mode_switch(struct drm_connector *connector,
 		mode_list = mode_list->next;
 		cmp_mode_idx = 1;
 		list_for_each_entry(cmp_drm_mode, mode_list, head) {
+			if (&cmp_drm_mode->head == &connector->modes)
+				continue;
 			convert_to_dsi_mode(cmp_drm_mode, &dsi_mode);
 
 			rc = dsi_display_find_mode(display, &dsi_mode,
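
Two fixes land in dsi_conn_set_allowed_mode_switch() above: mode_count now comes from walking connector->modes (the DRM-visible list) rather than panel->num_display_modes, and the inner comparison walk, which starts mid-list, skips the list head when it wraps around. A toy circular list (simplified node type, not the kernel's list_head) shows why that head guard is needed:

#include <stdio.h>

struct node { struct node *next; int id; };

int main(void)
{
	/* circular list with a dedicated head node, like struct list_head */
	struct node head, a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	head.next = &a; a.next = &b; b.next = &c; c.next = &head;

	/* walk starting after 'a' (mid-list): it wraps past 'head', which
	 * holds no payload and must be skipped, as in the patch */
	for (struct node *p = a.next; p != &a; p = p->next) {
		if (p == &head)
			continue;
		printf("compare against mode %d\n", p->id);
	}
	return 0;
}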

+ 96 - 64
msm/dsi/dsi_panel.c

@@ -191,29 +191,52 @@ static int dsi_panel_gpio_release(struct dsi_panel *panel)
 	return rc;
 }
 
-int dsi_panel_trigger_esd_attack(struct dsi_panel *panel)
+int dsi_panel_trigger_esd_attack(struct dsi_panel *panel, bool trusted_vm_env)
 {
-	struct dsi_panel_reset_config *r_config;
-
 	if (!panel) {
 		DSI_ERR("Invalid panel param\n");
 		return -EINVAL;
 	}
 
-	r_config = &panel->reset_config;
-	if (!r_config) {
-		DSI_ERR("Invalid panel reset configuration\n");
-		return -EINVAL;
-	}
+	/* toggle reset-gpio by writing directly to register in trusted-vm */
+	if (trusted_vm_env) {
+		struct dsi_tlmm_gpio *gpio = NULL;
+		void __iomem *io;
+		u32 offset = 0x4;
+		int i;
 
-	if (gpio_is_valid(r_config->reset_gpio)) {
+		for (i = 0; i < panel->tlmm_gpio_count; i++)
+			if (!strcmp(panel->tlmm_gpio[i].name, "reset-gpio"))
+				gpio = &panel->tlmm_gpio[i];
+
+		if (!gpio) {
+			DSI_ERR("reset gpio not found\n");
+			return -EINVAL;
+		}
+
+		io = ioremap(gpio->addr, gpio->size);
+		if (!io) {
+			DSI_ERR("failed to map reset-gpio register\n");
+			return -EINVAL;
+		}
+		writel_relaxed(0, io + offset);
+		iounmap(io);
+
+	} else {
+		struct dsi_panel_reset_config *r_config = &panel->reset_config;
+
+		if (!r_config) {
+			DSI_ERR("Invalid panel reset configuration\n");
+			return -EINVAL;
+		}
+
+		if (!gpio_is_valid(r_config->reset_gpio)) {
+			DSI_ERR("failed to pull down gpio\n");
+			return -EINVAL;
+		}
 		gpio_set_value(r_config->reset_gpio, 0);
-		SDE_EVT32(SDE_EVTLOG_FUNC_CASE1);
-		DSI_INFO("GPIO pulled low to simulate ESD\n");
-		return 0;
 	}
-	DSI_ERR("failed to pull down gpio\n");
-	return -EINVAL;
+
+	SDE_EVT32(SDE_EVTLOG_FUNC_CASE1);
+	DSI_INFO("GPIO pulled low to simulate ESD\n");
+
+	return 0;
 }
 
 static int dsi_panel_reset(struct dsi_panel *panel)
@@ -2130,63 +2153,19 @@ int dsi_panel_get_io_resources(struct dsi_panel *panel,
 	struct list_head temp_head;
 	struct msm_io_mem_entry *io_mem, *pos, *tmp;
 	struct list_head *mem_list = &io_res->mem;
-	int i, rc = 0, address_count, pin_count;
-	u32 *pins = NULL, *address = NULL;
-	u32 base, size;
-	struct dsi_parser_utils *utils = &panel->utils;
+	int i, rc = 0;
 
 	INIT_LIST_HEAD(&temp_head);
 
-	address_count = utils->count_u32_elems(utils->data,
-				"qcom,dsi-panel-gpio-address");
-	if (address_count != 2) {
-		DSI_DEBUG("panel gpio address not defined\n");
-		return 0;
-	}
-
-	address =  kzalloc(sizeof(u32) * address_count, GFP_KERNEL);
-	if (!address)
-		return -ENOMEM;
-
-	rc = utils->read_u32_array(utils->data, "qcom,dsi-panel-gpio-address",
-				address, address_count);
-	if (rc) {
-		DSI_ERR("panel gpio address not defined correctly\n");
-		goto end;
-	}
-	base = address[0];
-	size = address[1];
-
-	pin_count = utils->count_u32_elems(utils->data,
-				"qcom,dsi-panel-gpio-pins");
-	if (pin_count < 0) {
-		DSI_ERR("panel gpio pins not defined\n");
-		rc = pin_count;
-		goto end;
-	}
-
-	pins =  kzalloc(sizeof(u32) * pin_count, GFP_KERNEL);
-	if (!pins) {
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rc = utils->read_u32_array(utils->data, "qcom,dsi-panel-gpio-pins",
-				pins, pin_count);
-	if (rc) {
-		DSI_ERR("panel gpio pins not defined correctly\n");
-		goto end;
-	}
-
-	for (i = 0; i < pin_count; i++) {
+	for (i = 0; i < panel->tlmm_gpio_count; i++) {
 		io_mem = kzalloc(sizeof(*io_mem), GFP_KERNEL);
 		if (!io_mem) {
 			rc = -ENOMEM;
 			goto parse_fail;
 		}
 
-		io_mem->base = base + (pins[i] * size);
-		io_mem->size = size;
+		io_mem->base = panel->tlmm_gpio[i].addr;
+		io_mem->size = panel->tlmm_gpio[i].size;
 
 		list_add(&io_mem->list, &temp_head);
 	}
@@ -2200,8 +2179,6 @@ parse_fail:
 		kzfree(pos);
 	}
 end:
-	kzfree(pins);
-	kzfree(address);
 	return rc;
 }
 
@@ -2290,6 +2267,54 @@ error:
 	return rc;
 }
 
+static int dsi_panel_parse_tlmm_gpio(struct dsi_panel *panel)
+{
+	struct dsi_parser_utils *utils = &panel->utils;
+	u32 base, size, pin;
+	int pin_count, address_count, name_count, i;
+
+	address_count = of_property_count_u32_elems(utils->data,
+				"qcom,dsi-panel-gpio-address");
+	if (address_count != 2) {
+		DSI_DEBUG("panel gpio address not defined\n");
+		return 0;
+	}
+
+	of_property_read_u32_index(utils->data,
+			"qcom,dsi-panel-gpio-address", 0, &base);
+	of_property_read_u32_index(utils->data,
+			"qcom,dsi-panel-gpio-address", 1, &size);
+
+	pin_count = of_property_count_u32_elems(utils->data,
+				"qcom,dsi-panel-gpio-pins");
+	name_count = of_property_count_strings(utils->data,
+				"qcom,dsi-panel-gpio-names");
+	if ((pin_count < 0) || (name_count < 0) || (pin_count != name_count)) {
+		DSI_ERR("invalid gpio pins/names\n");
+		return -EINVAL;
+	}
+
+	panel->tlmm_gpio = kcalloc(pin_count,
+				sizeof(struct dsi_tlmm_gpio), GFP_KERNEL);
+	if (!panel->tlmm_gpio)
+		return -ENOMEM;
+
+	panel->tlmm_gpio_count = pin_count;
+	for (i = 0; i < pin_count; i++) {
+		of_property_read_u32_index(utils->data,
+				"qcom,dsi-panel-gpio-pins", i, &pin);
+		panel->tlmm_gpio[i].num = pin;
+		panel->tlmm_gpio[i].addr = base + (pin * size);
+		panel->tlmm_gpio[i].size = size;
+
+		of_property_read_string_index(utils->data,
+				"qcom,dsi-panel-gpio-names", i,
+				&(panel->tlmm_gpio[i].name));
+	}
+
+	return 0;
+}
+
 static int dsi_panel_parse_bl_pwm_config(struct dsi_panel *panel)
 {
 	int rc = 0;
@@ -3487,6 +3512,12 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
 		goto error;
 	}
 
+	rc = dsi_panel_parse_tlmm_gpio(panel);
+	if (rc) {
+		DSI_ERR("failed to parse panel tlmm gpios, rc=%d\n", rc);
+		goto error;
+	}
+
 	rc = dsi_panel_parse_power_cfg(panel);
 	if (rc)
 		DSI_ERR("failed to parse power config, rc=%d\n", rc);
@@ -3645,6 +3676,7 @@ int dsi_panel_drv_deinit(struct dsi_panel *panel)
 	if (rc)
 		DSI_ERR("[%s] failed to put regs, rc=%d\n", panel->name, rc);
 
+	kfree(panel->tlmm_gpio);
 	panel->host = NULL;
 	memset(&panel->mipi_device, 0x0, sizeof(panel->mipi_device));
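
dsi_panel_parse_tlmm_gpio() above turns the two-element qcom,dsi-panel-gpio-address property into per-pin register windows: each pin's block sits at base + pin * size, and the parallel -names property is what lets the trusted-vm ESD path look up "reset-gpio" by name. A sketch of the address math under assumed DT values:

#include <stdio.h>

int main(void)
{
	/* hypothetical DT values mirroring qcom,dsi-panel-gpio-address,
	 * -pins and -names; illustrative only, not from a real panel dtsi:
	 *   address = <0xF100000 0x1000>;
	 *   pins    = <38 81>;
	 *   names   = "reset-gpio", "te-gpio";
	 */
	unsigned int base = 0xF100000, size = 0x1000;
	unsigned int pins[] = { 38, 81 };
	const char *names[] = { "reset-gpio", "te-gpio" };
	int i;

	for (i = 0; i < 2; i++)
		printf("%s: tlmm reg 0x%x, size 0x%x\n",
		       names[i], base + pins[i] * size, size);
	return 0;
}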
 

+ 11 - 1
msm/dsi/dsi_panel.h

@@ -182,6 +182,13 @@ struct dsi_panel_spr_info {
 	enum msm_display_spr_pack_type pack_type;
 };
 
+struct dsi_tlmm_gpio {
+	u32 num;
+	u32 addr;
+	u32 size;
+	const char *name;
+};
+
 struct dsi_panel;
 
 struct dsi_panel_ops {
@@ -253,6 +260,9 @@ struct dsi_panel {
 	int power_mode;
 	enum dsi_panel_physical_type panel_type;
 
+	struct dsi_tlmm_gpio *tlmm_gpio;
+	u32 tlmm_gpio_count;
+
 	struct dsi_panel_ops panel_ops;
 };
 
@@ -288,7 +298,7 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
 				int topology_override,
 				bool trusted_vm_env);
 
-int dsi_panel_trigger_esd_attack(struct dsi_panel *panel);
+int dsi_panel_trigger_esd_attack(struct dsi_panel *panel, bool trusted_vm_env);
 
 void dsi_panel_put(struct dsi_panel *panel);
 

+ 0 - 1
msm/msm_atomic.c

@@ -156,7 +156,6 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 			old_conn_state, i) {
 		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
-		struct drm_crtc_state *old_crtc_state;
 
 		/*
 		 * Shut down everything that's in the changeset and currently

+ 1 - 4
msm/msm_drv.c

@@ -366,7 +366,6 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 					int crtc_id, bool enable)
 {
 	struct vblank_work *cur_work;
-	struct drm_crtc *crtc;
 	struct kthread_worker *worker;
 
 	if (!priv || crtc_id >= priv->num_crtcs)
@@ -376,8 +375,6 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 	if (!cur_work)
 		return -ENOMEM;
 
-	crtc = priv->crtcs[crtc_id];
-
 	kthread_init_work(&cur_work->work, vblank_ctrl_worker);
 	cur_work->crtc_id = crtc_id;
 	cur_work->enable = enable;
@@ -897,7 +894,7 @@ static int msm_drm_component_init(struct device *dev)
 	drm_mode_config_reset(ddev);
 
 	if (kms && kms->funcs && kms->funcs->cont_splash_config) {
-		ret = kms->funcs->cont_splash_config(kms);
+		ret = kms->funcs->cont_splash_config(kms, NULL);
 		if (ret) {
 			dev_err(dev, "kms cont_splash config failed.\n");
 			goto fail;

+ 2 - 1
msm/msm_kms.h

@@ -121,7 +121,8 @@ struct msm_kms_funcs {
 	/* destroys debugfs */
 	void (*debugfs_destroy)(struct msm_kms *kms);
 	/* handle continuous splash  */
-	int (*cont_splash_config)(struct msm_kms *kms);
+	int (*cont_splash_config)(struct msm_kms *kms,
+			struct drm_atomic_state *state);
 	/* check for continuous splash status */
 	bool (*check_for_splash)(struct msm_kms *kms);
 	/* topology lm information */

+ 0 - 30
msm/sde/sde_connector.c

@@ -2364,10 +2364,6 @@ static int sde_connector_atomic_check(struct drm_connector *connector,
 		struct drm_atomic_state *state)
 {
 	struct sde_connector *c_conn;
-	struct sde_connector_state *c_state;
-	bool qsync_dirty = false, has_modeset = false;
-	struct drm_connector_state *new_conn_state;
-	struct drm_crtc_state *new_crtc_state = NULL;
 
 	if (!connector) {
 		SDE_ERROR("invalid connector\n");
@@ -2375,32 +2371,6 @@ static int sde_connector_atomic_check(struct drm_connector *connector,
 	}
 
 	c_conn = to_sde_connector(connector);
-	new_conn_state = drm_atomic_get_new_connector_state(state, connector);
-
-	if (!new_conn_state) {
-		SDE_ERROR("invalid connector state\n");
-		return -EINVAL;
-	}
-
-	c_state = to_sde_connector_state(new_conn_state);
-	if (new_conn_state->crtc)
-		new_crtc_state = drm_atomic_get_new_crtc_state(state,
-					new_conn_state->crtc);
-
-	has_modeset = sde_crtc_atomic_check_has_modeset(new_conn_state->state,
-						new_conn_state->crtc);
-	qsync_dirty = msm_property_is_dirty(&c_conn->property_info,
-					&c_state->property_state,
-					CONNECTOR_PROP_QSYNC_MODE);
-
-	SDE_DEBUG("has_modeset %d qsync_dirty %d\n", has_modeset, qsync_dirty);
-	if (has_modeset && qsync_dirty && new_crtc_state &&
-		!msm_is_mode_seamless_vrr(&new_crtc_state->adjusted_mode)) {
-		SDE_ERROR("invalid qsync update during modeset\n");
-		return -EINVAL;
-	}
-	new_conn_state = drm_atomic_get_new_connector_state(state, connector);
-
 	if (c_conn->ops.atomic_check)
 		return c_conn->ops.atomic_check(connector,
 				c_conn->display, state);

+ 32 - 32
msm/sde/sde_crtc.c

@@ -1193,6 +1193,9 @@ static int pstate_cmp(const void *a, const void *b)
 	int pa_zpos, pb_zpos;
 	enum sde_layout pa_layout, pb_layout;
 
+	if ((!pa || !pa->sde_pstate) || (!pb || !pb->sde_pstate))
+		return rc;
+
 	pa_zpos = sde_plane_get_property(pa->sde_pstate, PLANE_PROP_ZPOS);
 	pb_zpos = sde_plane_get_property(pb->sde_pstate, PLANE_PROP_ZPOS);
 
@@ -3564,11 +3567,10 @@ static void _sde_crtc_remove_pipe_flush(struct drm_crtc *crtc)
 	}
 }
 
-static void _sde_crtc_schedule_idle_notify(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_state)
+static void _sde_crtc_schedule_idle_notify(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct sde_crtc_state *cstate = to_sde_crtc_state(old_state);
+	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
 	struct sde_kms *sde_kms = _sde_crtc_get_kms(crtc);
 	struct msm_drm_private *priv;
 	struct msm_drm_thread *event_thread;
@@ -3815,19 +3817,19 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc,
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 
-	_sde_crtc_schedule_idle_notify(crtc, old_state);
+	_sde_crtc_schedule_idle_notify(crtc);
 
 	SDE_ATRACE_END("crtc_commit");
 }
 
 /**
- * _sde_crtc_vblank_enable_no_lock - update power resource and vblank request
+ * _sde_crtc_vblank_enable - update power resource and vblank request
  * @sde_crtc: Pointer to sde crtc structure
  * @enable: Whether to enable/disable vblanks
  *
  * @Return: error code
  */
-static int _sde_crtc_vblank_enable_no_lock(
+static int _sde_crtc_vblank_enable(
 		struct sde_crtc *sde_crtc, bool enable)
 {
 	struct drm_crtc *crtc;
@@ -3839,38 +3841,38 @@ static int _sde_crtc_vblank_enable_no_lock(
 	}
 
 	crtc = &sde_crtc->base;
+	SDE_EVT32(DRMID(crtc), enable, sde_crtc->enabled,
+			crtc->state->encoder_mask,
+			sde_crtc->cached_encoder_mask);
 
 	if (enable) {
 		int ret;
 
-		/* drop lock since power crtc cb may try to re-acquire lock */
-		mutex_unlock(&sde_crtc->crtc_lock);
 		ret = pm_runtime_get_sync(crtc->dev->dev);
-		mutex_lock(&sde_crtc->crtc_lock);
 		if (ret < 0)
 			return ret;
 
+		mutex_lock(&sde_crtc->crtc_lock);
 		drm_for_each_encoder_mask(enc, crtc->dev,
-			crtc->state->encoder_mask) {
-			SDE_EVT32(DRMID(&sde_crtc->base), DRMID(enc), enable,
-					sde_crtc->enabled);
+				sde_crtc->cached_encoder_mask) {
+			SDE_EVT32(DRMID(crtc), DRMID(enc));
 
 			sde_encoder_register_vblank_callback(enc,
 					sde_crtc_vblank_cb, (void *)crtc);
 		}
+
+		mutex_unlock(&sde_crtc->crtc_lock);
 	} else {
+		mutex_lock(&sde_crtc->crtc_lock);
 		drm_for_each_encoder_mask(enc, crtc->dev,
-			crtc->state->encoder_mask) {
-			SDE_EVT32(DRMID(&sde_crtc->base), DRMID(enc), enable,
-					sde_crtc->enabled);
+				sde_crtc->cached_encoder_mask) {
+			SDE_EVT32(DRMID(crtc), DRMID(enc));
 
 			sde_encoder_register_vblank_callback(enc, NULL, NULL);
 		}
 
-		/* drop lock since power crtc cb may try to re-acquire lock */
 		mutex_unlock(&sde_crtc->crtc_lock);
 		pm_runtime_put_sync(crtc->dev->dev);
-		mutex_lock(&sde_crtc->crtc_lock);
 	}
 
 	return 0;
@@ -3972,7 +3974,7 @@ static void sde_crtc_clear_cached_mixer_cfg(struct drm_crtc *crtc)
 	SDE_EVT32(DRMID(crtc));
 }
 
-static void sde_crtc_reset_sw_state_for_ipc(struct drm_crtc *crtc)
+void sde_crtc_reset_sw_state(struct drm_crtc *crtc)
 {
 	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
 	struct drm_plane *plane;
@@ -4075,7 +4077,7 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg)
 		sde_cp_crtc_pre_ipc(crtc);
 		break;
 	case SDE_POWER_EVENT_POST_DISABLE:
-		sde_crtc_reset_sw_state_for_ipc(crtc);
+		sde_crtc_reset_sw_state(crtc);
 		sde_cp_crtc_suspend(crtc);
 		event.type = DRM_EVENT_SDE_POWER;
 		event.length = sizeof(power_on);
@@ -4159,18 +4161,17 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
 	msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
 			(u8 *)&power_on);
 
-	if (atomic_read(&sde_crtc->frame_pending)) {
-		mutex_unlock(&sde_crtc->crtc_lock);
-		_sde_crtc_flush_event_thread(crtc);
-		mutex_lock(&sde_crtc->crtc_lock);
-	}
+	mutex_unlock(&sde_crtc->crtc_lock);
+	_sde_crtc_flush_event_thread(crtc);
+	mutex_lock(&sde_crtc->crtc_lock);
 
 	kthread_cancel_delayed_work_sync(&sde_crtc->static_cache_read_work);
 	kthread_cancel_delayed_work_sync(&sde_crtc->idle_notify_work);
 
-	SDE_EVT32(DRMID(crtc), sde_crtc->enabled,
-			crtc->state->active, crtc->state->enable);
+	SDE_EVT32(DRMID(crtc), sde_crtc->enabled, crtc->state->active,
+			crtc->state->enable, sde_crtc->cached_encoder_mask);
 	sde_crtc->enabled = false;
+	sde_crtc->cached_encoder_mask = 0;
 
 	/* Try to disable uidle */
 	sde_core_perf_crtc_update_uidle(crtc, false);
@@ -4279,8 +4280,11 @@ static void sde_crtc_enable(struct drm_crtc *crtc,
 	 * Avoid drm_crtc_vblank_on during seamless DMS case
 	 * when CRTC is already in enabled state
 	 */
-	if (!sde_crtc->enabled)
+	if (!sde_crtc->enabled) {
+		/* cache the encoder mask now for vblank work */
+		sde_crtc->cached_encoder_mask = crtc->state->encoder_mask;
 		drm_crtc_vblank_on(crtc);
+	}
 
 	mutex_lock(&sde_crtc->crtc_lock);
 	SDE_EVT32(DRMID(crtc), sde_crtc->enabled);
@@ -5119,15 +5123,11 @@ int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
 	}
 	sde_crtc = to_sde_crtc(crtc);
 
-	mutex_lock(&sde_crtc->crtc_lock);
-	SDE_EVT32(DRMID(&sde_crtc->base), en, sde_crtc->enabled);
-	ret = _sde_crtc_vblank_enable_no_lock(sde_crtc, en);
+	ret = _sde_crtc_vblank_enable(sde_crtc, en);
 	if (ret)
 		SDE_ERROR("%s vblank enable failed: %d\n",
 				sde_crtc->name, ret);
 
-	mutex_unlock(&sde_crtc->crtc_lock);
-
 	return 0;
 }
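
The vblank rework above removes the lock juggling around pm_runtime (crtc_lock previously had to be dropped so the power-event callback could re-acquire it) and iterates sde_crtc->cached_encoder_mask, a snapshot taken in sde_crtc_enable(), so deferred vblank work no longer races with crtc->state->encoder_mask changing underneath it. A toy model of the snapshot idea, with simplified stand-in types:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of cached_encoder_mask: capture the mask when the CRTC is
 * enabled, and let the possibly-deferred vblank work iterate that
 * snapshot instead of the live crtc state. */
struct toy_crtc {
	unsigned int state_encoder_mask;   /* mutated by later commits */
	unsigned int cached_encoder_mask;  /* snapshot from crtc enable */
	bool enabled;
};

static void crtc_enable(struct toy_crtc *c, unsigned int mask)
{
	c->state_encoder_mask = mask;
	if (!c->enabled) {
		c->cached_encoder_mask = mask;  /* as in sde_crtc_enable() */
		c->enabled = true;
	}
}

static void vblank_work(struct toy_crtc *c, bool en)
{
	/* iterate encoders from the snapshot, not the live state */
	printf("vblank %s over mask 0x%x\n",
	       en ? "on" : "off", c->cached_encoder_mask);
}

int main(void)
{
	struct toy_crtc c = { 0 };

	crtc_enable(&c, 0x3);
	c.state_encoder_mask = 0x1;  /* a later commit shrinks the mask */
	vblank_work(&c, true);       /* still sees 0x3 */
	return 0;
}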
 

+ 9 - 0
msm/sde/sde_crtc.h

@@ -291,6 +291,7 @@ struct sde_crtc_misr_info {
  * @static_cache_read_work: delayed worker to transition cache state to read
  * @cache_state     : Current static image cache state
  * @dspp_blob_info  : blob containing dspp hw capability information
+ * @cached_encoder_mask : cached encoder_mask for vblank work
  */
 struct sde_crtc {
 	struct drm_crtc base;
@@ -381,6 +382,7 @@ struct sde_crtc {
 	enum sde_crtc_cache_state cache_state;
 
 	struct drm_property_blob *dspp_blob_info;
+	u32 cached_encoder_mask;
 };
 
 enum sde_crtc_dirty_flags {
@@ -917,4 +919,11 @@ void sde_crtc_static_cache_read_kickoff(struct drm_crtc *crtc);
 int sde_crtc_get_num_datapath(struct drm_crtc *crtc,
 		struct drm_connector *connector);
 
+/**
+ * sde_crtc_reset_sw_state - reset dirty properties on crtc and
+ *				planes attached to the crtc
+ * @crtc: Pointer to DRM crtc object
+ */
+void sde_crtc_reset_sw_state(struct drm_crtc *crtc);
+
 #endif /* _SDE_CRTC_H_ */

+ 32 - 21
msm/sde/sde_encoder.c

@@ -941,6 +941,7 @@ static int sde_encoder_virt_atomic_check(
 	struct sde_crtc_state *sde_crtc_state = NULL;
 	enum sde_rm_topology_name old_top;
 	int ret = 0;
+	bool qsync_dirty = false, has_modeset = false;
 
 	if (!drm_enc || !crtc_state || !conn_state) {
 		SDE_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
@@ -997,6 +998,19 @@ static int sde_encoder_virt_atomic_check(
 	}
 
 	drm_mode_set_crtcinfo(adj_mode, 0);
+
+	has_modeset = sde_crtc_atomic_check_has_modeset(conn_state->state,
+				conn_state->crtc);
+	qsync_dirty = msm_property_is_dirty(&sde_conn->property_info,
+				&sde_conn_state->property_state,
+				CONNECTOR_PROP_QSYNC_MODE);
+
+	if (has_modeset && qsync_dirty &&
+		!msm_is_mode_seamless_vrr(adj_mode)) {
+		SDE_ERROR("invalid qsync update during modeset\n");
+		return -EINVAL;
+	}
+
 	SDE_EVT32(DRMID(drm_enc), adj_mode->flags, adj_mode->private_flags,
 		 old_top, adj_mode->vrefresh, adj_mode->hdisplay,
 		 adj_mode->vdisplay, adj_mode->htotal, adj_mode->vtotal);
@@ -2906,8 +2920,10 @@ void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
 		struct sde_encoder_phys_wb *wb_enc)
 {
 	struct sde_encoder_virt *sde_enc;
+	struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
+	struct sde_ctl_flush_cfg cfg;
 
-	phys_enc->hw_ctl->ops.reset(phys_enc->hw_ctl);
+	ctl->ops.reset(ctl);
 	sde_encoder_helper_reset_mixers(phys_enc, NULL);
 
 	if (wb_enc) {
@@ -2915,10 +2931,8 @@ void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
 			wb_enc->hw_wb->ops.bind_pingpong_blk(wb_enc->hw_wb,
 					false, phys_enc->hw_pp->idx);
 
-			if (phys_enc->hw_ctl->ops.update_bitmask)
-				phys_enc->hw_ctl->ops.update_bitmask(
-						phys_enc->hw_ctl,
-						SDE_HW_FLUSH_WB,
+			if (ctl->ops.update_bitmask)
+				ctl->ops.update_bitmask(ctl, SDE_HW_FLUSH_WB,
 						wb_enc->hw_wb->idx, true);
 		}
 	} else {
@@ -2927,10 +2941,8 @@ void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
 					phys_enc->hw_intf, false,
 					phys_enc->hw_pp->idx);
 
-			if (phys_enc->hw_ctl->ops.update_bitmask)
-				phys_enc->hw_ctl->ops.update_bitmask(
-						phys_enc->hw_ctl,
-						SDE_HW_FLUSH_INTF,
+			if (ctl->ops.update_bitmask)
+				ctl->ops.update_bitmask(ctl, SDE_HW_FLUSH_INTF,
 						phys_enc->hw_intf->idx, true);
 		}
 	}
@@ -2938,10 +2950,8 @@ void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
 	if (phys_enc->hw_pp && phys_enc->hw_pp->ops.reset_3d_mode) {
 		phys_enc->hw_pp->ops.reset_3d_mode(phys_enc->hw_pp);
 
-		if (phys_enc->hw_ctl->ops.update_bitmask &&
-				phys_enc->hw_pp->merge_3d)
-			phys_enc->hw_ctl->ops.update_bitmask(
-					phys_enc->hw_ctl, SDE_HW_FLUSH_MERGE_3D,
+		if (ctl->ops.update_bitmask && phys_enc->hw_pp->merge_3d)
+			ctl->ops.update_bitmask(ctl, SDE_HW_FLUSH_MERGE_3D,
 					phys_enc->hw_pp->merge_3d->idx, true);
 	}
 
@@ -2950,23 +2960,24 @@ void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
 		phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
 				false, phys_enc->hw_pp->idx);
 
-		if (phys_enc->hw_ctl->ops.update_bitmask)
-			phys_enc->hw_ctl->ops.update_bitmask(
-					phys_enc->hw_ctl, SDE_HW_FLUSH_CDM,
+		if (ctl->ops.update_bitmask)
+			ctl->ops.update_bitmask(ctl, SDE_HW_FLUSH_CDM,
 					phys_enc->hw_cdm->idx, true);
 	}
 
 	sde_enc = to_sde_encoder_virt(phys_enc->parent);
 
 	if (phys_enc == sde_enc->cur_master && phys_enc->hw_pp &&
-			phys_enc->hw_ctl->ops.reset_post_disable)
-		phys_enc->hw_ctl->ops.reset_post_disable(
-				phys_enc->hw_ctl, &phys_enc->intf_cfg_v1,
+			ctl->ops.reset_post_disable)
+		ctl->ops.reset_post_disable(ctl, &phys_enc->intf_cfg_v1,
 				phys_enc->hw_pp->merge_3d ?
 				phys_enc->hw_pp->merge_3d->idx : 0);
 
-	phys_enc->hw_ctl->ops.trigger_flush(phys_enc->hw_ctl);
-	phys_enc->hw_ctl->ops.trigger_start(phys_enc->hw_ctl);
+	ctl->ops.get_pending_flush(ctl, &cfg);
+	SDE_EVT32(DRMID(phys_enc->parent), cfg.pending_flush_mask);
+	ctl->ops.trigger_flush(ctl);
+	ctl->ops.trigger_start(ctl);
+	ctl->ops.clear_pending_flush(ctl);
 }
 
 static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog,

+ 5 - 9
msm/sde/sde_encoder_phys_cmd.c

@@ -23,8 +23,6 @@
 #define to_sde_encoder_phys_cmd(x) \
 	container_of(x, struct sde_encoder_phys_cmd, base)
 
-#define PP_TIMEOUT_MAX_TRIALS	4
-
 /*
  * Tearcheck sync start and continue thresholds are empirically found
  * based on common panels In the future, may want to allow panels to override
@@ -507,7 +505,6 @@ static int _sde_encoder_phys_cmd_handle_ppdone_timeout(
 	u32 frame_event = SDE_ENCODER_FRAME_EVENT_ERROR
 				| SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
 	struct drm_connector *conn;
-	int event;
 	u32 pending_kickoff_cnt;
 	unsigned long lock_flags;
 
@@ -541,26 +538,25 @@ static int _sde_encoder_phys_cmd_handle_ppdone_timeout(
 				pending_kickoff_cnt);
 
 		SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
+		mutex_lock(phys_enc->vblank_ctl_lock);
 		sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
 		if (sde_kms_is_secure_session_inprogress(phys_enc->sde_kms))
 			SDE_DBG_DUMP("secure", "all", "dbg_bus");
 		else
 			SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus");
 		sde_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
+		mutex_unlock(phys_enc->vblank_ctl_lock);
 	}
 
 	/*
 	 * if the recovery event is registered by user, don't panic
 	 * trigger panic on first timeout if no listener registered
 	 */
-	if (recovery_events) {
-		event = cmd_enc->pp_timeout_report_cnt > PP_TIMEOUT_MAX_TRIALS ?
-			SDE_RECOVERY_HARD_RESET : SDE_RECOVERY_CAPTURE;
+	if (recovery_events)
 		sde_connector_event_notify(conn, DRM_EVENT_SDE_HW_RECOVERY,
-				sizeof(uint8_t), event);
-	} else if (cmd_enc->pp_timeout_report_cnt) {
+				sizeof(uint8_t), SDE_RECOVERY_CAPTURE);
+	else if (cmd_enc->pp_timeout_report_cnt)
 		SDE_DBG_DUMP("dsi_dbg_bus", "panic");
-	}
 
 	/* request a ctl reset before the next kickoff */
 	phys_enc->enable_state = SDE_ENC_ERR_NEEDS_HW_RESET;

+ 26 - 15
msm/sde/sde_encoder_phys_vid.c

@@ -26,9 +26,6 @@
 #define to_sde_encoder_phys_vid(x) \
 	container_of(x, struct sde_encoder_phys_vid, base)
 
-/* maximum number of consecutive kickoff errors */
-#define KICKOFF_MAX_ERRORS	2
-
 /* Poll time to do recovery during active region */
 #define POLL_TIME_USEC_FOR_LN_CNT 500
 #define MAX_POLL_CNT 10
@@ -877,12 +874,15 @@ static int _sde_encoder_phys_vid_wait_for_vblank(
 	u32 event = SDE_ENCODER_FRAME_EVENT_ERROR |
 		SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE |
 		SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
+	struct drm_connector *conn;
 
 	if (!phys_enc) {
 		pr_err("invalid encoder\n");
 		return -EINVAL;
 	}
 
+	conn = phys_enc->connector;
+
 	wait_info.wq = &phys_enc->pending_kickoff_wq;
 	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
 	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
@@ -893,10 +893,16 @@ static int _sde_encoder_phys_vid_wait_for_vblank(
 
 	if (notify && (ret == -ETIMEDOUT) &&
 	    atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0) &&
-	    phys_enc->parent_ops.handle_frame_done)
+	    phys_enc->parent_ops.handle_frame_done) {
 		phys_enc->parent_ops.handle_frame_done(
 			phys_enc->parent, phys_enc, event);
 
+		if (sde_encoder_recovery_events_enabled(phys_enc->parent))
+			sde_connector_event_notify(conn,
+				DRM_EVENT_SDE_HW_RECOVERY,
+				sizeof(uint8_t), SDE_RECOVERY_HARD_RESET);
+	}
+
 	SDE_EVT32(DRMID(phys_enc->parent), event, notify, ret,
 			ret ? SDE_EVTLOG_FATAL : 0);
 	return ret;
@@ -922,8 +928,8 @@ static int sde_encoder_phys_vid_prepare_for_kickoff(
 	struct sde_hw_ctl *ctl;
 	bool recovery_events;
 	struct drm_connector *conn;
-	int event;
 	int rc;
+	int irq_enable;
 
 	if (!phys_enc || !params || !phys_enc->hw_ctl) {
 		SDE_ERROR("invalid encoder/parameters\n");
@@ -952,27 +958,32 @@ static int sde_encoder_phys_vid_prepare_for_kickoff(
 		/* to avoid flooding, only log first time, and "dead" time */
 		if (vid_enc->error_count == 1) {
 			SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
+			mutex_lock(phys_enc->vblank_ctl_lock);
 
-			sde_encoder_helper_unregister_irq(
+			irq_enable = atomic_read(&phys_enc->vblank_refcount);
+
+			if (irq_enable)
+				sde_encoder_helper_unregister_irq(
 					phys_enc, INTR_IDX_VSYNC);
+
 			SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus");
-			sde_encoder_helper_register_irq(
+
+			if (irq_enable)
+				sde_encoder_helper_register_irq(
 					phys_enc, INTR_IDX_VSYNC);
+
+			mutex_unlock(phys_enc->vblank_ctl_lock);
 		}
 
 		/*
 		 * if the recovery event is registered by user, don't panic
 		 * trigger panic on first timeout if no listener registered
 		 */
-		if (recovery_events) {
-			event = vid_enc->error_count > KICKOFF_MAX_ERRORS ?
-				SDE_RECOVERY_HARD_RESET : SDE_RECOVERY_CAPTURE;
-			sde_connector_event_notify(conn,
-					DRM_EVENT_SDE_HW_RECOVERY,
-					sizeof(uint8_t), event);
-		} else {
+		if (recovery_events)
+			sde_connector_event_notify(conn, DRM_EVENT_SDE_HW_RECOVERY,
+					sizeof(uint8_t), SDE_RECOVERY_CAPTURE);
+		else
 			SDE_DBG_DUMP("panic");
-		}
 
 		/* request a ctl reset before the next flush */
 		phys_enc->enable_state = SDE_ENC_ERR_NEEDS_HW_RESET;

+ 0 - 49
msm/sde/sde_hw_top.c

@@ -9,7 +9,6 @@
 #include "sde_dbg.h"
 #include "sde_kms.h"
 
-#define SCRATCH_REGISTER_0                0x14
 #define SSPP_SPARE                        0x28
 #define UBWC_DEC_HW_VERSION               0x058
 #define UBWC_STATIC                       0x144
@@ -615,51 +614,6 @@ static u32 sde_hw_get_autorefresh_status(struct sde_hw_mdp *mdp, u32 intf_idx)
 	return autorefresh_status;
 }
 
-static void sde_hw_clear_mode_index(struct sde_hw_mdp *mdp)
-{
-	struct sde_hw_blk_reg_map c;
-
-	if (!mdp)
-		return;
-
-	c = mdp->hw;
-	c.blk_off = 0x0;
-
-	SDE_REG_WRITE(&c, SCRATCH_REGISTER_0, 0x0);
-}
-
-static void sde_hw_set_mode_index(struct sde_hw_mdp *mdp, u32 display_id,
-		u32 mode)
-{
-	struct sde_hw_blk_reg_map c;
-	u32 value = 0;
-
-	if (!mdp)
-		return;
-
-	c = mdp->hw;
-	c.blk_off = 0x0;
-
-	/* 4-bits for mode index of each display */
-	value = SDE_REG_READ(&c, SCRATCH_REGISTER_0);
-	value |= (mode << (display_id * 4));
-	SDE_REG_WRITE(&c, SCRATCH_REGISTER_0, value);
-}
-
-static u32 sde_hw_get_mode_index(struct sde_hw_mdp *mdp, u32 display_id)
-{
-	struct sde_hw_blk_reg_map c;
-	u32 value = 0;
-
-	c = mdp->hw;
-	c.blk_off = 0x0;
-
-	value = SDE_REG_READ(&c, SCRATCH_REGISTER_0);
-	value = (value >> (display_id * 4)) & 0xF;
-
-	return value;
-}
-
 static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
 		unsigned long cap)
 {
@@ -677,9 +631,6 @@ static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
 	ops->reset_ubwc = sde_hw_reset_ubwc;
 	ops->intf_audio_select = sde_hw_intf_audio_select;
 	ops->set_mdp_hw_events = sde_hw_mdp_events;
-	ops->set_mode_index = sde_hw_set_mode_index;
-	ops->get_mode_index = sde_hw_get_mode_index;
-	ops->clear_mode_index = sde_hw_clear_mode_index;
 	if (cap & BIT(SDE_MDP_VSYNC_SEL))
 		ops->setup_vsync_source = sde_hw_setup_vsync_source;
 	else

+ 0 - 22
msm/sde/sde_hw_top.h

@@ -207,28 +207,6 @@ struct sde_hw_mdp_ops {
 	 */
 	void (*set_mdp_hw_events)(struct sde_hw_mdp *mdp, bool enable);
 
-	/**
-	 * clear_mode_index - clears the mode index in spare reg
-	 * @mdp: mdp top context driver
-	 */
-	void (*clear_mode_index)(struct sde_hw_mdp *mdp);
-
-	/**
-	 * set_mode_index - sets the current drm mode index to spare reg
-	 * @mdp: mdp top context driver
-	 * @display_id: display index
-	 * @mode: drm mode index
-	 */
-	void (*set_mode_index)(struct sde_hw_mdp *mdp, u32 display_id,
-			u32 mode);
-
-	/**
-	 * get_mode_index - gets the current drm mode index from spare reg
-	 * @mdp: mdp top context driver
-	 * @display_id: display index
-	 */
-	u32 (*get_mode_index)(struct sde_hw_mdp *mdp, u32 display_id);
-
 	/**
 	 * set_cwb_ppb_cntl - select the data point for CWB
 	 * @mdp: mdp top context driver

+ 224 - 143
msm/sde/sde_kms.c

@@ -993,11 +993,39 @@ static void _sde_kms_drm_check_dpms(struct drm_atomic_state *old_state,
 
 }
 
+static struct drm_crtc *sde_kms_vm_get_vm_crtc(
+		struct drm_atomic_state *state)
+{
+	int i;
+	enum sde_crtc_vm_req vm_req = VM_REQ_NONE;
+	struct drm_crtc *crtc, *vm_crtc = NULL;
+	struct drm_crtc_state *new_cstate, *old_cstate;
+	struct sde_crtc_state *vm_cstate;
+
+	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
+		if (!new_cstate->active && !old_cstate->active)
+			continue;
+
+		vm_cstate = to_sde_crtc_state(new_cstate);
+		vm_req = sde_crtc_get_property(vm_cstate,
+				CRTC_PROP_VM_REQ_STATE);
+		if (vm_req != VM_REQ_NONE) {
+			SDE_DEBUG("valid vm request:%d found on crtc-%d\n",
+					vm_req, crtc->base.id);
+			vm_crtc = crtc;
+			break;
+		}
+	}
+
+	return vm_crtc;
+}
+
 int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
 				      struct drm_atomic_state *state)
 {
 	struct drm_device *ddev;
 	struct drm_crtc *crtc;
+	struct drm_crtc_state *new_cstate;
 	struct drm_encoder *encoder;
 	struct drm_connector *connector;
 	struct sde_vm_ops *vm_ops;
@@ -1011,10 +1039,12 @@ int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
 	if (!vm_ops)
 		return -EINVAL;
 
-	crtc = state->crtcs[0].ptr;
-
-	cstate = to_sde_crtc_state(state->crtcs[0].new_state);
+	crtc = sde_kms_vm_get_vm_crtc(state);
+	if (!crtc)
+		return 0;
 
+	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
+	cstate = to_sde_crtc_state(new_cstate);
 	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
 	if (vm_req != VM_REQ_ACQUIRE)
 		return 0;
@@ -1055,13 +1085,19 @@ int sde_kms_vm_trusted_prepare_commit(struct sde_kms *sde_kms,
 {
 	struct drm_device *ddev;
 	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *new_cstate;
 	struct sde_crtc_state *cstate;
 	enum sde_crtc_vm_req vm_req;
 
 	ddev = sde_kms->dev;
 
-	cstate = to_sde_crtc_state(state->crtcs[0].new_state);
+	crtc = sde_kms_vm_get_vm_crtc(state);
+	if (!crtc)
+		return 0;
 
+	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
+	cstate = to_sde_crtc_state(new_cstate);
 	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
 	if (vm_req != VM_REQ_ACQUIRE)
 		return 0;
@@ -1236,59 +1272,6 @@ static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms,
 	}
 }
 
-void _sde_kms_program_mode_info(struct sde_kms *sde_kms)
-{
-	struct drm_encoder *encoder;
-	struct drm_crtc *crtc;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-	struct dsi_display *dsi_display;
-	struct drm_display_mode *drm_mode;
-	int i;
-	struct drm_device *dev;
-	u32 mode_index = 0;
-
-	if (!sde_kms->dev || !sde_kms->hw_mdp)
-		return;
-
-	dev = sde_kms->dev;
-	sde_kms->hw_mdp->ops.clear_mode_index(sde_kms->hw_mdp);
-
-	for (i = 0; i < sde_kms->dsi_display_count; i++) {
-		dsi_display = (struct dsi_display *)sde_kms->dsi_displays[i];
-
-		if (dsi_display->bridge->base.encoder) {
-			encoder = dsi_display->bridge->base.encoder;
-			crtc = encoder->crtc;
-
-			if (!crtc->state->active)
-				continue;
-
-			mutex_lock(&dev->mode_config.mutex);
-			drm_connector_list_iter_begin(dev, &conn_iter);
-			drm_for_each_connector_iter(connector, &conn_iter) {
-				if (connector->encoder_ids[0]
-						== encoder->base.id)
-					break;
-			}
-			drm_connector_list_iter_end(&conn_iter);
-			mutex_unlock(&dev->mode_config.mutex);
-
-			list_for_each_entry(drm_mode, &connector->modes, head) {
-				if (drm_mode_equal(
-						&crtc->state->mode, drm_mode))
-					break;
-				mode_index++;
-			}
-
-			sde_kms->hw_mdp->ops.set_mode_index(
-					sde_kms->hw_mdp, i, mode_index);
-			SDE_DEBUG("crtc:%d, display_idx:%d, mode_index:%d\n",
-					DRMID(crtc), i, mode_index);
-		}
-	}
-}
-
 int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
 	struct drm_atomic_state *state)
 {
@@ -1308,16 +1291,15 @@ int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
 	vm_ops = sde_vm_get_ops(sde_kms);
 	ddev = sde_kms->dev;
 
-	crtc = state->crtcs[0].ptr;
-	new_cstate = state->crtcs[0].new_state;
-	cstate = to_sde_crtc_state(new_cstate);
+	crtc = sde_kms_vm_get_vm_crtc(state);
+	if (!crtc)
+		return 0;
 
+	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
+	cstate = to_sde_crtc_state(new_cstate);
 	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
 	if (vm_req != VM_REQ_RELEASE)
-		return rc;
-
-	if (!new_cstate->active && !new_cstate->active_changed)
-		return rc;
+		return 0;
 
 	/* if vm_req is enabled, one CRTC in the commit is guaranteed */
 	sde_kms_wait_for_frame_transfer_complete(&sde_kms->base, crtc);
@@ -1356,7 +1338,9 @@ int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
 
 	ddev = sde_kms->dev;
 
-	crtc = state->crtcs[0].ptr;
+	crtc = sde_kms_vm_get_vm_crtc(state);
+	if (!crtc)
+		return 0;
 
 	/* if vm_req is enabled, one CRTC in the commit is guaranteed */
 	sde_kms_wait_for_frame_transfer_complete(&sde_kms->base, crtc);
@@ -1383,6 +1367,9 @@ int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
 	/* disable vblank events */
 	drm_crtc_vblank_off(crtc);
 
+	/* reset sw state */
+	sde_crtc_reset_sw_state(crtc);
+
 	return rc;
 }
 
@@ -1392,6 +1379,7 @@ int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
 	struct sde_vm_ops *vm_ops;
 	struct sde_crtc_state *cstate;
 	struct drm_crtc *crtc;
+	struct drm_crtc_state *new_cstate;
 	enum sde_crtc_vm_req vm_req;
 	int rc = 0;
 
@@ -1400,27 +1388,32 @@ int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
 
 	vm_ops = sde_vm_get_ops(sde_kms);
 
-	crtc = state->crtcs[0].ptr;
-	cstate = to_sde_crtc_state(state->crtcs[0].new_state);
+	crtc = sde_kms_vm_get_vm_crtc(state);
+	if (!crtc)
+		return 0;
 
+	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
+	cstate = to_sde_crtc_state(new_cstate);
 	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
 	if (vm_req != VM_REQ_RELEASE)
-		goto exit;
+		return 0;
 
 	/* handle SDE pre-release */
-	sde_kms_vm_pre_release(sde_kms, state);
+	rc = sde_kms_vm_pre_release(sde_kms, state);
+	if (rc) {
+		SDE_ERROR("sde vm pre_release failed, rc=%d\n", rc);
+		goto exit;
+	}
 
 	/* properly handoff color processing features */
 	sde_cp_crtc_vm_primary_handoff(crtc);
 
-	/* program the current drm mode info to scratch reg */
-	_sde_kms_program_mode_info(sde_kms);
-
 	/* handle non-SDE clients pre-release */
 	if (vm_ops->vm_client_pre_release) {
 		rc = vm_ops->vm_client_pre_release(sde_kms);
 		if (rc) {
-			SDE_ERROR("sde vm pre_release failed, rc=%d\n", rc);
+			SDE_ERROR("sde vm client pre_release failed, rc=%d\n",
+					rc);
 			goto exit;
 		}
 	}
@@ -2460,6 +2453,7 @@ static void sde_kms_lastclose(struct msm_kms *kms)
 	}
 
 	state->acquire_ctx = &ctx;
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
 
 retry:
 	ret = drm_modeset_lock_all_ctx(dev, &ctx);
@@ -2483,12 +2477,13 @@ out_ctx:
 	if (ret)
 		SDE_ERROR("kms lastclose failed: %d\n", ret);
 
+	SDE_EVT32(ret, SDE_EVTLOG_FUNC_EXIT);
 	return;
 
 backoff:
 	drm_atomic_state_clear(state);
 	drm_modeset_backoff(&ctx);
-
+	SDE_EVT32(ret, SDE_EVTLOG_FUNC_CASE1);
 	goto retry;
 }
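The SDE_EVT32 markers added above bracket lastclose's retry loop, logging entry, the backoff branch, and the final return code into the event log. For readers unfamiliar with the convention: -EDEADLK from drm_modeset_lock_all_ctx() asks the caller to drop everything, back off, and start over. A toy user-space rendering of the same goto-based idiom (the deadlock is simulated):

#include <stdio.h>

#define EDEADLK 35

/* pretend lock acquisition that "deadlocks" twice before succeeding */
static int try_lock_all(int *attempts)
{
	return (*attempts)++ < 2 ? -EDEADLK : 0;
}

int main(void)
{
	int attempts = 0, ret;

retry:
	ret = try_lock_all(&attempts);
	if (ret == -EDEADLK) {
		/* real code: drm_atomic_state_clear() + drm_modeset_backoff(),
		 * then take the whole locked sequence from the top */
		goto retry;
	}

	printf("locked after %d attempts, ret=%d\n", attempts, ret);
	return ret;
}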
 
@@ -2509,6 +2504,10 @@ static int sde_kms_check_vm_request(struct msm_kms *kms,
 	enum sde_crtc_idle_pc_state idle_pc_state;
 	struct sde_mdss_cfg *catalog;
 	int rc = 0;
+	struct sde_connector *sde_conn;
+	struct dsi_display *dsi_display;
+	struct drm_connector *connector;
+	struct drm_connector_state *new_connstate;
 
 	if (!kms || !state)
 		return -EINVAL;
@@ -2589,6 +2588,29 @@ static int sde_kms_check_vm_request(struct msm_kms *kms,
 			crtc_encoder_cnt++;
 	}
 
+	SDE_EVT32(old_vm_req, new_vm_req, vm_ops->vm_owns_hw(sde_kms));
+	SDE_DEBUG("VM o_state:%d, n_state:%d, hw_owner:%d\n", old_vm_req,
+			new_vm_req, vm_ops->vm_owns_hw(sde_kms));
+
+	for_each_new_connector_in_state(state, connector, new_connstate, i) {
+		int conn_mask = active_cstate->connector_mask;
+
+		if (drm_connector_mask(connector) & conn_mask) {
+			sde_conn = to_sde_connector(connector);
+			dsi_display = (struct dsi_display *) sde_conn->display;
+
+			SDE_EVT32(DRMID(connector), DRMID(active_crtc), i,
+					dsi_display->type,
+					dsi_display->trusted_vm_env);
+			SDE_DEBUG(
+			"VM display:%s, conn:%d, crtc:%d, type:%d, tvm:%d\n",
+					dsi_display->name, DRMID(connector),
+					DRMID(active_crtc), dsi_display->type,
+					dsi_display->trusted_vm_env);
+			break;
+		}
+	}
+
 	/* Check for single crtc commits only on valid VM requests */
 	if (active_crtc && global_active_crtc &&
 		(commit_crtc_cnt > catalog->max_trusted_vm_displays ||
@@ -2626,6 +2648,9 @@ static int sde_kms_check_vm_request(struct msm_kms *kms,
 				vm_ops->vm_owns_hw(sde_kms), rc);
 			goto end;
 		}
+
+		if (vm_ops->vm_resource_init)
+			rc = vm_ops->vm_resource_init(sde_kms, state);
 	}
 
 end:
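The hunk above wires the new optional vm_resource_init hook into the acquire path: it runs only after vm_acquire() succeeds, and only when a backend registered it (the trusted-VM backend does so in sde_vm_trusted.c further down). A toy model of that call contract, with stand-in types:

#include <stdio.h>

struct kms;				/* stand-in for struct sde_kms */
struct atomic_state;			/* stand-in for struct drm_atomic_state */

struct vm_ops {
	int (*vm_resource_init)(struct kms *kms, struct atomic_state *state);
};

static int post_acquire(struct kms *kms, struct atomic_state *state,
			const struct vm_ops *ops)
{
	int rc = 0;

	/* optional hook: invoked only when the backend registered it */
	if (ops->vm_resource_init)
		rc = ops->vm_resource_init(kms, state);

	return rc;
}

static int stub_init(struct kms *kms, struct atomic_state *state)
{
	(void)kms; (void)state;
	return 0;
}

int main(void)
{
	struct vm_ops trusted = { .vm_resource_init = stub_init };
	struct vm_ops other = { 0 };	/* a backend that registers no hook */

	printf("%d %d\n", post_acquire(NULL, NULL, &trusted),
	       post_acquire(NULL, NULL, &other));	/* prints 0 0 */
	return 0;
}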
@@ -2721,29 +2746,26 @@ static void sde_kms_vm_res_release(struct msm_kms *kms,
 		struct drm_atomic_state *state)
 {
 	struct drm_crtc *crtc;
-	struct drm_crtc_state *new_cstate, *old_cstate;
+	struct drm_crtc_state *new_cstate;
+	struct sde_crtc_state *cstate;
 	struct sde_vm_ops *vm_ops;
 	enum sde_crtc_vm_req vm_req;
 	struct sde_kms *sde_kms = to_sde_kms(kms);
-	int i;
-
-	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
-		struct sde_crtc_state *new_state;
-
-		if (!new_cstate->active && !old_cstate->active)
-			continue;
-
-		new_state = to_sde_crtc_state(new_cstate);
-		vm_req = sde_crtc_get_property(new_state,
-					CRTC_PROP_VM_REQ_STATE);
-		if (vm_req != VM_REQ_ACQUIRE)
-			return;
-	}
 
 	vm_ops = sde_vm_get_ops(sde_kms);
 	if (!vm_ops)
 		return;
 
+	crtc = sde_kms_vm_get_vm_crtc(state);
+	if (!crtc)
+		return;
+
+	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
+	cstate = to_sde_crtc_state(new_cstate);
+	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
+	if (vm_req != VM_REQ_ACQUIRE)
+		return;
+
 	sde_vm_lock(sde_kms);
 
 	if (vm_ops->vm_acquire_fail_handler)
@@ -2935,32 +2957,6 @@ static int _sde_kms_update_planes_for_cont_splash(struct sde_kms *sde_kms,
 	return 0;
 }
 
-static struct drm_display_mode *_sde_kms_get_splash_mode(
-		struct sde_kms *sde_kms, struct drm_connector *connector,
-		u32 display_idx)
-{
-	struct drm_display_mode *drm_mode = NULL, *curr_mode = NULL;
-	u32 i = 0, mode_index;
-
-	if (sde_kms->splash_data.type == SDE_SPLASH_HANDOFF) {
-		/* currently consider modes[0] as the preferred mode */
-		curr_mode = list_first_entry(&connector->modes,
-				struct drm_display_mode, head);
-	} else if (sde_kms->hw_mdp && sde_kms->hw_mdp->ops.get_mode_index) {
-		mode_index = sde_kms->hw_mdp->ops.get_mode_index(
-				sde_kms->hw_mdp, display_idx);
-		list_for_each_entry(drm_mode, &connector->modes, head) {
-			if (mode_index == i) {
-				curr_mode = drm_mode;
-				break;
-			}
-			i++;
-		}
-	}
-
-	return curr_mode;
-}
-
 static int sde_kms_inform_cont_splash_res_disable(struct msm_kms *kms,
 		struct dsi_display *dsi_display)
 {
@@ -3021,7 +3017,78 @@ static int sde_kms_inform_cont_splash_res_disable(struct msm_kms *kms,
 	return 0;
 }
 
-static int sde_kms_cont_splash_config(struct msm_kms *kms)
+static int sde_kms_vm_trusted_cont_splash_res_init(struct sde_kms *sde_kms)
+{
+	int i;
+	void *display;
+	struct dsi_display *dsi_display;
+	struct drm_encoder *encoder;
+
+	if (!sde_kms)
+		return -EINVAL;
+
+	if (!sde_in_trusted_vm(sde_kms))
+		return 0;
+
+	for (i = 0; i < sde_kms->dsi_display_count; i++) {
+		display = sde_kms->dsi_displays[i];
+		dsi_display = (struct dsi_display *)display;
+
+		if (!dsi_display->bridge->base.encoder) {
+			SDE_ERROR("no encoder on dsi display:%d\n", i);
+			return -EINVAL;
+		}
+
+		encoder = dsi_display->bridge->base.encoder;
+		encoder->possible_crtcs = 1 << i;
+
+		SDE_DEBUG(
+		"dsi-display:%d encoder id[%d]=%d name=%s crtcs=%x\n", i,
+				encoder->index, encoder->base.id,
+				encoder->name, encoder->possible_crtcs);
+	}
+
+	return 0;
+}
+
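sde_kms_vm_trusted_cont_splash_res_init() above pins each trusted-VM DSI display to one CRTC through encoder->possible_crtcs, which DRM defines as a bitmask of CRTC indexes the encoder may drive. A one-liner illustration of the mask math (plain C, no DRM types):

#include <stdio.h>

int main(void)
{
	/* display i may only drive CRTC index i: exactly bit i is set */
	for (unsigned int i = 0; i < 2; i++)
		printf("display %u -> possible_crtcs 0x%x\n", i, 1u << i);
	return 0;	/* prints 0x1, then 0x2 */
}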
+static struct drm_display_mode *_sde_kms_get_splash_mode(
+		struct sde_kms *sde_kms, struct drm_connector *connector,
+		struct drm_atomic_state *state)
+{
+	struct drm_display_mode *mode, *cur_mode = NULL;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *new_cstate, *old_cstate;
+	u32 i = 0;
+
+	if (sde_kms->splash_data.type == SDE_SPLASH_HANDOFF) {
+		list_for_each_entry(mode, &connector->modes, head) {
+			if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+				cur_mode = mode;
+				break;
+			}
+		}
+	} else if (state) {
+		/* get the mode from the first atomic_check phase for trusted_vm */
+		for_each_oldnew_crtc_in_state(state, crtc, old_cstate,
+				new_cstate, i) {
+
+			if (!new_cstate->active && !old_cstate->active)
+				continue;
+
+			list_for_each_entry(mode, &connector->modes, head) {
+				if (drm_mode_equal(&new_cstate->mode, mode)) {
+					cur_mode = mode;
+					break;
+				}
+			}
+		}
+	}
+
+	return cur_mode;
+}
+
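The rewritten _sde_kms_get_splash_mode() above no longer consults a scratch register: a boot-splash handoff walks the probed list for the DRM_MODE_TYPE_PREFERRED flag, while a trusted-VM handoff matches the mode user-space committed during the first atomic check via drm_mode_equal(). A toy mock of that match step, comparing a few timing fields instead of the full drm_display_mode:

#include <stdbool.h>
#include <stdio.h>

struct toy_mode {
	int hdisplay, vdisplay, vrefresh;
};

/* crude stand-in for drm_mode_equal(), which compares the full timings */
static bool toy_mode_equal(const struct toy_mode *a, const struct toy_mode *b)
{
	return a->hdisplay == b->hdisplay && a->vdisplay == b->vdisplay &&
	       a->vrefresh == b->vrefresh;
}

int main(void)
{
	struct toy_mode probed[] = { { 1080, 2400, 60 }, { 1080, 2400, 120 } };
	struct toy_mode committed = { 1080, 2400, 120 };  /* from atomic state */

	for (unsigned int i = 0; i < 2; i++)
		if (toy_mode_equal(&probed[i], &committed))
			printf("matched probed mode %u\n", i);	/* prints 1 */
	return 0;
}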
+static int sde_kms_cont_splash_config(struct msm_kms *kms,
+		struct drm_atomic_state *state)
 {
 	void *display;
 	struct dsi_display *dsi_display;
@@ -3050,6 +3117,12 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms)
 		return -EINVAL;
 	}
 
+	rc = sde_kms_vm_trusted_cont_splash_res_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("failed vm cont splash resource init, rc=%d\n", rc);
+		return -EINVAL;
+	}
+
 	if (((sde_kms->splash_data.type == SDE_SPLASH_HANDOFF)
 		&& (!sde_kms->splash_data.num_splash_regions)) ||
 			!sde_kms->splash_data.num_splash_displays) {
@@ -3102,9 +3175,9 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms)
 		encoder->crtc = priv->crtcs[i];
 		crtc = encoder->crtc;
 		splash_display->encoder =  encoder;
-
-		SDE_DEBUG("for dsi-display:%d crtc id = %d enc id =%d\n",
-				i, crtc->base.id, encoder->base.id);
+		SDE_DEBUG("for dsi-display:%d crtc id[%d]:%d enc id[%d]:%d\n",
+				i, crtc->index, crtc->base.id, encoder->index,
+				encoder->base.id);
 
 		mutex_lock(&dev->mode_config.mutex);
 		drm_connector_list_iter_begin(dev, &conn_iter);
@@ -3130,16 +3203,16 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms)
 
 		crtc->state->encoder_mask = (1 << drm_encoder_index(encoder));
 
-		drm_mode = _sde_kms_get_splash_mode(sde_kms, connector, i);
+		drm_mode = _sde_kms_get_splash_mode(sde_kms, connector, state);
 		if (!drm_mode) {
-			SDE_ERROR("invalid drm-mode type:%d, index:%d\n",
-				sde_kms->splash_data.type, i);
+			SDE_ERROR("drm_mode not found; handoff_type:%d\n",
+					sde_kms->splash_data.type);
 			return -EINVAL;
 		}
-
-		SDE_DEBUG("drm_mode->name = %s, type=0x%x, flags=0x%x\n",
+		SDE_DEBUG(
+		  "drm_mode->name:%s, type:0x%x, flags:0x%x, handoff_type:%d\n",
 				drm_mode->name, drm_mode->type,
-				drm_mode->flags);
+				drm_mode->flags, sde_kms->splash_data.type);
 
 		/* Update CRTC drm structure */
 		crtc->state->active = true;
@@ -4521,7 +4594,8 @@ void sde_kms_vm_trusted_resource_deinit(struct sde_kms *sde_kms)
 	memset(&sde_kms->splash_data, 0, sizeof(struct sde_splash_data));
 }
 
-int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms)
+int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms,
+		struct drm_atomic_state *state)
 {
 	struct drm_device *dev;
 	struct msm_drm_private *priv;
@@ -4534,12 +4608,6 @@ int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms)
 		return -EINVAL;
 	}
 
-	if (sde_kms->dsi_display_count != 1) {
-		SDE_ERROR("no. of displays not supported:%d\n",
-				sde_kms->dsi_display_count);
-		return -EINVAL;
-	}
-
 	dev = sde_kms->dev;
 	priv = dev->dev_private;
 	sde_kms->splash_data.type = SDE_VM_HANDOFF;
@@ -4547,6 +4615,10 @@ int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms)
 
 	ret = sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
 				&sde_kms->splash_data, sde_kms->catalog);
+	if (ret) {
+		SDE_ERROR("invalid cont splash init, ret:%d\n", ret);
+		return -EINVAL;
+	}
 
 	for (i = 0; i < sde_kms->dsi_display_count; i++) {
 		handoff_display = &sde_kms->splash_data.splash_display[i];
@@ -4558,7 +4630,13 @@ int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms)
 			dsi_display_set_active_state(display, true);
 	}
 
-	ret = sde_kms_cont_splash_config(&sde_kms->base);
+	if (sde_kms->splash_data.num_splash_displays != 1) {
+		SDE_ERROR("no. of displays not supported:%d\n",
+				sde_kms->splash_data.num_splash_displays);
+		goto error;
+	}
+
+	ret = sde_kms_cont_splash_config(&sde_kms->base, state);
 	if (ret) {
 		SDE_ERROR("error in setting handoff configs\n");
 		goto error;
@@ -4573,8 +4651,6 @@ int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms)
 	return 0;
 
 error:
-	sde_kms_vm_trusted_resource_deinit(sde_kms);
-
 	return ret;
 }
 
@@ -4593,18 +4669,24 @@ static int _sde_kms_register_events(struct msm_kms *kms,
 	}
 
 	sde_kms = to_sde_kms(kms);
-	vm_ops = sde_vm_get_ops(sde_kms);
-	sde_vm_lock(sde_kms);
-	if (vm_ops && vm_ops->vm_owns_hw && !vm_ops->vm_owns_hw(sde_kms)) {
-		sde_vm_unlock(sde_kms);
-		DRM_INFO("HW is owned by other VM\n");
-		return -EACCES;
-	}
 
+	/* check vm ownership, if event registration requires HW access */
 	switch (obj->type) {
 	case DRM_MODE_OBJECT_CRTC:
+		vm_ops = sde_vm_get_ops(sde_kms);
+		sde_vm_lock(sde_kms);
+
+		if (vm_ops && vm_ops->vm_owns_hw
+				&& !vm_ops->vm_owns_hw(sde_kms)) {
+			sde_vm_unlock(sde_kms);
+			SDE_DEBUG("HW is owned by other VM\n");
+			return -EACCES;
+		}
+
 		crtc = obj_to_crtc(obj);
 		ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
+
+		sde_vm_unlock(sde_kms);
 		break;
 	case DRM_MODE_OBJECT_CONNECTOR:
 		conn = obj_to_connector(obj);
@@ -4613,7 +4695,6 @@ static int _sde_kms_register_events(struct msm_kms *kms,
 		break;
 	}
 
-	sde_vm_unlock(sde_kms);
 	return ret;
 }
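The _sde_kms_register_events() rework above narrows the vm_owns_hw gate to the CRTC case, so connector event registration no longer fails with -EACCES while another VM owns the hardware; note the VM lock is now taken and released entirely within that one case. A toy model of the narrowed critical section, with a pthread mutex standing in for sde_vm_lock():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static int vm_owns_hw;			/* 0: another VM owns the HW */

enum obj_type { OBJ_CRTC, OBJ_CONNECTOR };

static int register_event(enum obj_type type)
{
	switch (type) {
	case OBJ_CRTC:
		/* HW access required: gate on ownership, lock only here */
		pthread_mutex_lock(&vm_lock);
		if (!vm_owns_hw) {
			pthread_mutex_unlock(&vm_lock);
			return -13;	/* -EACCES */
		}
		/* ... register the CRTC custom event ... */
		pthread_mutex_unlock(&vm_lock);
		break;
	case OBJ_CONNECTOR:
		/* no HW access: proceeds regardless of ownership */
		break;
	}
	return 0;
}

int main(void)
{
	printf("crtc=%d connector=%d\n",
	       register_event(OBJ_CRTC), register_event(OBJ_CONNECTOR));
	return 0;	/* prints crtc=-13 connector=0 */
}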
 

+ 3 - 1
msm/sde/sde_kms.h

@@ -704,9 +704,11 @@ int sde_kms_get_io_resources(struct sde_kms *kms, struct msm_io_res *io_res);
 /**
  * sde_kms_vm_trusted_resource_init - reserve/initialize the HW/SW resources
  * @sde_kms: pointer to sde_kms structure
+ * @state: current update atomic commit state
  * return: 0 on success; error code otherwise
  */
-int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms);
+int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms,
+		struct drm_atomic_state *state);
 
 /**
  * sde_kms_vm_trusted_resource_deinit - release the HW/SW resources

+ 9 - 0
msm/sde/sde_vm.h

@@ -166,6 +166,15 @@ struct sde_vm_ops {
 	 * @return - 0 on success, errorcode otherwise
 	 */
 	int (*vm_msg_send)(struct sde_vm *sde_vm, void *msg, size_t msg_size);
+
+	/**
+	 * vm_resource_init - hook to initialize the HW/SW resources once
+	 *                    the VM has accepted/reclaimed them
+	 * @sde_kms: handle to sde_kms
+	 * @state: current update atomic commit state
+	 * @return - 0 on success, errorcode otherwise
+	 */
+	int (*vm_resource_init)(struct sde_kms *sde_kms,
+			struct drm_atomic_state *state);
 };
 
 /**

+ 15 - 8
msm/sde/sde_vm_trusted.c

@@ -190,6 +190,18 @@ end:
 	return rc;
 }
 
+int _sde_vm_resource_init(struct sde_kms *sde_kms,
+		struct drm_atomic_state *state)
+{
+	int rc = 0;
+
+	rc = sde_kms_vm_trusted_resource_init(sde_kms, state);
+	if (rc)
+		SDE_ERROR("vm resource init failed\n");
+
+	return rc;
+}
+
 int _sde_vm_populate_res(struct sde_kms *sde_kms, struct sde_vm_trusted *vm)
 {
 	struct msm_io_res io_res;
@@ -384,18 +396,12 @@ static int _sde_vm_accept(struct sde_kms *kms)
 	if (rc)
 		goto res_accept_fail;
 
-	rc = sde_kms_vm_trusted_resource_init(kms);
-	if (rc) {
-		SDE_ERROR("vm resource init failed\n");
-		goto res_accept_fail;
-	}
-
-	goto end;
+	return 0;
 
 res_accept_fail:
 	_sde_vm_release_irq(kms->vm);
 	_sde_vm_release_mem(kms->vm);
-end:
+
 	return rc;
 }
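With resource init moved out to the commit path, _sde_vm_accept() above reduces to accepting memory and IRQs with a single failure label, and the old goto-end indirection disappears: success returns directly, failure unwinds whatever was accepted. A user-space toy of that unwind idiom:

#include <stdio.h>

static int accept_mem(void)	{ return 0; }
static int accept_irqs(void)	{ return -1; }	/* pretend this step fails */
static void release_irqs(void)	{ printf("release irqs\n"); }
static void release_mem(void)	{ printf("release mem\n"); }

int main(void)
{
	int rc;

	rc = accept_mem();
	if (rc)
		goto accept_fail;

	rc = accept_irqs();
	if (rc)
		goto accept_fail;

	return 0;			/* success path returns directly */

accept_fail:
	/* mirror res_accept_fail: release both IRQ and memory grants */
	release_irqs();
	release_mem();
	return rc;
}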
 
@@ -414,6 +420,7 @@ static void _sde_vm_set_ops(struct sde_vm_ops *ops)
 	ops->vm_request_valid = sde_vm_request_valid;
 	ops->vm_acquire_fail_handler = _sde_vm_release;
 	ops->vm_msg_send = sde_vm_msg_send;
+	ops->vm_resource_init = _sde_vm_resource_init;
 }
 
 int sde_vm_trusted_init(struct sde_kms *kms)

+ 8 - 0
msm/sde_hdcp_2x.c

@@ -480,6 +480,12 @@ static void sde_hdcp_2x_msg_sent(struct sde_hdcp_2x_ctrl *hdcp)
 
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, hdcp->authenticated,
 					hdcp->app_data.response.data[0]);
+
+	if (atomic_read(&hdcp->hdcp_off)) {
+		pr_debug("invalid state, hdcp off\n");
+		goto exit;
+	}
+
 	switch (hdcp->app_data.response.data[0]) {
 	case SKE_SEND_TYPE_ID:
 		if (!hdcp2_app_comm(hdcp->hdcp2_ctx,
@@ -531,6 +537,8 @@ static void sde_hdcp_2x_msg_sent(struct sde_hdcp_2x_ctrl *hdcp)
 	}
 
 	sde_hdcp_2x_wakeup_client(hdcp, &cdata);
+
+exit:
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, hdcp->authenticated);
 }
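The new guard in sde_hdcp_2x_msg_sent() above makes the handler bail out when HDCP was turned off between the work being queued and it running, rather than acting on response data for a dead session. A toy model of the check-the-flag-first pattern using C11 atomics (the kernel code uses atomic_t and atomic_read()):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int hdcp_off = 1;		/* pretend teardown already ran */

static void msg_sent_work(void)
{
	/* check the off flag before touching any session state */
	if (atomic_load(&hdcp_off)) {
		printf("invalid state, hdcp off\n");
		return;			/* the real handler jumps to exit */
	}
	/* ... process app_data.response ... */
}

int main(void)
{
	msg_sent_work();		/* prints the bail-out message */
	return 0;
}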
 

+ 4 - 32
msm/sde_rsc.c

@@ -63,21 +63,6 @@
 static struct sde_rsc_priv *rsc_prv_list[MAX_RSC_COUNT];
 static struct device *rpmh_dev[MAX_RSC_COUNT];
 
-static void sde_rsc_set_data_bus_mode(struct sde_power_handle *phandle, u32 tag)
-{
-	int i = 0, j = 0;
-
-	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
-		if (!phandle->data_bus_handle[i].bus_active_only)
-			continue;
-
-		for (j = 0; j < phandle->data_bus_handle[i].data_paths_cnt; j++)
-			icc_set_tag(phandle->data_bus_handle[i].data_bus_hdl[j],
-				    tag);
-
-	}
-}
-
 /**
  * sde_rsc_client_create() - create the client for sde rsc.
  * Different displays like DSI, HDMI, DP, WB, etc should call this
@@ -531,11 +516,8 @@ static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
 
 	if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
-		if (!rc) {
+		if (!rc)
 			rpmh_mode_solver_set(rsc->rpmh_dev, true);
-			sde_rsc_set_data_bus_mode(&rsc->phandle,
-						  QCOM_ICC_TAG_WAKE);
-		}
 	}
 
 	/* vsync wait not needed during VID->CMD switch (rev 4+ HW only) */
@@ -594,11 +576,8 @@ static int sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc,
 
 	if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CLK_STATE);
-		if (!rc) {
+		if (!rc)
 			rpmh_mode_solver_set(rsc->rpmh_dev, false);
-			sde_rsc_set_data_bus_mode(&rsc->phandle,
-						  QCOM_ICC_TAG_AMC);
-		}
 	}
 
 	/* indicate wait for vsync for cmd/vid to clk state switch */
@@ -684,13 +663,9 @@ static int sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
 
 	if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_VID_STATE);
-		if (!rc) {
+		if (!rc)
 			rpmh_mode_solver_set(rsc->rpmh_dev,
 				rsc->version >= SDE_RSC_REV_3);
-			sde_rsc_set_data_bus_mode(&rsc->phandle,
-				rsc->version >= SDE_RSC_REV_3 ?
-				QCOM_ICC_TAG_WAKE : QCOM_ICC_TAG_AMC);
-		}
 	}
 
 	/* vsync wait not needed during CMD->VID switch (rev 4+ HW only) */
@@ -774,11 +749,8 @@ static int sde_rsc_switch_to_idle(struct sde_rsc_priv *rsc,
 	} else if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_IDLE_STATE);
 		rsc->post_poms = false;
-		if (!rc) {
+		if (!rc)
 			rpmh_mode_solver_set(rsc->rpmh_dev, true);
-			sde_rsc_set_data_bus_mode(&rsc->phandle,
-						  QCOM_ICC_TAG_WAKE);
-		}
 	}
 
 	return rc;
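Every RSC state switch above loses its sde_rsc_set_data_bus_mode() call and the helper itself is deleted, so interconnect votes now keep the framework's default tag instead of being re-tagged to AMC or WAKE on each solver-mode change. For reference, a hedged standalone sketch of the pattern that was removed; icc_set_tag() is the real interconnect API, while the path handles and loop bounds here are stand-ins (tag values assumed to mirror dt-bindings/interconnect/qcom,icc.h):

#include <stdio.h>

#define QCOM_ICC_TAG_AMC	(1 << 0)
#define QCOM_ICC_TAG_WAKE	(1 << 1)

struct toy_path { unsigned int tag; };

/* stand-in for icc_set_tag(struct icc_path *path, u32 tag) */
static void toy_set_tag(struct toy_path *path, unsigned int tag)
{
	path->tag = tag;
}

int main(void)
{
	struct toy_path active_only_paths[2] = { { 0 }, { 0 } };

	/* the removed helper looped over active-only bus handles like this */
	for (int i = 0; i < 2; i++)
		toy_set_tag(&active_only_paths[i], QCOM_ICC_TAG_WAKE);

	printf("tags: 0x%x 0x%x\n", active_only_paths[0].tag,
	       active_only_paths[1].tag);	/* prints 0x2 0x2 */
	return 0;
}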