Prechádzať zdrojové kódy

Merge 2abf17e048f27f14d23fbc3512829b8006f4cc42 on remote branch

Change-Id: I588096a7b865d8be1f850f80d4141fef877df8cc
Linux Build Service Account 10 mesiacov pred
rodič
commit
6791773c64

+ 7 - 0
msm/dp/dp_display.c

@@ -2015,6 +2015,10 @@ static void dp_display_disconnect_work(struct work_struct *work)
 			struct dp_display_private, disconnect_work);
 
 	dp_display_handle_disconnect(dp, false);
+
+	if (dp->debug->sim_mode && dp_display_state_is(DP_STATE_ABORTED))
+		dp_display_host_deinit(dp);
+
 	dp->debug->abort(dp->debug);
 }
 
@@ -2030,6 +2034,9 @@ static int dp_display_usb_notifier(struct notifier_block *nb,
 		dp_display_state_add(DP_STATE_ABORTED);
 		dp->ctrl->abort(dp->ctrl, true);
 		dp->aux->abort(dp->aux, true);
+
+		dp->power->park_clocks(dp->power);
+
 		queue_work(dp->wq, &dp->disconnect_work);
 	}
 

+ 14 - 2
msm/dp/dp_power.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -33,6 +33,7 @@ struct dp_power_private {
 	bool strm1_clks_on;
 	bool strm0_clks_parked;
 	bool strm1_clks_parked;
+	bool link_clks_parked;
 };
 
 static int dp_power_regulator_init(struct dp_power_private *power)
@@ -316,6 +317,9 @@ static int dp_power_park_module(struct dp_power_private *power, enum dp_pm_type
 	} else if (module == DP_STREAM1_PM) {
 		clk = power->pixel1_clk_rcg;
 		parked = &power->strm1_clks_parked;
+	} else if (module == DP_LINK_PM) {
+		clk = power->link_clk_rcg;
+		parked = &power->link_clks_parked;
 	} else {
 		goto exit;
 	}
@@ -340,7 +344,7 @@ static int dp_power_park_module(struct dp_power_private *power, enum dp_pm_type
 		goto exit;
 	}
 
-	mp->clk_config->rate = XO_CLK_KHZ;
+	mp->clk_config->rate = XO_CLK_KHZ * 1000;
 	rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
 	if (rc) {
 		DP_ERR("failed to set clk rate.\n");
@@ -494,6 +498,8 @@ static int dp_power_clk_enable(struct dp_power *dp_power,
 		power->strm0_clks_parked = false;
 	if (pm_type == DP_STREAM1_PM)
 		power->strm1_clks_parked = false;
+	if (pm_type == DP_LINK_PM)
+		power->link_clks_parked = false;
 
 	/*
 	 * This log is printed only when user connects or disconnects
@@ -707,6 +713,12 @@ static int dp_power_park_clocks(struct dp_power *dp_power)
 		goto error;
 	}
 
+	rc = dp_power_park_module(power, DP_LINK_PM);
+	if (rc) {
+		DP_ERR("failed to park link clock. err=%d\n", rc);
+		goto error;
+	}
+
 error:
 	return rc;
 }

+ 1 - 1
msm/dsi/dsi_ctrl.c

@@ -467,7 +467,7 @@ static void dsi_ctrl_post_cmd_transfer_work(struct work_struct *work)
 	dsi_ctrl->post_tx_queued = false;
 }
 
-static void dsi_ctrl_flush_cmd_dma_queue(struct dsi_ctrl *dsi_ctrl)
+void dsi_ctrl_flush_cmd_dma_queue(struct dsi_ctrl *dsi_ctrl)
 {
 	/*
 	 * If a command is triggered right after another command,

+ 8 - 1
msm/dsi/dsi_ctrl.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -952,4 +952,11 @@ void dsi_ctrl_toggle_error_interrupt_status(struct dsi_ctrl *dsi_ctrl, bool enab
  * @dsi_ctrl:                 DSI controller handle.
  */
 void dsi_ctrl_transfer_cleanup(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_flush_cmd_dma_queue() - Wait for previous command transfer to complete
+ *				and flush any pending post_cmd_transfer works.
+ * @dsi_ctrl:                 DSI controller handle.
+ */
+void dsi_ctrl_flush_cmd_dma_queue(struct dsi_ctrl *dsi_ctrl);
 #endif /* _DSI_CTRL_H_ */

+ 16 - 1
msm/dsi/dsi_display.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -3450,6 +3450,17 @@ int dsi_host_transfer_sub(struct mipi_dsi_host *host, struct dsi_cmd_desc *cmd)
 
 	dsi_display_set_cmd_tx_ctrl_flags(display, cmd);
 
+	/*
+	 * Wait until any previous broadcast commands with ASYNC waits have been scheduled
+	 * and completed on both controllers.
+	 */
+	display_for_each_ctrl(i, display) {
+		ctrl = &display->ctrl[i];
+		if ((ctrl->ctrl->pending_cmd_flags & DSI_CTRL_CMD_BROADCAST) &&
+			ctrl->ctrl->post_tx_queued)
+			dsi_ctrl_flush_cmd_dma_queue(ctrl->ctrl);
+	}
+
 	if (cmd->ctrl_flags & DSI_CTRL_CMD_BROADCAST) {
 		rc = dsi_display_broadcast_cmd(display, cmd);
 		if (rc) {
@@ -8594,6 +8605,10 @@ static int dsi_display_set_roi(struct dsi_display *display,
 		if (!changed)
 			continue;
 
+		rc = dsi_ctrl_wait_for_cmd_mode_mdp_idle(ctrl->ctrl);
+		if (rc)
+			DSI_ERR("wait for cmd mode mdp idle failed rc = %d", rc);
+
 		/* re-program the ctrl with the timing based on the new roi */
 		rc = dsi_ctrl_timing_setup(ctrl->ctrl);
 		if (rc) {

+ 15 - 11
msm/msm_atomic.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2014 Red Hat
  * Author: Rob Clark <[email protected]>
@@ -117,11 +117,23 @@ static inline bool _msm_seamless_for_conn(struct drm_connector *connector,
 	if (!old_conn_state || !old_conn_state->crtc)
 		return false;
 
+	if (!priv || !priv->kms || !priv->kms->funcs->get_msm_mode)
+		return false;
+
+	msm_mode = priv->kms->funcs->get_msm_mode(
+			_msm_get_conn_state(old_conn_state->crtc->state));
+	if (!msm_mode)
+		return false;
+
 	if (!old_conn_state->crtc->state->mode_changed &&
 			!old_conn_state->crtc->state->active_changed &&
 			old_conn_state->crtc->state->connectors_changed) {
-		if (old_conn_state->crtc == connector->state->crtc)
+		if (old_conn_state->crtc == connector->state->crtc) {
+			if (enable && msm_is_private_mode_changed(
+				_msm_get_conn_state(old_conn_state->crtc->state)))
+				return false;
 			return true;
+		}
 	}
 
 	if (enable)
@@ -131,14 +143,6 @@ static inline bool _msm_seamless_for_conn(struct drm_connector *connector,
 		old_conn_state->crtc->state->connectors_changed)
 		return false;
 
-	if (!priv || !priv->kms || !priv->kms->funcs->get_msm_mode)
-		return false;
-
-	msm_mode = priv->kms->funcs->get_msm_mode(
-			_msm_get_conn_state(old_conn_state->crtc->state));
-	if (!msm_mode)
-		return false;
-
 	if (msm_is_mode_seamless(msm_mode) ||
 		msm_is_mode_seamless_vrr(msm_mode) ||
 		msm_is_mode_seamless_dyn_clk(msm_mode) ||
@@ -329,7 +333,7 @@ msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 		if (!new_crtc_state->mode_changed &&
 				new_crtc_state->connectors_changed) {
 			if (_msm_seamless_for_conn(connector,
-					old_conn_state, false))
+					old_conn_state, true))
 				continue;
 		} else if (!new_crtc_state->mode_changed) {
 			if (!msm_is_private_mode_changed(

+ 46 - 0
rotator/sde_rotator_core.c

@@ -2123,6 +2123,46 @@ static int sde_rotator_add_request(struct sde_rot_mgr *mgr,
 	return 0;
 }
 
+static void sde_rotator_complete_hwactive_job(struct sde_rot_mgr *mgr,
+		struct sde_rot_entry_container *req)
+{
+	struct kthread_work *commit_work;
+	struct kthread_work *done_work;
+	struct sde_rot_entry *entry;
+	struct sde_rot_hw_resource *hw;
+	struct sde_rot_queue *queue;
+	int i;
+
+	if (!mgr || !req) {
+		SDEROT_ERR("invalid params\n");
+		return;
+	}
+
+	for (i = 0; i < req->count; i++) {
+		entry = &req->entries[i];
+		if (!entry)
+			continue;
+
+		queue =	entry->commitq;
+		if (!queue || !queue->hw)
+			continue;
+
+		commit_work = &entry->commit_work;
+		done_work = &entry->done_work;
+		hw = queue->hw;
+		SDEROT_EVTLOG(req->count, atomic_read(&req->pending_count),
+			atomic_read(&hw->num_active));
+		if (atomic_read(&hw->num_active)) {
+			sde_rot_mgr_unlock(mgr);
+			kthread_flush_work(commit_work);
+			kthread_flush_work(done_work);
+			sde_rot_mgr_lock(mgr);
+		}
+		SDEROT_EVTLOG(req->count, atomic_read(&req->pending_count),
+			atomic_read(&hw->num_active));
+	}
+}
+
 void sde_rotator_remove_request(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private,
 	struct sde_rot_entry_container *req)
@@ -2146,6 +2186,11 @@ static void sde_rotator_cancel_request(struct sde_rot_mgr *mgr,
 	struct sde_rot_entry *entry;
 	int i;
 
+	/*
+	 * Flush any active work items before
+	 * issuing a cancel.
+	 */
+	sde_rotator_complete_hwactive_job(mgr, req);
 	if (atomic_read(&req->pending_count)) {
 		/*
 		 * To avoid signal the rotation entry output fence in the wrong
@@ -2153,6 +2198,7 @@ static void sde_rotator_cancel_request(struct sde_rot_mgr *mgr,
 		 * canceled first, before signaling the output fence.
 		 */
 		SDEROT_DBG("cancel work start\n");
+		SDEROT_EVTLOG(atomic_read(&req->pending_count));
 		sde_rot_mgr_unlock(mgr);
 		for (i = req->count - 1; i >= 0; i--) {
 			entry = req->entries + i;