Merge remote-tracking branch 'origin/display-kernel.lnx.5.15' into display-kernel.lnx.1.0

* origin/display-kernel.lnx.5.15:
  disp: msm: update RSC bandwidth during solver mode transition
  disp: msm: sde: fix null pointer dereference issue
  disp: msm: dp: resend hpd notification to usermode
  disp: msm: dp: skip waits when processing usb disconnect in sim mode
  disp: msm: dp: free DP sim ports during DP sim disable
  Revert "disp: msm: dsi: increase cmd dma timeout to 1200 milliseconds"
  disp: msm: dp: add debug node to capture source and sink crc
  disp: msm: sde: use new connector state for topology checks
  disp: msm: uapi: increase SDE_FRAME_DATA_MAX_PLANES size
  disp: msm: sde: skip msm_lastclose if display is stuck in splash
  disp: msm: cancel all delayed_works before triggering msm_lastclose
  disp: msm: sde: bound event log traversal to allocated memory in coredump
  disp: msm: sde: add crtc width restriction when 3d-merge is enabled
  disp: msm: dsi: avoid DSI PHY shutdown during idle
  disp: msm: dsi: reorder various resets of DSI PHY
  disp: msm: dsi: increase cmd dma timeout to 1200 milliseconds
  disp: msm: sde: convert ubwc stats roi into blob property
  disp: msm: sde: update hw configs on dnsc_blur disable
  disp: msm: sde: add check to avoid NULL WB output fb
  disp: msm: dp: remove register call for regdump framework for DP domains
  disp: msm: sde: disable spr and demura for secondary panel in trusted vm
  disp: msm: dp: remove disconnect call for downstream port status change
  disp: msm: sde: use mode from new state during CP check phase
  disp: msm: sde: enable encoder resources before phys enc disable
  disp: msm: sde: reduce stack size in _sde_crtc_check_rois
  disp: msm: dp: address race condition in LM allocation

Change-Id: Ia2f228316d38721227b643cd7b25e740c8917c6c
Signed-off-by: Ashwin Pillai <[email protected]>
Ashwin Pillai committed 2 years ago
commit 4afb745c53

+ 1 - 1
include/uapi/display/drm/sde_drm.h

@@ -580,7 +580,7 @@ struct sde_drm_ubwc_stats_data {
  */
 #define SDE_FRAME_DATA_BUFFER_MAX	0x3
 #define SDE_FRAME_DATA_GUARD_BYTES	0xFF
-#define SDE_FRAME_DATA_MAX_PLANES	0x10
+#define SDE_FRAME_DATA_MAX_PLANES	0x14
 
 /**
  * struct sde_drm_frame_data_buffers_ctrl - control frame data buffers

+ 7 - 4
msm/dp/dp_audio.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -769,7 +770,7 @@ end:
 	return rc;
 }
 
-static int dp_audio_off(struct dp_audio *dp_audio)
+static int dp_audio_off(struct dp_audio *dp_audio, bool skip_wait)
 {
 	int rc = 0;
 	struct dp_audio_private *audio;
@@ -794,9 +795,11 @@ static int dp_audio_off(struct dp_audio *dp_audio)
 	if (work_pending)
 		DP_DEBUG("pending notification work completed\n");
 
-	rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_DISCONNECT);
-	if (rc)
-		goto end;
+	if (!skip_wait) {
+		rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_DISCONNECT);
+		if (rc)
+			goto end;
+	}
 
 	DP_DEBUG("success\n");
 end:

+ 3 - 1
msm/dp/dp_audio.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
@@ -41,10 +42,11 @@ struct dp_audio {
 	 * playback should be stopped on the external display.
 	 *
 	 * @dp_audio: an instance of struct dp_audio.
+	 * @skip_wait: flag to skip any waits
 	 *
 	 * Returns the error code in case of failure, 0 in success case.
 	 */
-	int (*off)(struct dp_audio *dp_audio);
+	int (*off)(struct dp_audio *dp_audio, bool skip_wait);
 };
 
 /**

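For context, a condensed sketch (not taken from the diff) of how callers are expected to drive the new skip_wait flag, using the struct and field names introduced elsewhere in this change set:

    /* Sketch only: tear down DP audio, optionally skipping the
     * cable-disconnect notification wait. */
    static void audio_teardown_sketch(struct dp_panel *dp_panel, bool skip_wait)
    {
        if (dp_panel->audio_supported)
            dp_panel->audio->off(dp_panel->audio, skip_wait);
    }

The only path passing true is the simulated-sink USB-disconnect handling in dp_display.c later in this diff, where there is no real sink left to wait on.
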
+ 107 - 0
msm/dp/dp_catalog.c

@@ -1118,6 +1118,7 @@ static void dp_catalog_panel_config_ctrl(struct dp_catalog_panel *panel,
 	struct dp_catalog_private *catalog;
 	struct dp_io_data *io_data;
 	u32 strm_reg_off = 0, mainlink_ctrl;
+	u32 reg;
 
 	if (!panel) {
 		DP_ERR("invalid input\n");
@@ -1150,6 +1151,10 @@ static void dp_catalog_panel_config_ctrl(struct dp_catalog_panel *panel,
 		dp_write(MMSS_DP_ASYNC_FIFO_CONFIG, 0x01);
 	else
 		dp_write(MMSS_DP_ASYNC_FIFO_CONFIG, 0x00);
+
+	reg = dp_read(MMSS_DP_TIMING_ENGINE_EN);
+	reg |= BIT(8);
+	dp_write(MMSS_DP_TIMING_ENGINE_EN, reg);
 }
 
 static void dp_catalog_panel_config_dto(struct dp_catalog_panel *panel,
@@ -1419,6 +1424,77 @@ static void dp_catalog_ctrl_usb_reset(struct dp_catalog_ctrl *ctrl, bool flip)
 	wmb();
 }
 
+static int dp_catalog_ctrl_setup_misr(struct dp_catalog_ctrl *ctrl)
+{
+	struct dp_catalog_private *catalog;
+	struct dp_io_data *io_data;
+	u32 val;
+
+	if (!ctrl) {
+		DP_ERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	catalog = dp_catalog_get_priv(ctrl);
+
+	io_data = catalog->io.dp_phy;
+	dp_write(DP_PHY_MISR_CTRL, 0x3);
+	/* make sure misr hw is reset */
+	wmb();
+	dp_write(DP_PHY_MISR_CTRL, 0x1);
+	/* make sure misr is brought out of reset */
+	wmb();
+
+	io_data = catalog->io.dp_link;
+	val = 1;	// frame count
+	val |= BIT(10); // clear status
+	val |= BIT(8);  // enable
+	dp_write(DP_MISR40_CTRL, val);
+	/* make sure misr control is applied */
+	wmb();
+
+	return 0;
+}
+
+static int dp_catalog_ctrl_read_misr(struct dp_catalog_ctrl *ctrl, struct dp_misr40_data *data)
+{
+	struct dp_catalog_private *catalog;
+	struct dp_io_data *io_data;
+	u32 val;
+	int i, j;
+	u32 addr;
+
+	if (!ctrl) {
+		DP_ERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	catalog = dp_catalog_get_priv(ctrl);
+
+	io_data = catalog->io.dp_phy;
+	val = dp_read(DP_PHY_MISR_STATUS);
+	if (!val) {
+		DP_WARN("phy misr not ready!");
+		return -EAGAIN;
+	}
+
+	addr = DP_PHY_MISR_TX0;
+	for (i = 0; i < 8; i++) {
+		data->phy_misr[i] = 0;
+		for (j = 0; j < 4; j++) {
+			val = dp_read(addr) & 0xff;
+			data->phy_misr[i] |= val << (j * 8);
+			addr += 4;
+		}
+	}
+
+	io_data = catalog->io.dp_link;
+	for (i = 0; i < 8; i++)
+		data->ctrl_misr[i] = dp_read(DP_MISR40_TX0 + (i * 4));
+
+	return 0;
+}
+
 static void dp_catalog_panel_tpg_cfg(struct dp_catalog_panel *panel, u32 pattern)
 {
 	struct dp_catalog_private *catalog;
@@ -1627,6 +1703,34 @@ static bool dp_catalog_panel_dhdr_busy(struct dp_catalog_panel *panel)
 	return dp_flush & BIT(DP_DHDR_FLUSH) ? true : false;
 }
 
+static int dp_catalog_panel_get_src_crc(struct dp_catalog_panel *panel, u16 *crc)
+{
+	struct dp_catalog_private *catalog;
+	struct dp_io_data *io_data;
+	u32 offset;
+	u32 reg;
+
+	if (panel->stream_id >= DP_STREAM_MAX) {
+		DP_ERR("invalid stream_id:%d\n", panel->stream_id);
+		return -EINVAL;
+	}
+
+	catalog = dp_catalog_get_priv(panel);
+	io_data = catalog->io.dp_link;
+
+	if (panel->stream_id == DP_STREAM_0)
+		offset = MMSS_DP_PSR_CRC_RG;
+	else
+		offset = MMSS_DP1_CRC_RG;
+
+	reg = dp_read(offset); //GR
+	crc[0] = reg & 0xffff;
+	crc[1] = reg >> 16;
+	crc[2] = dp_read(offset + 4); //B
+
+	return 0;
+}
+
 static void dp_catalog_ctrl_reset(struct dp_catalog_ctrl *ctrl)
 {
 	u32 sw_reset;
@@ -2896,6 +3000,8 @@ struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_parser *parser)
 		.fec_config = dp_catalog_ctrl_fec_config,
 		.mainlink_levels = dp_catalog_ctrl_mainlink_levels,
 		.late_phy_init = dp_catalog_ctrl_late_phy_init,
+		.setup_misr = dp_catalog_ctrl_setup_misr,
+		.read_misr = dp_catalog_ctrl_read_misr,
 	};
 	struct dp_catalog_hpd hpd = {
 		.config_hpd	= dp_catalog_hpd_config_hpd,
@@ -2925,6 +3031,7 @@ struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_parser *parser)
 		.pps_flush = dp_catalog_panel_pps_flush,
 		.dhdr_flush = dp_catalog_panel_dhdr_flush,
 		.dhdr_busy = dp_catalog_panel_dhdr_busy,
+		.get_src_crc = dp_catalog_panel_get_src_crc,
 	};
 
 	if (!dev || !parser) {

+ 8 - 0
msm/dp/dp_catalog.h

@@ -46,6 +46,11 @@ struct dp_catalog_vsc_sdp_colorimetry {
 	u8 data[32];
 };
 
+struct dp_misr40_data {
+	u32 ctrl_misr[8];
+	u32 phy_misr[8];
+};
+
 struct dp_catalog_aux {
 	u32 data;
 	u32 isr;
@@ -103,6 +108,8 @@ struct dp_catalog_ctrl {
 
 	int (*late_phy_init)(struct dp_catalog_ctrl *ctrl,
 					u8 lane_cnt, bool flipped);
+	int (*setup_misr)(struct dp_catalog_ctrl *ctrl);
+	int (*read_misr)(struct dp_catalog_ctrl *ctrl, struct dp_misr40_data *data);
 };
 
 struct dp_catalog_hpd {
@@ -221,6 +228,7 @@ struct dp_catalog_panel {
 	void (*pps_flush)(struct dp_catalog_panel *panel);
 	void (*dhdr_flush)(struct dp_catalog_panel *panel);
 	bool (*dhdr_busy)(struct dp_catalog_panel *panel);
+	int (*get_src_crc)(struct dp_catalog_panel *panel, u16 *crc);
 };
 
 struct dp_catalog;

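Taken together, the two new ctrl ops form a setup-then-poll pair. A minimal usage sketch (not from the diff; cat_ctrl is assumed to be a valid struct dp_catalog_ctrl pointer, and the 30 ms delay mirrors the debugfs reader added in dp_debug.c):

    static int misr_capture_sketch(struct dp_catalog_ctrl *cat_ctrl,
            struct dp_misr40_data *misr)
    {
        int rc;

        /* Reset and arm both the PHY and link MISR generators. */
        rc = cat_ctrl->setup_misr(cat_ctrl);
        if (rc)
            return rc;

        /* Let at least one frame accumulate before sampling. */
        msleep(30);

        /* -EAGAIN means the PHY MISR status has not latched yet; retry. */
        return cat_ctrl->read_misr(cat_ctrl, misr);
    }
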
+ 27 - 0
msm/dp/dp_ctrl.c

@@ -1308,6 +1308,7 @@ static int dp_ctrl_stream_on(struct dp_ctrl *dp_ctrl, struct dp_panel *panel)
 	/* wait for link training completion before fec config as per spec */
 	dp_ctrl_fec_setup(ctrl);
 	dp_ctrl_dsc_setup(ctrl, panel);
+	panel->sink_crc_enable(panel, true);
 
 	return rc;
 }
@@ -1515,6 +1516,30 @@ void dp_ctrl_set_sim_mode(struct dp_ctrl *dp_ctrl, bool en)
 	DP_INFO("sim_mode=%d\n", ctrl->sim_mode);
 }
 
+int dp_ctrl_setup_misr(struct dp_ctrl *dp_ctrl)
+{
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl)
+		return -EINVAL;
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	return ctrl->catalog->setup_misr(ctrl->catalog);
+}
+
+int dp_ctrl_read_misr(struct dp_ctrl *dp_ctrl, struct dp_misr40_data *data)
+{
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl)
+		return -EINVAL;
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	return ctrl->catalog->read_misr(ctrl->catalog, data);
+}
+
 struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in)
 {
 	int rc = 0;
@@ -1565,6 +1590,8 @@ struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in)
 	dp_ctrl->stream_pre_off = dp_ctrl_stream_pre_off;
 	dp_ctrl->set_mst_channel_info = dp_ctrl_set_mst_channel_info;
 	dp_ctrl->set_sim_mode = dp_ctrl_set_sim_mode;
+	dp_ctrl->setup_misr = dp_ctrl_setup_misr;
+	dp_ctrl->read_misr = dp_ctrl_read_misr;
 
 	return dp_ctrl;
 error:

+ 3 - 0
msm/dp/dp_ctrl.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -32,6 +33,8 @@ struct dp_ctrl {
 			enum dp_stream_id strm,
 			u32 ch_start_slot, u32 ch_tot_slots);
 	void (*set_sim_mode)(struct dp_ctrl *dp_ctrl, bool en);
+	int (*setup_misr)(struct dp_ctrl *dp_ctrl);
+	int (*read_misr)(struct dp_ctrl *dp_ctrl, struct dp_misr40_data *data);
 };
 
 struct dp_ctrl_in {

+ 101 - 1
msm/dp/dp_debug.c

@@ -121,6 +121,8 @@ static void dp_debug_disable_sim_mode(struct dp_debug_private *debug,
 	debug->sim_mode &= ~mode_mask;
 	dp_sim_set_sim_mode(debug->sim_bridge, debug->sim_mode);
 
+	dp_sim_update_port_num(debug->sim_bridge, 0);
+
 	/* switch to normal mode */
 	if (!debug->sim_mode)
 		debug->aux->set_sim_mode(debug->aux, NULL);
@@ -333,7 +335,7 @@ static ssize_t dp_debug_read_dpcd(struct file *file,
 		}
 	}
 
-	len += scnprintf(buf + len , buf_size - len, "%04x: ", debug->dpcd_offset);
+	len += scnprintf(buf + len, buf_size - len, "%04x: ", debug->dpcd_offset);
 
 	while (offset < debug->dpcd_size)
 		len += scnprintf(buf + len, buf_size - len, "%02x ", dpcd[offset++]);
@@ -351,6 +353,90 @@ bail:
 	return len;
 }
 
+static ssize_t dp_debug_read_crc(struct file *file, char __user *user_buff, size_t count,
+		loff_t *ppos)
+{
+	struct dp_debug_private *debug = file->private_data;
+	char *buf;
+	int const buf_size = SZ_4K;
+	u32 len = 0;
+	u16 src_crc[3] = {0};
+	u16 sink_crc[3] = {0};
+	struct dp_misr40_data misr40 = {0};
+	u32 retries = 2;
+	struct drm_connector *drm_conn;
+	struct sde_connector *sde_conn;
+	struct dp_panel *panel;
+	int i;
+	int rc;
+
+	if (!debug || !debug->aux)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&debug->lock);
+
+	if (!debug->panel || !debug->ctrl)
+		goto bail;
+
+	if (debug->panel->mst_state) {
+		drm_conn = drm_connector_lookup((*debug->connector)->dev, NULL, debug->mst_con_id);
+		if (!drm_conn) {
+			DP_ERR("connector %u not in mst list\n", debug->mst_con_id);
+			goto bail;
+		}
+
+		sde_conn = to_sde_connector(drm_conn);
+		panel = sde_conn->drv_panel;
+	} else {
+		panel = debug->panel;
+	}
+
+	panel->get_src_crc(panel, src_crc);
+	panel->get_sink_crc(panel, sink_crc);
+
+	len += scnprintf(buf + len, buf_size - len, "FRAME_CRC:\nSource vs Sink\n");
+
+	len += scnprintf(buf + len, buf_size - len, "CRC_R: %04X %04X\n", src_crc[0], sink_crc[0]);
+	len += scnprintf(buf + len, buf_size - len, "CRC_G: %04X %04X\n", src_crc[1], sink_crc[1]);
+	len += scnprintf(buf + len, buf_size - len, "CRC_B: %04X %04X\n", src_crc[2], sink_crc[2]);
+
+	debug->ctrl->setup_misr(debug->ctrl);
+
+	while (retries--) {
+		mutex_unlock(&debug->lock);
+		msleep(30);
+		mutex_lock(&debug->lock);
+
+		rc = debug->ctrl->read_misr(debug->ctrl, &misr40);
+		if (rc != -EAGAIN)
+			break;
+	}
+
+	len += scnprintf(buf + len, buf_size - len, "\nMISR40:\nCTLR vs PHY\n");
+	for (i = 0; i < 4; i++) {
+		len += scnprintf(buf + len, buf_size - len, "Lane%d %08X%08X %08X%08X\n", i,
+				misr40.ctrl_misr[2 * i], misr40.ctrl_misr[(2 * i) + 1],
+				misr40.phy_misr[2 * i], misr40.phy_misr[(2 * i) + 1]);
+	}
+
+	len = min_t(size_t, count, len);
+	if (!copy_to_user(user_buff, buf, len))
+		*ppos += len;
+
+bail:
+	mutex_unlock(&debug->lock);
+	kfree(buf);
+
+	return len;
+}
+
 static ssize_t dp_debug_write_hpd(struct file *file,
 		const char __user *user_buff, size_t count, loff_t *ppos)
 {
@@ -1871,6 +1957,11 @@ static const struct file_operations dpcd_fops = {
 	.read = dp_debug_read_dpcd,
 };
 
+static const struct file_operations crc_fops = {
+	.open = simple_open,
+	.read = dp_debug_read_crc,
+};
+
 static const struct file_operations connected_fops = {
 	.open = simple_open,
 	.read = dp_debug_read_connected,
@@ -2104,6 +2195,13 @@ static int dp_debug_init_sink_caps(struct dp_debug_private *debug,
 		return rc;
 	}
 
+	file = debugfs_create_file("crc", 0644, dir, debug, &crc_fops);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
+		DP_ERR("[%s] debugfs crc failed, rc=%d\n", DEBUG_NAME, rc);
+		return rc;
+	}
+
 	return rc;
 }
 
@@ -2370,6 +2468,8 @@ static void dp_debug_abort(struct dp_debug *dp_debug)
 	debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
 
 	mutex_lock(&debug->lock);
+	// disconnect has already been handled. so clear hotplug
+	debug->hotplug = false;
 	dp_debug_set_sim_mode(debug, false);
 	mutex_unlock(&debug->lock);
 }

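For reference, a successful read of the new crc node is laid out by the handler above roughly as follows (all hex values here are placeholders, not captured output):

    FRAME_CRC:
    Source vs Sink
    CRC_R: 1A2B 1A2B
    CRC_G: 3C4D 3C4D
    CRC_B: 5E6F 5E6F

    MISR40:
    CTLR vs PHY
    Lane0 0011223344556677 0011223344556677
    Lane1 0011223344556677 0011223344556677
    Lane2 0011223344556677 0011223344556677
    Lane3 0011223344556677 0011223344556677
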
+ 102 - 73
msm/dp/dp_display.c

@@ -202,6 +202,7 @@ struct dp_display_private {
 	struct work_struct connect_work;
 	struct work_struct attention_work;
 	struct mutex session_lock;
+	struct mutex accounting_lock;
 	bool hdcp_delayed_off;
 	bool no_aux_switch;
 
@@ -209,6 +210,7 @@ struct dp_display_private {
 	struct dp_mst mst;
 
 	u32 tot_dsc_blks_in_use;
+	u32 tot_lm_blks_in_use;
 
 	bool process_hpd_connect;
 	struct dev_pm_qos_request pm_qos_req[NR_CPUS];
@@ -291,7 +293,7 @@ static void dp_audio_enable(struct dp_display_private *dp, bool enable)
 					dp->link->link_params.lane_count;
 				dp_panel->audio->on(dp_panel->audio);
 			} else {
-				dp_panel->audio->off(dp_panel->audio);
+				dp_panel->audio->off(dp_panel->audio, false);
 			}
 		}
 	}
@@ -915,7 +917,7 @@ static bool dp_display_send_hpd_event(struct dp_display_private *dp)
 	return true;
 }
 
-static int dp_display_send_hpd_notification(struct dp_display_private *dp)
+static int dp_display_send_hpd_notification(struct dp_display_private *dp, bool skip_wait)
 {
 	int ret = 0;
 	bool hpd = !!dp_display_state_is(DP_STATE_CONNECTED);
@@ -974,15 +976,25 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp)
 		goto skip_wait;
 	}
 
-	if (hpd && dp->mst.mst_active)
+	if (skip_wait || (hpd && dp->mst.mst_active))
 		goto skip_wait;
 
 	if (!dp->mst.mst_active &&
 			(!!dp_display_state_is(DP_STATE_ENABLED) == hpd))
 		goto skip_wait;
 
-	if (!wait_for_completion_timeout(&dp->notification_comp,
-						HZ * 5)) {
+	// wait 2 seconds
+	if (wait_for_completion_timeout(&dp->notification_comp, HZ * 2))
+		goto skip_wait;
+
+	//resend notification
+	if (dp->mst.mst_active)
+		dp->mst.cbs.hpd(&dp->dp_display, hpd);
+	else
+		dp_display_send_hpd_event(dp);
+
+	// wait another 3 seconds
+	if (!wait_for_completion_timeout(&dp->notification_comp, HZ * 3)) {
 		DP_WARN("%s timeout\n", hpd ? "connect" : "disconnect");
 		ret = -EINVAL;
 	}
@@ -1319,7 +1331,7 @@ end:
 	}
 
 	if (!rc && !dp_display_state_is(DP_STATE_ABORTED))
-		dp_display_send_hpd_notification(dp);
+		dp_display_send_hpd_notification(dp, false);
 
 skip_notify:
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state,
@@ -1327,7 +1339,7 @@ skip_notify:
 	return rc;
 }
 
-static void dp_display_process_mst_hpd_low(struct dp_display_private *dp)
+static void dp_display_process_mst_hpd_low(struct dp_display_private *dp, bool skip_wait)
 {
 	int rc = 0;
 
@@ -1344,7 +1356,7 @@ static void dp_display_process_mst_hpd_low(struct dp_display_private *dp)
 
 		if ((dp_display_state_is(DP_STATE_CONNECT_NOTIFIED) ||
 				dp_display_state_is(DP_STATE_ENABLED)))
-			rc = dp_display_send_hpd_notification(dp);
+			rc = dp_display_send_hpd_notification(dp, skip_wait);
 
 		dp_display_set_mst_mgr_state(dp, false);
 		dp_display_update_mst_state(dp, false);
@@ -1353,7 +1365,7 @@ static void dp_display_process_mst_hpd_low(struct dp_display_private *dp)
 	DP_MST_DEBUG("mst_hpd_low. mst_active:%d\n", dp->mst.mst_active);
 }
 
-static int dp_display_process_hpd_low(struct dp_display_private *dp)
+static int dp_display_process_hpd_low(struct dp_display_private *dp, bool skip_wait)
 {
 	int rc = 0;
 
@@ -1362,11 +1374,11 @@ static int dp_display_process_hpd_low(struct dp_display_private *dp)
 	dp_audio_enable(dp, false);
 
 	if (dp->mst.mst_active) {
-		dp_display_process_mst_hpd_low(dp);
+		dp_display_process_mst_hpd_low(dp, skip_wait);
 	} else {
 		if ((dp_display_state_is(DP_STATE_CONNECT_NOTIFIED) ||
 				dp_display_state_is(DP_STATE_ENABLED)))
-			rc = dp_display_send_hpd_notification(dp);
+			rc = dp_display_send_hpd_notification(dp, skip_wait);
 	}
 
 	mutex_lock(&dp->session_lock);
@@ -1483,6 +1495,25 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
 	return 0;
 }
 
+static void dp_display_clear_reservation(struct dp_display *dp, struct dp_panel *panel)
+{
+	struct dp_display_private *dp_display;
+
+	if (!dp || !panel) {
+		DP_ERR("invalid params\n");
+		return;
+	}
+
+	dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+	mutex_lock(&dp_display->accounting_lock);
+
+	dp_display->tot_lm_blks_in_use -= panel->max_lm;
+	panel->max_lm = 0;
+
+	mutex_unlock(&dp_display->accounting_lock);
+}
+
 static void dp_display_clear_dsc_resources(struct dp_display_private *dp,
 		struct dp_panel *panel)
 {
@@ -1530,7 +1561,7 @@ static void dp_display_stream_disable(struct dp_display_private *dp,
 	dp->active_stream_cnt--;
 }
 
-static void dp_display_clean(struct dp_display_private *dp)
+static void dp_display_clean(struct dp_display_private *dp, bool skip_wait)
 {
 	int idx;
 	struct dp_panel *dp_panel;
@@ -1558,10 +1589,12 @@ static void dp_display_clean(struct dp_display_private *dp)
 
 		dp_panel = dp->active_panels[idx];
 		if (dp_panel->audio_supported)
-			dp_panel->audio->off(dp_panel->audio);
+			dp_panel->audio->off(dp_panel->audio, skip_wait);
 
-		dp_display_stream_pre_disable(dp, dp_panel);
+		if (!skip_wait)
+			dp_display_stream_pre_disable(dp, dp_panel);
 		dp_display_stream_disable(dp, dp_panel);
+		dp_display_clear_reservation(&dp->dp_display, dp_panel);
 		dp_panel->deinit(dp_panel, 0);
 	}
 
@@ -1571,12 +1604,12 @@ static void dp_display_clean(struct dp_display_private *dp)
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
 }
 
-static int dp_display_handle_disconnect(struct dp_display_private *dp)
+static int dp_display_handle_disconnect(struct dp_display_private *dp, bool skip_wait)
 {
 	int rc;
 
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
-	rc = dp_display_process_hpd_low(dp);
+	rc = dp_display_process_hpd_low(dp, skip_wait);
 	if (rc) {
 		/* cancel any pending request */
 		dp->ctrl->abort(dp->ctrl, true);
@@ -1585,10 +1618,12 @@ static int dp_display_handle_disconnect(struct dp_display_private *dp)
 
 	mutex_lock(&dp->session_lock);
 	if (dp_display_state_is(DP_STATE_ENABLED))
-		dp_display_clean(dp);
+		dp_display_clean(dp, skip_wait);
 
 	dp_display_host_unready(dp);
 
+	dp->tot_lm_blks_in_use = 0;
+
 	mutex_unlock(&dp->session_lock);
 
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
@@ -1624,7 +1659,7 @@ static void dp_display_disconnect_sync(struct dp_display_private *dp)
 	DP_DEBUG("disconnect delay = %d ms\n", disconnect_delay_ms);
 	msleep(disconnect_delay_ms);
 
-	dp_display_handle_disconnect(dp);
+	dp_display_handle_disconnect(dp, false);
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state,
 		disconnect_delay_ms);
 }
@@ -1738,15 +1773,16 @@ static void dp_display_attention_work(struct work_struct *work)
 
 	if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) {
 		SDE_EVT32_EXTERNAL(dp->state, DS_PORT_STATUS_CHANGED);
-		if (dp_display_is_sink_count_zero(dp)) {
-			dp_display_handle_disconnect(dp);
-		} else {
-			/*
-			 * connect work should take care of sending
-			 * the HPD notification.
-			 */
-			if (!dp->mst.mst_active)
+		if (!dp->mst.mst_active) {
+			if (dp_display_is_sink_count_zero(dp)) {
+				dp_display_handle_disconnect(dp, false);
+			} else {
+				/*
+				 * connect work should take care of sending
+				 * the HPD notification.
+				 */
 				queue_work(dp->wq, &dp->connect_work);
+			}
 		}
 
 		goto mst_attention;
@@ -1754,7 +1790,7 @@ static void dp_display_attention_work(struct work_struct *work)
 
 	if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
 		SDE_EVT32_EXTERNAL(dp->state, DP_TEST_LINK_VIDEO_PATTERN);
-		dp_display_handle_disconnect(dp);
+		dp_display_handle_disconnect(dp, false);
 
 		dp->panel->video_test = true;
 		/*
@@ -1813,7 +1849,7 @@ cp_irq:
 		 * account for that. This is not needed if this
 		 * attention work was handling a test request
 		 */
-		dp_display_send_hpd_notification(dp);
+		dp_display_send_hpd_notification(dp, false);
 	}
 
 mst_attention:
@@ -1914,7 +1950,10 @@ static int dp_display_usb_notifier(struct notifier_block *nb,
 	SDE_EVT32_EXTERNAL(dp->state, dp->debug->sim_mode, action);
 	if (!action && dp->debug->sim_mode) {
 		DP_WARN("usb disconnected during simulation\n");
-		dp_display_disconnect_sync(dp);
+		dp_display_state_add(DP_STATE_ABORTED);
+		dp->ctrl->abort(dp->ctrl, true);
+		dp->aux->abort(dp->aux, true);
+		dp_display_handle_disconnect(dp, true);
 		dp->debug->abort(dp->debug);
 	}
 
@@ -1953,7 +1992,7 @@ int dp_display_mmrm_callback(struct mmrm_client_notifier_data *notifier_data)
 	if (notifier_data->cb_type == MMRM_CLIENT_RESOURCE_VALUE_CHANGE
 				&& dp_display_state_is(DP_STATE_ENABLED)
 				&& !dp_display_state_is(DP_STATE_ABORTED)) {
-		ret = dp_display_handle_disconnect(dp);
+		ret = dp_display_handle_disconnect(dp, false);
 		if (ret)
 			DP_ERR("mmrm callback error reducing clk, ret:%d\n", ret);
 	}
@@ -2002,6 +2041,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
 	};
 
 	mutex_init(&dp->session_lock);
+	mutex_init(&dp->accounting_lock);
 
 	dp->parser = dp_parser_get(dp->pdev);
 	if (IS_ERR(dp->parser)) {
@@ -2172,6 +2212,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
 
 	dp->cached_connector_status = connector_status_disconnected;
 	dp->tot_dsc_blks_in_use = 0;
+	dp->tot_lm_blks_in_use = 0;
 
 	dp->debug->hdcp_disabled = hdcp_disabled;
 	dp_display_update_hdcp_status(dp, true);
@@ -2214,37 +2255,6 @@ error:
 	return rc;
 }
 
-static void dp_display_dbg_reister(struct dp_display_private *dp)
-{
-	struct dp_parser *parser = dp->parser;
-	struct dss_io_data *io;
-
-	io = &parser->get_io(parser, "dp_ahb")->io;
-	if (io)
-		sde_dbg_reg_register_base("dp_ahb", io->base, io->len,
-				msm_get_phys_addr(dp->pdev, "dp_ahb"), SDE_DBG_DP);
-
-	io = &parser->get_io(parser, "dp_aux")->io;
-	if (io)
-		sde_dbg_reg_register_base("dp_aux", io->base, io->len,
-				msm_get_phys_addr(dp->pdev, "dp_aux"), SDE_DBG_DP);
-
-	io = &parser->get_io(parser, "dp_link")->io;
-	if (io)
-		sde_dbg_reg_register_base("dp_link", io->base, io->len,
-				msm_get_phys_addr(dp->pdev, "dp_link"), SDE_DBG_DP);
-
-	io = &parser->get_io(parser, "dp_p0")->io;
-	if (io)
-		sde_dbg_reg_register_base("dp_p0", io->base, io->len,
-				msm_get_phys_addr(dp->pdev, "dp_p0"), SDE_DBG_DP);
-
-	io = &parser->get_io(parser, "hdcp_physical")->io;
-	if (io)
-		sde_dbg_reg_register_base("hdcp_physical", io->base, io->len,
-				msm_get_phys_addr(dp->pdev, "hdcp_physical"), SDE_DBG_DP);
-}
-
 static int dp_display_post_init(struct dp_display *dp_display)
 {
 	int rc = 0;
@@ -2267,8 +2277,6 @@ static int dp_display_post_init(struct dp_display *dp_display)
 	if (rc)
 		goto end;
 
-	dp_display_dbg_reister(dp);
-
 	dp_display->post_init = NULL;
 end:
 	DP_DEBUG("%s\n", rc ? "failed" : "success");
@@ -2656,7 +2664,7 @@ static int dp_display_pre_disable(struct dp_display *dp_display, void *panel)
 
 clean:
 	if (dp_panel->audio_supported)
-		dp_panel->audio->off(dp_panel->audio);
+		dp_panel->audio->off(dp_panel->audio, false);
 
 	rc = dp_display_stream_pre_disable(dp, dp_panel);
 
@@ -2809,6 +2817,8 @@ static int dp_display_unprepare(struct dp_display *dp_display, void *panel)
 	/* log this as it results from user action of cable dis-connection */
 	DP_INFO("[OK]\n");
 end:
+	dp->tot_lm_blks_in_use -= dp_panel->max_lm;
+	dp_panel->max_lm = 0;
 	dp_panel->deinit(dp_panel, flags);
 	mutex_unlock(&dp->session_lock);
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
@@ -2867,11 +2877,14 @@ static int dp_display_validate_topology(struct dp_display_private *dp,
 	u32 num_lm = 0, num_dsc = 0, num_3dmux = 0;
 	bool dsc_capable = dp_mode->capabilities & DP_PANEL_CAPS_DSC;
 	u32 fps = dp_mode->timing.refresh_rate;
+	int avail_lm = 0;
+
+	mutex_lock(&dp->accounting_lock);
 
 	rc = msm_get_mixer_count(priv, mode, avail_res, &num_lm);
 	if (rc) {
 		DP_ERR("error getting mixer count. rc:%d\n", rc);
-		return rc;
+		goto end;
 	}
 
 	/* Merge using DSC, if enabled */
@@ -2879,7 +2892,7 @@ static int dp_display_validate_topology(struct dp_display_private *dp,
 		rc = msm_get_dsc_count(priv, mode->hdisplay, &num_dsc);
 		if (rc) {
 			DP_ERR("error getting dsc count. rc:%d\n", rc);
-			return rc;
+			goto end;
 		}
 
 		num_dsc = max(num_lm, num_dsc);
@@ -2889,7 +2902,8 @@ static int dp_display_validate_topology(struct dp_display_private *dp,
 					avail_res->num_lm);
 			/* Clear DSC caps and retry */
 			dp_mode->capabilities &= ~DP_PANEL_CAPS_DSC;
-			return -EAGAIN;
+			rc = -EAGAIN;
+			goto end;
 		} else {
 			/* Only DSCMERGE is supported on DP */
 			num_lm = num_dsc;
@@ -2900,24 +2914,36 @@ static int dp_display_validate_topology(struct dp_display_private *dp,
 		num_3dmux = 1;
 	}
 
-	if (num_lm > avail_res->num_lm) {
+	avail_lm = avail_res->num_lm + avail_res->num_lm_in_use - dp->tot_lm_blks_in_use;
+	if ((num_lm > dp_panel->max_lm) && (num_lm > avail_lm)) {
 		DP_DEBUG("mode %sx%d is invalid, not enough lm %d %d\n",
-				mode->name, fps, num_lm, num_lm, avail_res->num_lm);
-		return -EPERM;
+				mode->name, fps, num_lm, avail_res->num_lm);
+		rc = -EPERM;
+		goto end;
 	} else if (!num_dsc && (num_lm == dual && !num_3dmux)) {
 		DP_DEBUG("mode %sx%d is invalid, not enough 3dmux %d %d\n",
 				mode->name, fps, num_3dmux, avail_res->num_3dmux);
-		return -EPERM;
+		rc = -EPERM;
+		goto end;
 	} else if (num_lm == quad && num_dsc != quad)  {
 		DP_DEBUG("mode %sx%d is invalid, unsupported DP topology lm:%d dsc:%d\n",
 				mode->name, fps, num_lm, num_dsc);
-		return -EPERM;
+		rc = -EPERM;
+		goto end;
 	}
 
 	DP_DEBUG_V("mode %sx%d is valid, supported DP topology lm:%d dsc:%d 3dmux:%d\n",
 				mode->name, fps, num_lm, num_dsc, num_3dmux);
 
-	return 0;
+	dp->tot_lm_blks_in_use -= dp_panel->max_lm;
+	dp_panel->max_lm = num_lm > avail_res->num_lm_in_use ? max(dp_panel->max_lm, num_lm) : 0;
+	dp->tot_lm_blks_in_use += dp_panel->max_lm;
+
+	rc = 0;
+
+end:
+	mutex_unlock(&dp->accounting_lock);
+	return rc;
 }
 
 static enum drm_mode_status dp_display_validate_mode(
@@ -2973,6 +2999,8 @@ static enum drm_mode_status dp_display_validate_mode(
 
 	mode_status = MODE_OK;
 end:
+	if (mode_status != MODE_OK)
+		dp_display_clear_reservation(dp_display, dp_panel);
 	mutex_unlock(&dp->session_lock);
 
 	DP_DEBUG_V("[%s] mode is %s\n", mode->name,
@@ -3645,6 +3673,7 @@ static int dp_display_probe(struct platform_device *pdev)
 	g_dp_display->set_colorspace = dp_display_setup_colospace;
 	g_dp_display->get_available_dp_resources =
 					dp_display_get_available_dp_resources;
+	g_dp_display->clear_reservation = dp_display_clear_reservation;
 
 	rc = component_add(&pdev->dev, &dp_display_comp_ops);
 	if (rc) {

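The heart of the LM allocation race fix above is the per-panel reservation kept under the new accounting_lock. A condensed sketch of the budget check in dp_display_validate_topology (locking, logging and the DSC/3dmux checks elided; only fields introduced in this diff are used):

    /* Mixers DP may still claim: unallocated LMs, plus LMs already assigned
     * to this encoder, minus everything reserved for other DP panels. */
    avail_lm = avail_res->num_lm + avail_res->num_lm_in_use - dp->tot_lm_blks_in_use;

    if (num_lm > dp_panel->max_lm && num_lm > avail_lm)
        return -EPERM;  /* neither the old reservation nor the free pool fits */

    /* Re-book the reservation: keep one only while the mode needs more LMs
     * than the encoder currently has in use. */
    dp->tot_lm_blks_in_use -= dp_panel->max_lm;
    dp_panel->max_lm = (num_lm > avail_res->num_lm_in_use) ?
                max(dp_panel->max_lm, num_lm) : 0;
    dp->tot_lm_blks_in_use += dp_panel->max_lm;

dp_display_clear_reservation() releases the booking again from the bridge mode_set/mode_fixup paths and whenever mode validation fails, so a rejected or superseded mode cannot hold mixers indefinitely.
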
+ 1 - 0
msm/dp/dp_display.h

@@ -105,6 +105,7 @@ struct dp_display {
 	int (*get_available_dp_resources)(struct dp_display *dp_display,
 			const struct msm_resource_caps_info *avail_res,
 			struct msm_resource_caps_info *max_dp_avail_res);
+	void (*clear_reservation)(struct dp_display *dp, struct dp_panel *panel);
 };
 
 void *get_ipc_log_context(void);

+ 3 - 0
msm/dp/dp_drm.c

@@ -257,6 +257,8 @@ static void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
 
 	dp->convert_to_dp_mode(dp, bridge->dp_panel, adjusted_mode,
 			&bridge->dp_mode);
+
+	dp->clear_reservation(dp, bridge->dp_panel);
 }
 
 static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
@@ -290,6 +292,7 @@ static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
 	dp = bridge->display;
 
 	dp->convert_to_dp_mode(dp, bridge->dp_panel, mode, &dp_mode);
+	dp->clear_reservation(dp, bridge->dp_panel);
 	convert_to_drm_mode(&dp_mode, adjusted_mode);
 end:
 	return ret;

+ 2 - 0
msm/dp/dp_link.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  *
  * Copyright (c) 2008 Keith Packard
@@ -38,6 +39,7 @@
 struct drm_dp_aux;
 
 #define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
+#define DP_LINK_CAP_CRC (1 << 1)
 
 struct drm_dp_link {
 	unsigned char revision;

+ 2 - 0
msm/dp/dp_mst_drm.c

@@ -345,6 +345,7 @@ static bool dp_mst_bridge_mode_fixup(struct drm_bridge *drm_bridge,
 	dp = bridge->display;
 
 	dp->convert_to_dp_mode(dp, bridge_state->dp_panel, mode, &dp_mode);
+	dp->clear_reservation(dp, bridge_state->dp_panel);
 	convert_to_drm_mode(&dp_mode, adjusted_mode);
 
 	DP_MST_DEBUG("mst bridge [%d] mode:%s fixup\n", bridge->id, mode->name);
@@ -775,6 +776,7 @@ static void dp_mst_bridge_mode_set(struct drm_bridge *drm_bridge,
 	memcpy(&bridge->drm_mode, adjusted_mode, sizeof(bridge->drm_mode));
 	dp->convert_to_dp_mode(dp, bridge->dp_panel, adjusted_mode,
 			&bridge->dp_mode);
+	dp->clear_reservation(dp, dp_bridge_state->dp_panel);
 
 	DP_MST_INFO("mst bridge:%d conn:%d mode set complete %s\n", bridge->id,
 			DP_MST_CONN_ID(bridge), mode->name);

+ 4 - 0
msm/dp/dp_mst_sim.c

@@ -342,6 +342,10 @@ int dp_sim_update_port_num(struct dp_aux_bridge *bridge, u32 port_num)
 		return -EINVAL;
 
 	sim_dev = to_dp_sim_dev(bridge);
+	DP_INFO("Update port count from %d to %d\n", port_num, sim_dev->port_num);
+
+	if (sim_dev->port_num > port_num && sim_dev->ports)
+		sim_dev->port_num = port_num;
 
 	if (port_num > sim_dev->port_num) {
 		ports = devm_kzalloc(sim_dev->dev,

+ 8 - 5
msm/dp/dp_mst_sim_helper.c

@@ -1086,13 +1086,15 @@ int dp_mst_sim_update(void *mst_sim_context, u32 port_num,
 	kfree(ctx->ports);
 	ctx->port_num = 0;
 
-	ctx->ports = kcalloc(port_num, sizeof(*ports), GFP_KERNEL);
-	if (!ctx->ports) {
-		rc = -ENOMEM;
-		goto fail;
+	if (port_num) {
+		ctx->ports = kcalloc(port_num, sizeof(*ports), GFP_KERNEL);
+		if (!ctx->ports) {
+			rc = -ENOMEM;
+			goto fail;
+		}
+		ctx->port_num = port_num;
 	}
 
-	ctx->port_num = port_num;
 	for (i = 0; i < port_num; i++) {
 		ctx->ports[i] = ports[i];
 		if (ports[i].edid_size) {
@@ -1118,6 +1120,7 @@ fail:
 		for (i = 0; i < ctx->port_num; i++)
 			kfree(ctx->ports[i].edid);
 		kfree(ctx->ports);
+		ctx->port_num = 0;
 	}
 
 	mutex_unlock(&ctx->session_lock);

+ 71 - 0
msm/dp/dp_panel.c

@@ -1682,6 +1682,10 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel, bool multi_func)
 	if (drm_dp_enhanced_frame_cap(dpcd))
 		link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
 
+	rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_TEST_SINK_MISC, &temp, 1);
+	if ((rlen == 1) && (temp & DP_TEST_CRC_SUPPORTED))
+		link_info->capabilities |= DP_LINK_CAP_CRC;
+
 	dfp_count = dpcd[DP_DOWN_STREAM_PORT_COUNT] &
 						DP_DOWN_STREAM_PORT_COUNT;
 
@@ -3050,6 +3054,70 @@ static void dp_panel_update_pps(struct dp_panel *dp_panel, char *pps_cmd)
 	catalog->pps_flush(catalog);
 }
 
+int dp_panel_get_src_crc(struct dp_panel *dp_panel, u16 *crc)
+{
+	struct dp_catalog_panel *catalog;
+	struct dp_panel_private *panel;
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	catalog = panel->catalog;
+	return catalog->get_src_crc(catalog, crc);
+}
+
+int dp_panel_get_sink_crc(struct dp_panel *dp_panel, u16 *crc)
+{
+	int rc = 0;
+	struct dp_panel_private *panel;
+	struct drm_dp_aux *drm_aux;
+	u8 crc_bytes[6];
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	drm_aux = panel->aux->drm_aux;
+
+	/*
+	 * At DP_TEST_CRC_R_CR, there's 6 bytes containing CRC data, 2 bytes
+	 * per component (RGB or CrYCb).
+	 */
+	rc = drm_dp_dpcd_read(drm_aux, DP_TEST_CRC_R_CR, crc_bytes, 6);
+	if (rc < 0)
+		return rc;
+
+	rc = 0;
+	crc[0] = crc_bytes[0] | crc_bytes[1] << 8;
+	crc[1] = crc_bytes[2] | crc_bytes[3] << 8;
+	crc[2] = crc_bytes[4] | crc_bytes[5] << 8;
+
+	return rc;
+}
+
+int dp_panel_sink_crc_enable(struct dp_panel *dp_panel, bool enable)
+{
+	int rc = 0;
+	struct dp_panel_private *panel;
+	struct drm_dp_aux *drm_aux;
+	ssize_t ret;
+	u8 buf;
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	drm_aux = panel->aux->drm_aux;
+
+	if (dp_panel->link_info.capabilities & DP_LINK_CAP_CRC) {
+		ret = drm_dp_dpcd_readb(drm_aux, DP_TEST_SINK, &buf);
+		if (ret < 0)
+			return ret;
+
+		ret = drm_dp_dpcd_writeb(drm_aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
+		if (ret < 0)
+			return ret;
+
+		drm_dp_dpcd_readb(drm_aux, DP_TEST_SINK, &buf);
+		DP_DEBUG("Enabled CRC: %x\n", buf);
+	}
+
+	return rc;
+}
+
 struct dp_panel *dp_panel_get(struct dp_panel_in *in)
 {
 	int rc = 0;
@@ -3122,6 +3190,9 @@ struct dp_panel *dp_panel_get(struct dp_panel_in *in)
 	dp_panel->read_mst_cap = dp_panel_read_mst_cap;
 	dp_panel->convert_to_dp_mode = dp_panel_convert_to_dp_mode;
 	dp_panel->update_pps = dp_panel_update_pps;
+	dp_panel->get_src_crc = dp_panel_get_src_crc;
+	dp_panel->get_sink_crc = dp_panel_get_sink_crc;
+	dp_panel->sink_crc_enable = dp_panel_sink_crc_enable;
 
 	sde_conn = to_sde_connector(dp_panel->connector);
 	sde_conn->drv_panel = dp_panel;

+ 4 - 0
msm/dp/dp_panel.h

@@ -137,6 +137,7 @@ struct dp_panel {
 	u32 pbn;
 
 	u32 dsc_blks_in_use;
+	u32 max_lm;
 	/* DRM connector assosiated with this panel */
 	struct drm_connector *connector;
 
@@ -192,6 +193,9 @@ struct dp_panel {
 		const struct drm_display_mode *drm_mode,
 		struct dp_display_mode *dp_mode);
 	void (*update_pps)(struct dp_panel *dp_panel, char *pps_cmd);
+	int (*sink_crc_enable)(struct dp_panel *dp_panel, bool enable);
+	int (*get_src_crc)(struct dp_panel *dp_panel, u16 *crc);
+	int (*get_sink_crc)(struct dp_panel *dp_panel, u16 *crc);
 };
 
 struct dp_tu_calc_input {

+ 14 - 0
msm/dp/dp_reg.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -86,9 +87,16 @@
 #define MMSS_DP_AUDIO_TIMING_RBR_48		(0x00000094)
 #define MMSS_DP_AUDIO_TIMING_HBR_48		(0x00000098)
 
+#define DP_MISR40_CTRL				(0x000000D0)
+#define DP_MISR40_TX0				(0x000000D4)
+#define DP_MISR40_TX1				(0x000000DC)
+#define DP_MISR40_TX2				(0x000000E4)
+#define DP_MISR40_TX3				(0x000000EC)
 #define MMSS_DP_PSR_CRC_RG			(0x00000154)
 #define MMSS_DP_PSR_CRC_B			(0x00000158)
 
+#define MMSS_DP1_CRC_RG				(0x00000164)
+#define MMSS_DP1_CRC_B				(0x00000168)
 #define DP_COMPRESSION_MODE_CTRL		(0x00000180)
 #define DP_PPS_HB_0_3				(0x00000184)
 #define DP_PPS_PB_0_3				(0x00000188)
@@ -384,6 +392,12 @@
 #define DP_PHY_AUX_INTERRUPT_STATUS_V420	(0x00D8)
 #define DP_PHY_AUX_INTERRUPT_STATUS_V600        (0x00E0)
 #define DP_PHY_SPARE0_V420			(0x00C8)
+#define DP_PHY_MISR_CTRL			(0x00C0)
+#define DP_PHY_MISR_STATUS			(0x010C)
+#define DP_PHY_MISR_TX0				(0x0110)
+#define DP_PHY_MISR_TX1				(0x0130)
+#define DP_PHY_MISR_TX2				(0x0150)
+#define DP_PHY_MISR_TX3				(0x0170)
 #define TXn_TX_DRV_LVL_V420			(0x0014)
 #define TXn_TRANSCEIVER_BIAS_EN_V420		(0x0054)
 #define TXn_HIGHZ_DRVR_EN_V420			(0x0058)

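A note on the MISR40 layout implied by these offsets: each link lane reports its MISR in two consecutive 32-bit words, which is why DP_MISR40_TX0..TX3 sit 8 bytes apart and why dp_catalog_ctrl_read_misr walks eight consecutive words starting at DP_MISR40_TX0. An illustrative helper (not part of the driver) that makes the mapping explicit:

    /* Offset of 32-bit word 0 or 1 of the MISR value for link lane 0..3. */
    static inline u32 dp_misr40_word_offset(u32 lane, u32 word)
    {
        return DP_MISR40_TX0 + (lane * 8) + (word * 4);
    }
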
+ 2 - 1
msm/dsi/dsi_catalog.h

@@ -314,7 +314,8 @@ void dsi_phy_hw_v5_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
 
 int dsi_phy_hw_v5_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
 				      u32 *dst, u32 size);
-void dsi_phy_hw_v5_0_phy_idle_off(struct dsi_phy_hw *phy);
+void dsi_phy_hw_v5_0_phy_idle_off(struct dsi_phy_hw *phy,
+				struct dsi_phy_cfg *cfg);
 void dsi_ctrl_hw_22_configure_cmddma_window(struct dsi_ctrl_hw *ctrl,
 		struct dsi_ctrl_cmd_dma_info *cmd,
 		u32 line_no, u32 window);

+ 1 - 4
msm/dsi/dsi_phy.c

@@ -1167,11 +1167,8 @@ int dsi_phy_idle_ctrl(struct msm_dsi_phy *phy, bool enable)
 	} else {
 		phy->dsi_phy_state = DSI_PHY_ENGINE_OFF;
 
-		if (phy->hw.ops.disable)
-			phy->hw.ops.disable(&phy->hw, &phy->cfg);
-
 		if (phy->hw.ops.phy_idle_off)
-			phy->hw.ops.phy_idle_off(&phy->hw);
+			phy->hw.ops.phy_idle_off(&phy->hw, &phy->cfg);
 	}
 	mutex_unlock(&phy->phy_lock);
 

+ 3 - 1
msm/dsi/dsi_phy_hw.h

@@ -291,8 +291,10 @@ struct dsi_phy_hw_ops {
 	/**
 	 * phy_idle_off() - Disable PHY hardware when exiting idle screen
 	 * @phy:      Pointer to DSI PHY hardware object.
+	 * @cfg:      Per lane configurations for timing, strength and lane
+	 *	      configurations.
 	 */
-	void (*phy_idle_off)(struct dsi_phy_hw *phy);
+	void (*phy_idle_off)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
 
 	/**
 	 * calculate_timing_params() - calculates timing parameters.

+ 14 - 1
msm/dsi/dsi_phy_hw_v5_0.c

@@ -880,10 +880,23 @@ void dsi_phy_hw_v5_0_set_continuous_clk(struct dsi_phy_hw *phy, bool enable)
 	wmb(); /* make sure request is set */
 }
 
-void dsi_phy_hw_v5_0_phy_idle_off(struct dsi_phy_hw *phy)
+void dsi_phy_hw_v5_0_phy_idle_off(struct dsi_phy_hw *phy,
+					struct dsi_phy_cfg *cfg)
 {
+	if (dsi_phy_hw_v5_0_is_pll_on(phy))
+		DSI_PHY_WARN(phy, "Turning OFF PHY while PLL is on\n");
+
 	/* enable clamping of PADS */
 	DSI_W32(phy, DSIPHY_CMN_CTRL_4, 0x1);
 	DSI_W32(phy, DSIPHY_CMN_CTRL_3, 0x0);
 	wmb();
+
+	dsi_phy_hw_v5_0_config_lpcdrx(phy, cfg, false);
+
+	/* Turn off REFGEN Vote */
+	DSI_W32(phy, DSIPHY_CMN_GLBL_DIGTOP_SPARE10, 0x0);
+	/* make sure request is set */
+	wmb();
+	/* Delay to ensure HW removes vote*/
+	udelay(2);
 }

+ 44 - 12
msm/dsi/dsi_pll_4nm.c

@@ -597,11 +597,11 @@ static void dsi_pll_enable_global_clk(struct dsi_pll_resource *rsc)
 	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data | BIT(5) | BIT(4)));
 }
 
-static void dsi_pll_phy_dig_reset(struct dsi_pll_resource *rsc)
+static void dsi_pll_phy_analog_reset(struct dsi_pll_resource *rsc)
 {
 	/*
-	 * Reset the PHY digital domain. This would be needed when
-	 * coming out of a CX or analog rail power collapse while
+	 * Reset the PHY analog domain. This would be needed when
+	 * coming out of a 0p9 power collapse while
 	 * ensuring that the pads maintain LP00 or LP11 state
 	 */
 	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
@@ -1391,15 +1391,6 @@ static int dsi_pll_4nm_enable(struct dsi_pll_resource *rsc)
 		goto error;
 	}
 
-	/*
-	 * assert power on reset for PHY digital in case the PLL is
-	 * enabled after CX of analog domain power collapse. This needs
-	 * to be done before enabling the global clk.
-	 */
-	dsi_pll_phy_dig_reset(rsc);
-	if (rsc->slave)
-		dsi_pll_phy_dig_reset(rsc->slave);
-
 	dsi_pll_enable_global_clk(rsc);
 	if (rsc->slave)
 		dsi_pll_enable_global_clk(rsc->slave);
@@ -1436,12 +1427,53 @@ static int dsi_pll_4nm_disable(struct dsi_pll_resource *rsc)
 	return rc;
 }
 
+void dsi_pll_assert_pll_reset(struct dsi_pll_resource *rsc)
+{
+	u32 data = 0;
+
+	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_1, data | BIT(7));
+
+	/* Ensure Assert is through */
+	wmb();
+
+	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_1, data & ~BIT(7));
+
+	/* Ensure deassert is through */
+	wmb();
+}
+
+void dsi_pll_4nm_trigger_resets_pre_enable(struct dsi_pll_resource *rsc)
+{
+	/*
+	 * Assert power on reset on DSI PHY Analog immeditately
+	 * after 0P9 resume to make sure PHY starts in a
+	 * clean state
+	 */
+	dsi_pll_phy_analog_reset(rsc);
+	if (rsc->slave)
+		dsi_pll_phy_analog_reset(rsc->slave);
+
+	/*
+	 * Trigger PLL reset as well to clear out any jitter
+	 * introduced as result of 0p9 collapse
+	 */
+	dsi_pll_assert_pll_reset(rsc);
+	if (rsc->slave)
+		dsi_pll_assert_pll_reset(rsc->slave);
+}
+
 int dsi_pll_4nm_configure(void *pll, bool commit)
 {
 
 	int rc = 0;
 	struct dsi_pll_resource *rsc = (struct dsi_pll_resource *)pll;
 
+	/* These resets are needed for resetting Analog and PLL portions
+	 * of DSI PHY before PLL is enabled and locked
+	 */
+	if (commit)
+		dsi_pll_4nm_trigger_resets_pre_enable(rsc);
+
 	dsi_pll_config_slave(rsc);
 
 	/* PLL power needs to be enabled before accessing PLL registers */

+ 1 - 0
msm/dsi/dsi_pll_4nm.h

@@ -167,6 +167,7 @@
 #define PHY_CMN_GLBL_CTRL	0x018
 #define PHY_CMN_RBUF_CTRL	0x01C
 #define PHY_CMN_CTRL_0		0x024
+#define PHY_CMN_CTRL_1          0x028
 #define PHY_CMN_CTRL_2		0x02C
 #define PHY_CMN_CTRL_3		0x030
 #define PHY_CMN_PLL_CNTRL	0x03C

+ 22 - 3
msm/msm_drv.c

@@ -1040,6 +1040,25 @@ mdss_init_fail:
 	return ret;
 }
 
+void msm_atomic_flush_display_threads(struct msm_drm_private *priv)
+{
+	int i;
+
+	if (!priv) {
+		SDE_ERROR("invalid private data\n");
+		return;
+	}
+
+	for (i = 0; i < priv->num_crtcs; i++) {
+		if (priv->disp_thread[i].thread)
+			kthread_flush_worker(&priv->disp_thread[i].worker);
+		if (priv->event_thread[i].thread)
+			kthread_flush_worker(&priv->event_thread[i].worker);
+	}
+
+	kthread_flush_worker(&priv->pp_event_worker);
+}
+
 /*
  * DRM operations:
  */
@@ -1138,10 +1157,8 @@ static void msm_lastclose(struct drm_device *dev)
 				priv->pending_crtcs);
 
 		rc = kms->funcs->trigger_null_flush(kms);
-		if (rc) {
-			DRM_ERROR("null flush commit failure during lastclose\n");
+		if (rc)
 			return;
-		}
 	}
 
 	/*
@@ -1165,6 +1182,8 @@ static void msm_lastclose(struct drm_device *dev)
 		DRM_INFO("wait for crtc mask 0x%x failed, commit anyway...\n",
 				priv->pending_crtcs);
 
+	msm_atomic_flush_display_threads(priv);
+
 	if (priv->fbdev) {
 		rc = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
 		if (rc)

+ 5 - 1
msm/msm_drv.h

@@ -117,6 +117,7 @@ enum msm_mdp_plane_property {
 	PLANE_PROP_DMA_GC,
 	PLANE_PROP_FP16_GC,
 	PLANE_PROP_FP16_CSC,
+	PLANE_PROP_UBWC_STATS_ROI,
 
 	/* # of blob properties */
 	PLANE_PROP_BLOBCOUNT,
@@ -140,7 +141,6 @@ enum msm_mdp_plane_property {
 	PLANE_PROP_INVERSE_PMA,
 	PLANE_PROP_FP16_IGC,
 	PLANE_PROP_FP16_UNMULT,
-	PLANE_PROP_UBWC_STATS_ROI,
 
 	/* enum/bitmask properties */
 	PLANE_PROP_BLEND_OP,
@@ -828,6 +828,7 @@ struct msm_mode_info {
 
 /**
  * struct msm_resource_caps_info - defines hw resources
+ * @num_lm_in_use       number of layer mixers allocated to a specified encoder
  * @num_lm              number of layer mixers available
  * @num_dsc             number of dsc available
  * @num_vdc             number of vdc available
@@ -836,6 +837,7 @@ struct msm_mode_info {
  * @max_mixer_width:    max width supported by layer mixer
  */
 struct msm_resource_caps_info {
+	uint32_t num_lm_in_use;
 	uint32_t num_lm;
 	uint32_t num_dsc;
 	uint32_t num_vdc;
@@ -1113,6 +1115,8 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
 void msm_atomic_state_clear(struct drm_atomic_state *state);
 void msm_atomic_state_free(struct drm_atomic_state *state);
 
+void msm_atomic_flush_display_threads(struct msm_drm_private *priv);
+
 int msm_gem_init_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, int npages);
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,

+ 21 - 14
msm/sde/sde_color_processing.c

@@ -896,13 +896,15 @@ static int _set_spr_init_feature(struct sde_hw_dspp *hw_dspp,
 {
 	int ret = 0;
 
-	if (!sde_crtc || !hw_dspp || !hw_dspp->ops.setup_spr_init_config) {
+	if (!sde_crtc || !hw_dspp) {
 		DRM_ERROR("invalid arguments\n");
 		ret = -EINVAL;
 	} else {
-		hw_dspp->ops.setup_spr_init_config(hw_dspp, hw_cfg);
-		_update_pu_feature_enable(sde_crtc, SDE_CP_CRTC_DSPP_SPR_PU,
+		if (hw_dspp->ops.setup_spr_init_config) {
+			hw_dspp->ops.setup_spr_init_config(hw_dspp, hw_cfg);
+			_update_pu_feature_enable(sde_crtc, SDE_CP_CRTC_DSPP_SPR_PU,
 				hw_cfg->payload != NULL);
+		}
 	}
 
 	return ret;
@@ -914,12 +916,14 @@ static int _set_demura_feature(struct sde_hw_dspp *hw_dspp,
 {
 	int ret = 0;
 
-	if (!hw_dspp || !hw_dspp->ops.setup_demura_cfg) {
+	if (!hw_dspp) {
 		ret = -EINVAL;
 	} else {
-		hw_dspp->ops.setup_demura_cfg(hw_dspp, hw_cfg);
-		_update_pu_feature_enable(sde_crtc, SDE_CP_CRTC_DSPP_DEMURA_PU,
+		if (hw_dspp->ops.setup_demura_cfg) {
+			hw_dspp->ops.setup_demura_cfg(hw_dspp, hw_cfg);
+			_update_pu_feature_enable(sde_crtc, SDE_CP_CRTC_DSPP_DEMURA_PU,
 				hw_cfg->payload != NULL);
+		}
 	}
 
 	return ret;
@@ -1617,6 +1621,8 @@ static int _sde_cp_crtc_checkfeature(u32 feature,
 
 	hw_cfg.num_of_mixers = sde_crtc->num_mixers;
 	hw_cfg.last_feature = 0;
+	hw_cfg.panel_width = sde_crtc_state->base.adjusted_mode.hdisplay;
+	hw_cfg.panel_height = sde_crtc_state->base.adjusted_mode.vdisplay;
 
 	for (i = 0; i < num_mixers; i++) {
 		hw_dspp = sde_crtc->mixers[i].hw_dspp;
@@ -1625,6 +1631,7 @@ static int _sde_cp_crtc_checkfeature(u32 feature,
 		hw_cfg.dspp[i] = hw_dspp;
 	}
 
+	SDE_EVT32(feature, hw_cfg.panel_width, hw_cfg.panel_height);
 	for (i = 0; i < num_mixers && !ret; i++) {
 		hw_lm = sde_crtc->mixers[i].hw_lm;
 		hw_dspp = sde_crtc->mixers[i].hw_dspp;
@@ -1639,8 +1646,6 @@ static int _sde_cp_crtc_checkfeature(u32 feature,
 		hw_cfg.displayh = num_mixers *
 				sde_crtc_state->lm_roi[i].w;
 		hw_cfg.displayv = sde_crtc_state->lm_roi[i].h;
-		hw_cfg.panel_width = sde_crtc->base.state->adjusted_mode.hdisplay;
-		hw_cfg.panel_height = sde_crtc->base.state->adjusted_mode.vdisplay;
 		DRM_DEBUG_DRIVER("check cp feature %d on mixer %d\n",
 				feature, hw_lm->idx - LM_0);
 		ret = check_feature(hw_dspp, &hw_cfg, sde_crtc);
@@ -1685,7 +1690,7 @@ static void _sde_cp_crtc_commit_feature(struct sde_cp_node *prop_node,
 
 	hw_cfg.num_ds_enabled = sde_crtc_state->num_ds_enabled;
 
-	SDE_EVT32(hw_cfg.panel_width, hw_cfg.panel_height);
+	SDE_EVT32(prop_node->feature, hw_cfg.panel_width, hw_cfg.panel_height);
 
 	for (i = 0; i < num_mixers; i++) {
 		hw_dspp = sde_crtc->mixers[i].hw_dspp;
@@ -1823,7 +1828,7 @@ static void _sde_cp_dspp_flush_helper(struct sde_crtc *sde_crtc, u32 feature)
 	for (i = 0; i < num_mixers; i++) {
 		ctl = sde_crtc->mixers[i].hw_ctl;
 		dspp = sde_crtc->mixers[i].hw_dspp;
-		if (ctl && ctl->ops.update_bitmask_dspp_subblk) {
+		if (ctl && dspp && ctl->ops.update_bitmask_dspp_subblk) {
 			if (feature == SDE_CP_CRTC_DSPP_SB) {
 				if (!dspp->sb_dma_in_use)
 					continue;
@@ -1888,6 +1893,9 @@ static int _sde_cp_crtc_check_pu_features(struct drm_crtc *crtc)
 	hw_cfg.num_of_mixers = sde_crtc->num_mixers;
 	hw_cfg.payload = &sde_crtc_state->user_roi_list;
 	hw_cfg.len = sizeof(sde_crtc_state->user_roi_list);
+	hw_cfg.panel_height = sde_crtc_state->base.adjusted_mode.vdisplay;
+	hw_cfg.panel_width = sde_crtc_state->base.adjusted_mode.hdisplay;
+
 	for (i = 0; i < hw_cfg.num_of_mixers; i++)
 		hw_cfg.dspp[i] = sde_crtc->mixers[i].hw_dspp;
 
@@ -1899,6 +1907,7 @@ static int _sde_cp_crtc_check_pu_features(struct drm_crtc *crtc)
 				!(sde_crtc->cp_pu_feature_mask & BIT(i)))
 			continue;
 
+		SDE_EVT32(i, hw_cfg.panel_width, hw_cfg.panel_height);
 		for (j = 0; j < hw_cfg.num_of_mixers; j++) {
 			hw_dspp = sde_crtc->mixers[j].hw_dspp;
 
@@ -1908,8 +1917,7 @@ static int _sde_cp_crtc_check_pu_features(struct drm_crtc *crtc)
 			hw_cfg.displayh = hw_cfg.num_of_mixers *
 					sde_crtc_state->lm_roi[j].w;
 			hw_cfg.displayv = sde_crtc_state->lm_roi[j].h;
-			hw_cfg.panel_height = sde_crtc->base.state->adjusted_mode.vdisplay;
-			hw_cfg.panel_width = sde_crtc->base.state->adjusted_mode.hdisplay;
+
 			ret = check_pu_feature(hw_dspp, &hw_cfg, sde_crtc);
 			if (ret) {
 				DRM_ERROR("failed pu feature %d in mixer %d\n",
@@ -2062,6 +2070,7 @@ static int _sde_cp_crtc_update_pu_features(struct drm_crtc *crtc, bool *need_flu
 				!(sde_crtc->cp_pu_feature_mask & BIT(i)))
 			continue;
 
+		SDE_EVT32(i, hw_cfg.panel_width, hw_cfg.panel_height);
 		for (j = 0; j < hw_cfg.num_of_mixers; j++) {
 			hw_lm = sde_crtc->mixers[j].hw_lm;
 			hw_dspp = sde_crtc->mixers[j].hw_dspp;
@@ -2071,8 +2080,6 @@ static int _sde_cp_crtc_update_pu_features(struct drm_crtc *crtc, bool *need_flu
 			hw_cfg.displayh = hw_cfg.num_of_mixers *
 					hw_lm->cfg.out_width;
 			hw_cfg.displayv = hw_lm->cfg.out_height;
-			hw_cfg.panel_width = sde_crtc->base.state->adjusted_mode.hdisplay;
-			hw_cfg.panel_height = sde_crtc->base.state->adjusted_mode.vdisplay;
 
 			ret = set_pu_feature(hw_dspp, &hw_cfg, sde_crtc);
 			/* feature does not need flush when ret > 0 */

+ 27 - 6
msm/sde/sde_connector.h

@@ -1217,24 +1217,45 @@ static inline int sde_connector_state_get_compression_info(
 	return 0;
 }
 
-static inline bool sde_connector_is_3d_merge_enabled(struct drm_connector *conn)
+static inline bool sde_connector_is_quadpipe_3d_merge_enabled(
+		struct drm_connector_state *conn_state)
 {
 	enum sde_rm_topology_name topology;
 
-	if (!conn)
+	if (!conn_state)
 		return false;
 
-	topology = sde_connector_get_topology_name(conn);
+	topology = sde_connector_get_property(conn_state, CONNECTOR_PROP_TOPOLOGY_NAME);
+	if ((topology == SDE_RM_TOPOLOGY_QUADPIPE_3DMERGE)
+			|| (topology == SDE_RM_TOPOLOGY_QUADPIPE_3DMERGE_DSC))
+		return true;
+
+	return false;
+}
+
+static inline bool sde_connector_is_dualpipe_3d_merge_enabled(
+		struct drm_connector_state *conn_state)
+{
+	enum sde_rm_topology_name topology;
+
+	if (!conn_state)
+		return false;
+
+	topology = sde_connector_get_property(conn_state, CONNECTOR_PROP_TOPOLOGY_NAME);
 	if ((topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE)
 			|| (topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC)
-			|| (topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_VDC)
-			|| (topology == SDE_RM_TOPOLOGY_QUADPIPE_3DMERGE)
-			|| (topology == SDE_RM_TOPOLOGY_QUADPIPE_3DMERGE_DSC))
+			|| (topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_VDC))
 		return true;
 
 	return false;
 }
 
+static inline bool sde_connector_is_3d_merge_enabled(struct drm_connector_state *conn_state)
+{
+	return sde_connector_is_dualpipe_3d_merge_enabled(conn_state)
+		|| sde_connector_is_quadpipe_3d_merge_enabled(conn_state);
+}
+
 /**
 * sde_connector_set_msm_mode - set msm_mode for connector state
 * @conn_state: Pointer to drm connector state structure

+ 47 - 23
msm/sde/sde_crtc.c

@@ -1360,15 +1360,19 @@ static int _sde_crtc_check_rois(struct drm_crtc *crtc,
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *sde_crtc_state;
-	struct msm_mode_info mode_info;
+	struct msm_mode_info *mode_info;
 	u32 crtc_width, crtc_height, mixer_width, mixer_height;
 	struct drm_display_mode *adj_mode;
-	int rc, lm_idx, i;
+	int rc = 0, lm_idx, i;
+	struct drm_connector *conn;
+	struct drm_connector_state *conn_state;
 
 	if (!crtc || !state)
 		return -EINVAL;
 
-	memset(&mode_info, 0, sizeof(mode_info));
+	mode_info = kzalloc(sizeof(struct msm_mode_info), GFP_KERNEL);
+	if (!mode_info)
+		return -ENOMEM;
 
 	sde_crtc = to_sde_crtc(crtc);
 	sde_crtc_state = to_sde_crtc_state(state);
@@ -1377,13 +1381,27 @@ static int _sde_crtc_check_rois(struct drm_crtc *crtc,
 	sde_crtc_get_resolution(crtc, state, adj_mode, &crtc_width, &crtc_height);
 	sde_crtc_get_mixer_resolution(crtc, state, adj_mode, &mixer_width, &mixer_height);
 	/* check cumulative mixer w/h is equal full crtc w/h */
-	if (sde_crtc->num_mixers
-			&& (((mixer_width * sde_crtc->num_mixers) != crtc_width)
+	if (sde_crtc->num_mixers && (((mixer_width * sde_crtc->num_mixers) != crtc_width)
 				|| (mixer_height != crtc_height))) {
 		SDE_ERROR("%s: invalid w/h crtc:%d,%d, mixer:%d,%d, num_mixers:%d\n",
 				sde_crtc->name, crtc_width, crtc_height, mixer_width, mixer_height,
 				sde_crtc->num_mixers);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto end;
+	} else if (state->state) {
+		for_each_new_connector_in_state(state->state, conn, conn_state, i) {
+			if (conn_state && (conn_state->crtc == crtc)
+				&& ((sde_connector_is_dualpipe_3d_merge_enabled(conn_state)
+						&& (crtc_width % 4))
+					|| (sde_connector_is_quadpipe_3d_merge_enabled(conn_state)
+							&& (crtc_width % 8)))) {
+				SDE_ERROR(
+				  "%s: invalid 3d-merge_w - mixer_w:%d, crtc_w:%d, num_mixers:%d\n",
+					sde_crtc->name, mixer_width,
+					crtc_width, sde_crtc->num_mixers);
+				return -EINVAL;
+			}
+		}
 	}
 
 	/*
@@ -1396,54 +1414,58 @@ static int _sde_crtc_check_rois(struct drm_crtc *crtc,
 		if (!conn || !conn->state)
 			continue;
 
-		rc = sde_connector_state_get_mode_info(conn->state, &mode_info);
+		rc = sde_connector_state_get_mode_info(conn->state, mode_info);
 		if (rc) {
 			SDE_ERROR("failed to get mode info\n");
-			return -EINVAL;
+			rc =  -EINVAL;
+			goto end;
 		}
 
-		if (sde_connector_is_3d_merge_enabled(conn) && (mixer_width % 2)) {
+		if (sde_connector_is_3d_merge_enabled(conn->state) && (mixer_width % 2)) {
 			SDE_ERROR(
 			  "%s: invalid width w/ 3d-merge - mixer_w:%d, crtc_w:%d, num_mixers:%d\n",
 				sde_crtc->name, crtc_width, mixer_width, sde_crtc->num_mixers);
-			return -EINVAL;
+			rc = -EINVAL;
+			goto end;
 		}
 
-		if (!mode_info.roi_caps.enabled)
+		if (!mode_info->roi_caps.enabled)
 			continue;
 
 		if (sde_crtc_state->user_roi_list.num_rects >
-				mode_info.roi_caps.num_roi) {
+				mode_info->roi_caps.num_roi) {
 			SDE_ERROR("roi count is exceeding limit, %d > %d\n",
 					sde_crtc_state->user_roi_list.num_rects,
-					mode_info.roi_caps.num_roi);
-			return -E2BIG;
+					mode_info->roi_caps.num_roi);
+			rc = -E2BIG;
+			goto end;
 		}
 
 		rc = _sde_crtc_set_crtc_roi(crtc, state);
 		if (rc)
-			return rc;
+			goto end;
 
 		rc = _sde_crtc_check_autorefresh(crtc, state);
 		if (rc)
-			return rc;
+			goto end;
 
 		for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
 			rc = _sde_crtc_set_lm_roi(crtc, state, lm_idx);
 			if (rc)
-				return rc;
+				goto end;
 		}
 
 		rc = _sde_crtc_check_rois_centered_and_symmetric(crtc, state);
 		if (rc)
-			return rc;
+			goto end;
 
 		rc = _sde_crtc_check_planes_within_crtc_roi(crtc, state);
 		if (rc)
-			return rc;
+			goto end;
 	}
-
-	return 0;
+end:
+	kfree(mode_info);
+	return rc;
 }
 
 static u32 _sde_crtc_calc_gcd(u32 a, u32 b)
@@ -2756,8 +2778,10 @@ void sde_crtc_get_frame_data(struct drm_crtc *crtc)
 	data->frame_count = sde_crtc->fps_info.frame_count;
 
 	/* Collect plane specific data */
-	drm_for_each_plane_mask(plane, crtc->dev, sde_crtc->plane_mask_old)
-		sde_plane_get_frame_data(plane, &data->plane_frame_data[i]);
+	drm_for_each_plane_mask(plane, crtc->dev, sde_crtc->plane_mask_old) {
+		if (i < SDE_FRAME_DATA_MAX_PLANES)
+			sde_plane_get_frame_data(plane, &data->plane_frame_data[i++]);
+	}
 
 	if (frame_data->cnt)
 		_sde_crtc_frame_data_notify(crtc, data);

+ 1 - 0
msm/sde/sde_encoder.c

@@ -3272,6 +3272,7 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
 				phys->ops.disable(phys);
 		}
 	} else {
+		sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
 		for (i = 0; i < sde_enc->num_phys_encs; i++) {
 			struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 

+ 15 - 6
msm/sde/sde_encoder_phys_wb.c

@@ -1024,8 +1024,8 @@ static int sde_encoder_phys_wb_atomic_check(struct sde_encoder_phys *phys_enc,
 	/* bypass check if commit with no framebuffer */
 	fb = sde_wb_connector_state_get_output_fb(conn_state);
 	if (!fb) {
-		SDE_DEBUG("[enc:%d wb:%d] no out fb\n", DRMID(phys_enc->parent), WBID(wb_enc));
-		return 0;
+		SDE_ERROR("[enc:%d wb:%d] no out fb\n", DRMID(phys_enc->parent), WBID(wb_enc));
+		return -EINVAL;
 	}
 
 	fmt = sde_get_sde_format_ext(fb->format->format, fb->modifier);
@@ -1430,23 +1430,32 @@ static void _sde_encoder_phys_wb_setup_dnsc_blur(struct sde_encoder_phys *phys_e
 	int i;
 	bool enable;
 
-	if (!sde_kms->catalog->dnsc_blur_count || !hw_dnsc_blur || !hw_pp
-			|| !hw_dnsc_blur->ops.setup_dnsc_blur)
+	if (!sde_kms->catalog->dnsc_blur_count || !hw_pp)
 		return;
 
 	sde_conn = to_sde_connector(wb_dev->connector);
 	sde_conn_state = to_sde_connector_state(wb_dev->connector->state);
 
-	if (sde_conn_state->dnsc_blur_count && !hw_dnsc_blur) {
+	if (sde_conn_state->dnsc_blur_count
+			&& (!hw_dnsc_blur || !hw_dnsc_blur->ops.setup_dnsc_blur)) {
 		SDE_ERROR("[enc:%d wb:%d] invalid config - dnsc_blur block not reserved\n",
 			DRMID(phys_enc->parent), WBID(wb_enc));
-		sde_kms->catalog->dnsc_blur_count = 0;
 		return;
 	}
 
 	/* swap between 0 & 1 lut idx on each config change for gaussian lut */
 	sde_conn_state->dnsc_blur_lut = 1 - sde_conn_state->dnsc_blur_lut;
 
+	/*
+	 * disable dnsc_blur case - safe to update the opmode as dynamic switching of
+	 * dnsc_blur hw block between WBs are not supported currently.
+	 */
+	if (hw_dnsc_blur && !sde_conn_state->dnsc_blur_count) {
+		hw_dnsc_blur->ops.setup_dnsc_blur(hw_dnsc_blur, NULL, 0);
+		SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), SDE_EVTLOG_FUNC_CASE1);
+		return;
+	}
+
 	for (i = 0; i < sde_conn_state->dnsc_blur_count; i++) {
 		cfg = &sde_conn_state->dnsc_blur_cfg[i];
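A condensed, stand-alone sketch of the decision flow this hunk arrives at; the struct, the setup callback, and the LUT toggle below are simplified stand-ins for the SDE writeback/dnsc_blur types, not driver code:

#include <stddef.h>

struct blur_hw {
	void (*setup)(struct blur_hw *hw, const void *cfg, unsigned int lut_idx);
};

/* cfgs/count come from the new connector state; hw is the reserved block (may be NULL) */
static void program_dnsc_blur(struct blur_hw *hw, const void *cfgs[], size_t count)
{
	/* blur requested but no block (or no setup op) reserved: invalid config, bail out */
	if (count && (!hw || !hw->setup))
		return;

	/* block still reserved but blur removed from the new state: push a NULL
	 * config so the opmode is cleared on this writeback path */
	if (hw && !count) {
		hw->setup(hw, NULL, 0);
		return;
	}

	/* enable path: program every config, alternating the LUT index */
	for (size_t i = 0; i < count; i++)
		hw->setup(hw, cfgs[i], i & 1u);
}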
 

+ 13 - 1
msm/sde/sde_hw_rc.c

@@ -749,6 +749,7 @@ int sde_hw_rc_check_mask(struct sde_hw_dspp *hw_dspp, void *cfg)
 
 	if ((hw_cfg->len == 0 && hw_cfg->payload == NULL)) {
 		SDE_DEBUG("RC feature disabled, skip mask checks\n");
+		SDE_EVT32(RC_IDX(hw_dspp));
 		return 0;
 	}
 
@@ -802,6 +803,7 @@ int sde_hw_rc_check_pu_roi(struct sde_hw_dspp *hw_dspp, void *cfg)
 		SDE_DEBUG("full frame update\n");
 		memset(&empty_roi_list, 0, sizeof(struct msm_roi_list));
 		roi_list = &empty_roi_list;
+		SDE_EVT32(RC_IDX(hw_dspp));
 	}
 
 	rc_mask_cfg = RC_STATE(hw_dspp).last_rc_mask_cfg;
@@ -810,6 +812,7 @@ int sde_hw_rc_check_pu_roi(struct sde_hw_dspp *hw_dspp, void *cfg)
 	/* early return when there is no mask in memory */
 	if (!mask_programmed || !rc_mask_cfg) {
 		SDE_DEBUG("no previous rc mask programmed\n");
+		SDE_EVT32(RC_IDX(hw_dspp));
 		return SDE_HW_RC_PU_SKIP_OP;
 	}
 
@@ -869,10 +872,12 @@ int sde_hw_rc_setup_pu_roi(struct sde_hw_dspp *hw_dspp, void *cfg)
 
 	rc_mask_cfg = RC_STATE(hw_dspp).last_rc_mask_cfg;
 	mask_programmed = RC_STATE(hw_dspp).mask_programmed;
+	SDE_EVT32(RC_IDX(hw_dspp), roi_list, rc_mask_cfg, mask_programmed);
 
 	/* early return when there is no mask in memory */
 	if (!mask_programmed || !rc_mask_cfg) {
 		SDE_DEBUG("no previous rc mask programmed\n");
+		SDE_EVT32(RC_IDX(hw_dspp));
 		return SDE_HW_RC_PU_SKIP_OP;
 	}
 
@@ -936,7 +941,9 @@ int sde_hw_rc_setup_mask(struct sde_hw_dspp *hw_dspp, void *cfg)
 		memset(RC_STATE(hw_dspp).last_roi_list, 0,
 				sizeof(struct msm_roi_list));
 		RC_STATE(hw_dspp).roi_programmed = false;
-
+		SDE_EVT32(RC_IDX(hw_dspp), RC_STATE(hw_dspp).last_rc_mask_cfg,
+				RC_STATE(hw_dspp).mask_programmed,
+				RC_STATE(hw_dspp).roi_programmed);
 		return 0;
 	}
 
@@ -957,6 +964,7 @@ int sde_hw_rc_setup_mask(struct sde_hw_dspp *hw_dspp, void *cfg)
 		SDE_DEBUG("partial frame update\n");
 		sde_kms_rect_merge_rectangles(last_roi_list, &merged_roi);
 	}
+	SDE_EVT32(RC_IDX(hw_dspp), roi_programmed);
 
 	rc = _sde_hw_rc_get_ajusted_roi(hw_cfg, &merged_roi, &rc_roi);
 	if (rc) {
@@ -1003,6 +1011,7 @@ int sde_hw_rc_setup_data_dma(struct sde_hw_dspp *hw_dspp, void *cfg)
 
 	if ((hw_cfg->len == 0 && hw_cfg->payload == NULL)) {
 		SDE_DEBUG("RC feature disabled, skip data programming\n");
+		SDE_EVT32(RC_IDX(hw_dspp));
 		return 0;
 	}
 
@@ -1016,6 +1025,7 @@ int sde_hw_rc_setup_data_dma(struct sde_hw_dspp *hw_dspp, void *cfg)
 
 	if (rc_mask_cfg->flags & SDE_HW_RC_SKIP_DATA_PROG) {
 		SDE_DEBUG("skip data programming\n");
+		SDE_EVT32(RC_IDX(hw_dspp));
 		return 0;
 	}
 
@@ -1042,6 +1052,7 @@ int sde_hw_rc_setup_data_ahb(struct sde_hw_dspp *hw_dspp, void *cfg)
 
 	if ((hw_cfg->len == 0 && hw_cfg->payload == NULL)) {
 		SDE_DEBUG("rc feature disabled, skip data programming\n");
+		SDE_EVT32(RC_IDX(hw_dspp));
 		return 0;
 	}
 
@@ -1055,6 +1066,7 @@ int sde_hw_rc_setup_data_ahb(struct sde_hw_dspp *hw_dspp, void *cfg)
 
 	if (rc_mask_cfg->flags & SDE_HW_RC_SKIP_DATA_PROG) {
 		SDE_DEBUG("skip data programming\n");
+		SDE_EVT32(RC_IDX(hw_dspp));
 		return 0;
 	}
 

+ 26 - 14
msm/sde/sde_kms.c

@@ -2655,6 +2655,11 @@ error:
 		drm_framebuffer_put(fb);
 	}
 
+	drm_for_each_crtc(crtc, dev) {
+		if (!ret && crtc_mask & drm_crtc_mask(crtc))
+			sde_kms_cancel_delayed_work(crtc);
+	}
+
 end:
 	return ret;
 }
@@ -3881,6 +3886,7 @@ static int sde_kms_trigger_null_flush(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms;
 	struct sde_splash_display *splash_display;
+	struct drm_crtc *crtc;
 	int i, rc = 0;
 
 	if (!kms) {
@@ -3890,28 +3896,42 @@ static int sde_kms_trigger_null_flush(struct msm_kms *kms)
 
 	sde_kms = to_sde_kms(kms);
 
-	if (!sde_kms->splash_data.num_splash_displays ||
-		sde_kms->dsi_display_count == sde_kms->splash_data.num_splash_displays)
-		return rc;
+	/* If splash handoff is done, return early */
+	if (!sde_kms->splash_data.num_splash_displays)
+		return 0;
+
+	/* If all built-in displays have cont splash enabled, ignore lastclose */
+	if (sde_kms->dsi_display_count == sde_kms->splash_data.num_splash_displays)
+		return -EINVAL;
 
+	/*
+	 * Trigger NULL flush if built-in secondary/primary is stuck in splash
+	 * while the primary/secondary, respectively, is running before lastclose.
+	 */
 	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
 		splash_display = &sde_kms->splash_data.splash_display[i];
 
 		if (splash_display->cont_splash_enabled && splash_display->encoder) {
+			crtc = splash_display->encoder->crtc;
 			SDE_DEBUG("triggering null commit on enc:%d\n",
 					DRMID(splash_display->encoder));
 			SDE_EVT32(DRMID(splash_display->encoder), SDE_EVTLOG_FUNC_ENTRY);
 			rc = _sde_kms_null_commit(sde_kms->dev, splash_display->encoder);
+
+			if (!rc && crtc)
+				sde_kms_cancel_delayed_work(crtc);
+			if (rc)
+				DRM_ERROR("null flush commit failure during lastclose\n");
 		}
 	}
 
-	return rc;
+	return 0;
 }
 
 static void _sde_kms_pm_suspend_idle_helper(struct sde_kms *sde_kms,
 	struct device *dev)
 {
-	int i, ret, crtc_id = 0;
+	int ret, crtc_id = 0;
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct drm_connector *conn;
 	struct drm_connector_list_iter conn_iter;
@@ -3948,15 +3968,7 @@ static void _sde_kms_pm_suspend_idle_helper(struct sde_kms *sde_kms,
 	}
 	drm_connector_list_iter_end(&conn_iter);
 
-	for (i = 0; i < priv->num_crtcs; i++) {
-		if (priv->disp_thread[i].thread)
-			kthread_flush_worker(
-				&priv->disp_thread[i].worker);
-		if (priv->event_thread[i].thread)
-			kthread_flush_worker(
-				&priv->event_thread[i].worker);
-	}
-	kthread_flush_worker(&priv->pp_event_worker);
+	msm_atomic_flush_display_threads(priv);
 }
 
 struct msm_display_mode *sde_kms_get_msm_mode(struct drm_connector_state *conn_state)
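To summarize the reworked control flow in sde_kms_trigger_null_flush(), here is a stand-alone sketch of the three-way decision; the struct and helper below are illustrative, not driver code:

#include <errno.h>

struct splash_state {
	int num_splash_displays;	/* built-in displays still held by bootloader splash */
	int dsi_display_count;		/* total built-in DSI displays */
};

static int null_flush_decision(const struct splash_state *s)
{
	/* splash handoff already finished: nothing left to flush */
	if (!s->num_splash_displays)
		return 0;

	/* every built-in display is still in cont-splash: ignore lastclose */
	if (s->dsi_display_count == s->num_splash_displays)
		return -EINVAL;

	/* otherwise one display is live while the other is stuck in splash;
	 * the real function triggers a NULL commit per stuck display, cancels
	 * its delayed works, and logs failures without propagating them */
	return 0;
}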

+ 2 - 2
msm/sde/sde_plane.c

@@ -3992,8 +3992,8 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
 			PLANE_PROP_FB_TRANSLATION_MODE);
 
 	if (psde->pipe_hw->ops.set_ubwc_stats_roi)
-		msm_property_install_range(&psde->property_info, "ubwc_stats_roi",
-				0, 0, 0xFFFFFFFF, 0, PLANE_PROP_UBWC_STATS_ROI);
+		msm_property_install_volatile_range(&psde->property_info, "ubwc_stats_roi",
+				0, 0, ~0, 0, PLANE_PROP_UBWC_STATS_ROI);
 	vfree(info);
 }
 

+ 16 - 8
msm/sde/sde_rm.c

@@ -286,27 +286,33 @@ void sde_rm_get_resource_info(struct sde_rm *rm,
 {
 	struct sde_rm_hw_blk *blk;
 	enum sde_hw_blk_type type;
-	struct sde_rm_rsvp rsvp;
 	const struct sde_lm_cfg *lm_cfg;
 	bool is_built_in, is_pref;
 	u32 lm_pref = (BIT(SDE_DISP_PRIMARY_PREF) | BIT(SDE_DISP_SECONDARY_PREF));
 
+	mutex_lock(&rm->rm_lock);
+
 	/* Get all currently available resources */
 	memcpy(avail_res, &rm->avail_res,
 			sizeof(rm->avail_res));
 
+	/*
+	 * When the encoder is null, assume the display is external in order to
+	 * return the count of available non-preferred LMs.
+	 */
 	if (!drm_enc)
-		return;
-
-	is_built_in = sde_encoder_is_built_in_display(drm_enc);
-
-	rsvp.enc_id = drm_enc->base.id;
+		is_built_in = false;
+	else
+		is_built_in = sde_encoder_is_built_in_display(drm_enc);
 
 	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
 		list_for_each_entry(blk, &rm->hw_blks[type], list) {
 			/* Add back resources allocated to the given encoder */
-			if (blk->rsvp && blk->rsvp->enc_id == rsvp.enc_id)
+			if (blk->rsvp && drm_enc && blk->rsvp->enc_id == drm_enc->base.id) {
 				_sde_rm_inc_resource_info(rm, avail_res, blk);
+				if (type == SDE_HW_BLK_LM)
+					avail_res->num_lm_in_use++;
+			}
 
 			/**
 			 * Remove unallocated preferred lms that cannot reserved
@@ -316,11 +322,13 @@ void sde_rm_get_resource_info(struct sde_rm *rm,
 				lm_cfg = to_sde_hw_mixer(blk->hw)->cap;
 				is_pref = lm_cfg->features & lm_pref;
 
-				if (!blk->rsvp && !is_built_in && is_pref)
+				if (!blk->rsvp && !blk->rsvp_nxt && !is_built_in && is_pref)
 					_sde_rm_dec_resource_info(rm, avail_res, blk);
 			}
 		}
 	}
+
+	mutex_unlock(&rm->rm_lock);
 }
 
 static void _sde_rm_print_rsvps(

+ 16 - 13
msm/sde_dbg.c

@@ -1191,7 +1191,6 @@ void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog)
 	char buf[SDE_EVTLOG_BUF_MAX];
 	bool update_last_entry = true;
 	u32 in_log, in_mem, in_dump;
-	u32 log_size = 0;
 	char *dump_addr = NULL;
 	int i;
 
@@ -1202,21 +1201,22 @@ void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog)
 	in_mem = evtlog->dump_mode & SDE_DBG_DUMP_IN_MEM;
 	in_dump = evtlog->dump_mode & SDE_DBG_DUMP_IN_COREDUMP;
 
-	log_size = sde_evtlog_count(evtlog);
-	if (!log_size)
-		return;
-
 	if (!evtlog->dumped_evtlog) {
-		if (in_mem)
-			log_size = SDE_EVTLOG_ENTRY;
-		evtlog->dumped_evtlog = kvzalloc((log_size * SDE_EVTLOG_BUF_MAX), GFP_KERNEL);
-		evtlog->log_size = log_size;
+		evtlog->dumped_evtlog = kvzalloc((SDE_EVTLOG_ENTRY * SDE_EVTLOG_BUF_MAX),
+				GFP_KERNEL);
+		if (!evtlog->dumped_evtlog)
+			return;
+
+		evtlog->log_size = SDE_EVTLOG_ENTRY;
 	}
 	dump_addr = evtlog->dumped_evtlog;
 
 	if ((in_mem || in_dump) && dump_addr && (!sde_dbg_base.coredump_reading)) {
-		while (sde_evtlog_dump_to_buffer(evtlog, dump_addr, SDE_EVTLOG_BUF_MAX,
-				update_last_entry, true)) {
+		for (i = 0; i < evtlog->log_size; i++) {
+			if (!sde_evtlog_dump_to_buffer(evtlog, dump_addr, SDE_EVTLOG_BUF_MAX,
+					update_last_entry, true))
+				break;
+
 			dump_addr += SDE_EVTLOG_BUF_MAX;
 			update_last_entry = false;
 		}
@@ -1232,8 +1232,11 @@ void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog)
 	}
 
 	if (in_log) {
-		while (sde_evtlog_dump_to_buffer(evtlog, buf, sizeof(buf),
-					update_last_entry, false)) {
+		for (i = 0; i < evtlog->log_size; i++) {
+			if (!sde_evtlog_dump_to_buffer(evtlog, buf, SDE_EVTLOG_BUF_MAX,
+					update_last_entry, false))
+				break;
+
 			pr_info("%s\n", buf);
 			update_last_entry = false;
 		}
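The two loops above replace unbounded while() walks with an index capped at evtlog->log_size, the number of entries the dump buffer was sized for. A stand-alone sketch of that bound, with a stubbed formatter standing in for sde_evtlog_dump_to_buffer():

#include <stdbool.h>
#include <stddef.h>

#define ENTRY_LEN 512			/* stand-in for SDE_EVTLOG_BUF_MAX */

/* stub: the real code formats the next event-log entry and returns false when
 * the log is exhausted */
static bool format_next_entry(char *dst, size_t len)
{
	(void)dst;
	(void)len;
	return false;
}

/* buf was allocated as nentries * ENTRY_LEN bytes, so bounding the loop by
 * nentries keeps the write pointer inside the allocation even if the log
 * keeps producing entries */
static void dump_bounded(char *buf, size_t nentries)
{
	char *pos = buf;
	size_t i;

	for (i = 0; i < nentries; i++) {
		if (!format_next_entry(pos, ENTRY_LEN))
			break;
		pos += ENTRY_LEN;
	}
}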

+ 5 - 0
msm/sde_rsc_hw_v3.c

@@ -429,6 +429,11 @@ static int sde_rsc_state_update_v3(struct sde_rsc_priv *rsc,
 							reg, rsc->debug_mode);
 		wmb(); /* make sure that solver is enabled */
 
+		if (rsc->hw_ops.bwi_status) {
+			rsc->bwi_update = BW_NO_CHANGE;
+			rsc->hw_ops.bwi_status(rsc);
+		}
+
 		break;
 
 	case SDE_RSC_VID_STATE: