Merge remote-tracking branch 'quic/display-kernel.lnx.5.10' into display-kernel.lnx.5.15

* quic/display-kernel.lnx.5.10:
  disp: msm: sde: avoid error during fal10_veto override enablement
  disp: msm: update copyright description
  disp: msm: sde: configure dest_scaler op_mode for two independent displays
  disp: msm: dp: updated copyright set for 4nm target
  Revert "disp: msm: sde: consider max of actual and default prefill lines"
  disp: msm: sde: Reset backlight scale when HWC is stopped
  disp: msm: dp: avoid duplicate read of link status
  disp: msm: dsi: update vreg_ctrl settings for cape
  disp: msm: fail commit if drm_gem_obj was found attached to a sec CB
  disp: msm: dp: updated register values for 4nm target
  disp: msm: sde: update framedata event handling
  disp: msm: dsi: Add new phy compatible string for cape
  disp: msm: sde: software override for fal10 in cwb enable
  disp: msm: update cleanup during bind failure in msm_drm_component_init
  disp: msm: sde: dump user input_fence info on spec fence timeout
  disp: msm: sde: add null pointer check for encoder current master
  disp: msm: dsi: enable DMA start window scheduling for broadcast commands
  disp: msm: sde: avoid alignment checks for linear formats
  disp: msm: reset thread priority work on every new run
  disp: msm: sde: send power on event for cont. splash
  disp: msm: sde: always set CTL_x_UIDLE_ACTIVE register to "1"
  disp: msm: use vzalloc for large allocations
  disp: msm: sde: Add support to limit DSC size to 10k
  disp: msm: sde: add tx wait during DMS for sim panel
  disp: msm: dsi: add check for any queued DSI CMDs before clock force update
  disp: msm: sde: correct pp block allocation during dcwb dither programming
  disp: msm: sde: avoid setting of max vblank count
  disp: msm: sde: add cached lut flag in sde plane
  disp: msm: sde: avoid use after free in msm_lastclose
  disp: msm: sde: update TEAR_SYNC_WRCOUNT register before vsync counter
  disp: msm: dsi: Support uncompressed rgb101010 format
  disp: msm: sde: update idle_pc_enabled flag for all encoders
  disp: msm: sde: flush esd work before disabling the encoder
  disp: msm: sde: allow qsync update along with modeset
  disp: msm: dp: avoid dp sw reset on disconnect path
  disp: msm: sde: consider max of actual and default prefill lines
  disp: msm: ensure vbif debugbus not in use is disabled
  disp: msm: sde: update cached encoder mask if required
  disp: msm: sde: while timing engine enabling poll for active region
  disp: msm: enable cache flag for dumb buffer
  disp: msm: sde: disable ot limit for cwb
  disp: msm: sde: avoid race condition at vm release
  disp: msm: dsi: set qsync min fps list length to zero
  disp: msm: sde: reset mixers in crtc when ctl datapath switches
  disp: msm: sde: update vm state atomic check for non-primary usecases
  disp: msm: sde: reset CTL_UIDLE_ACTIVE register only if uidle is disabled

Change-Id: If480e7f33743eb4788549f853ba05e744ecb38d3
Signed-off-by: Narendra Muppalla <[email protected]>
Narendra Muppalla · 3 years ago
commit daa511cb90

+ 1 - 1
config/gki_parrotdispconf.h

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #define CONFIG_DRM_MSM 1

+ 2 - 1
msm/dp/dp_catalog_v420.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 
@@ -107,7 +108,7 @@ static void dp_catalog_aux_setup_v420(struct dp_catalog_aux *aux,
 	if (phy_version >= 0x60000000) {
 		/* Turn on BIAS current for PHY/PLL */
 		io_data = catalog->io->dp_pll;
-		dp_write(QSERDES_COM_BIAS_EN_CLKBUFLR_EN_V600, 0x1D);
+		dp_write(QSERDES_COM_BIAS_EN_CLKBUFLR_EN_V600, 0x17);
 		wmb(); /* make sure BIAS programming happened */
 	} else {
 		/* Turn on BIAS current for PHY/PLL */

+ 1 - 1
msm/dp/dp_ctrl.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 

+ 13 - 13
msm/dp/dp_pll_4nm.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 /*
@@ -236,9 +236,9 @@ static int dp_vco_pll_init_db_4nm(struct dp_pll_db *pdb,
 		pdb->lock_cmp2_mode0 = 0x0e;
 		pdb->phy_vco_div = 0x1;
 		pdb->lock_cmp_en = 0x08;
-		pdb->ssc_step_size1_mode0 = 0x13;
+		pdb->ssc_step_size1_mode0 = 0x45;
 		pdb->ssc_step_size2_mode0 = 0x06;
-		pdb->ssc_per1 = 0x40;
+		pdb->ssc_per1 = 0x36;
 		pdb->cmp_code1_mode0 = 0xE2;
 		pdb->cmp_code2_mode0 = 0x18;
 		break;
@@ -252,9 +252,9 @@ static int dp_vco_pll_init_db_4nm(struct dp_pll_db *pdb,
 		pdb->lock_cmp2_mode0 = 0x1c;
 		pdb->phy_vco_div = 0x2;
 		pdb->lock_cmp_en = 0x08;
-		pdb->ssc_step_size1_mode0 = 0x1a;
+		pdb->ssc_step_size1_mode0 = 0x5C;
 		pdb->ssc_step_size2_mode0 = 0x08;
-		pdb->ssc_per1 = 0x40;
+		pdb->ssc_per1 = 0x36;
 		pdb->cmp_code1_mode0 = 0x2E;
 		pdb->cmp_code2_mode0 = 0x21;
 		break;
@@ -268,9 +268,9 @@ static int dp_vco_pll_init_db_4nm(struct dp_pll_db *pdb,
 		pdb->lock_cmp2_mode0 = 0x2a;
 		pdb->phy_vco_div = 0x0;
 		pdb->lock_cmp_en = 0x08;
-		pdb->ssc_step_size1_mode0 = 0x13;
+		pdb->ssc_step_size1_mode0 = 0x45;
 		pdb->ssc_step_size2_mode0 = 0x06;
-		pdb->ssc_per1 = 0x40;
+		pdb->ssc_per1 = 0x36;
 		pdb->cmp_code1_mode0 = 0xE2;
 		pdb->cmp_code2_mode0 = 0x18;
 		break;
@@ -354,9 +354,9 @@ static int dp_config_vco_rate_4nm(struct dp_pll *pll,
 	if (pll->bonding_en)
 		dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1f);
 	else
-		dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1D);
+		dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x17);
 
-	dp_pll_write(dp_pll, QSERDES_COM_CORE_CLK_EN, 0x1f);
+	dp_pll_write(dp_pll, QSERDES_COM_CORE_CLK_EN, 0x0f);
 	dp_pll_write(dp_pll, QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0, pdb->cmp_code1_mode0);
 	dp_pll_write(dp_pll, QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0, pdb->cmp_code2_mode0);
 	/* Make sure the PHY register writes are done */
@@ -393,8 +393,8 @@ static int dp_config_vco_rate_4nm(struct dp_pll *pll,
 	dp_pll_write(dp_ln_tx0, DP_TRAN_DRVR_EMP_EN, 0xf);
 	dp_pll_write(dp_ln_tx0, TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
 	dp_pll_write(dp_ln_tx0, DP_TX_INTERFACE_MODE, 0x00);
-	dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_TX, 0x0A);
-	dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_RX, 0x11);
+	dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_TX, 0x0C);
+	dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_RX, 0x0C);
 	dp_pll_write(dp_ln_tx0, TXn_TX_BAND, 0x04);
 	/* Make sure the PLL register writes are done */
 	wmb();
@@ -409,8 +409,8 @@ static int dp_config_vco_rate_4nm(struct dp_pll *pll,
 	dp_pll_write(dp_ln_tx1, DP_TRAN_DRVR_EMP_EN, 0xf);
 	dp_pll_write(dp_ln_tx1, TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
 	dp_pll_write(dp_ln_tx1, DP_TX_INTERFACE_MODE, 0x00);
-	dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_TX, 0x0A);
-	dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_RX, 0x11);
+	dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_TX, 0x0C);
+	dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_RX, 0x0C);
 	dp_pll_write(dp_ln_tx1, TXn_TX_BAND, 0x04);
 	/* Make sure the PHY register writes are done */
 	wmb();

+ 1 - 0
msm/dsi/dsi_catalog.c

@@ -300,6 +300,7 @@ int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
 	case DSI_PHY_VERSION_4_1:
 	case DSI_PHY_VERSION_4_2:
 	case DSI_PHY_VERSION_4_3:
+	case DSI_PHY_VERSION_4_3_2:
 		dsi_catalog_phy_4_0_init(phy);
 		break;
 	case DSI_PHY_VERSION_5_2:

+ 8 - 4
msm/dsi/dsi_ctrl.c

@@ -418,6 +418,7 @@ static void dsi_ctrl_clear_dma_status(struct dsi_ctrl *dsi_ctrl)
 static void dsi_ctrl_post_cmd_transfer(struct dsi_ctrl *dsi_ctrl)
 {
 	int rc = 0;
+	struct dsi_ctrl_hw_ops dsi_hw_ops = dsi_ctrl->hw.ops;
 	struct dsi_clk_ctrl_info clk_info;
 	u32 mask = BIT(DSI_FIFO_OVERFLOW);
 
@@ -434,6 +435,10 @@ static void dsi_ctrl_post_cmd_transfer(struct dsi_ctrl *dsi_ctrl)
 		dsi_ctrl_dma_cmd_wait_for_done(dsi_ctrl);
 	}
 
+	if (dsi_ctrl->hw.reset_trig_ctrl)
+		dsi_hw_ops.reset_trig_ctrl(&dsi_ctrl->hw,
+				&dsi_ctrl->host_config.common_config);
+
 	/* Command engine disable, unmask overflow, remove vote on clocks and gdsc */
 	rc = dsi_ctrl_set_cmd_engine_state(dsi_ctrl, DSI_CTRL_ENGINE_OFF, false);
 	if (rc)
@@ -1005,6 +1010,9 @@ int dsi_ctrl_pixel_format_to_bpp(enum dsi_pixel_format dst_format)
 	case DSI_PIXEL_FORMAT_RGB888:
 		bpp = 24;
 		break;
+	case DSI_PIXEL_FORMAT_RGB101010:
+		bpp = 30;
+		break;
 	default:
 		bpp = 24;
 		break;
@@ -1371,10 +1379,6 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
 	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, flags,
 		msg->flags);
 
-	if (dsi_ctrl->hw.reset_trig_ctrl)
-		dsi_hw_ops.reset_trig_ctrl(&dsi_ctrl->hw,
-				&dsi_ctrl->host_config.common_config);
-
 	if (dsi_hw_ops.splitlink_cmd_setup && split_link->enabled)
 		dsi_hw_ops.splitlink_cmd_setup(&dsi_ctrl->hw,
 				&dsi_ctrl->host_config.common_config, flags);

+ 8 - 5
msm/dsi/dsi_ctrl_hw_cmn.c

@@ -41,9 +41,9 @@ static bool dsi_compression_enabled(struct dsi_mode_info *mode)
 
 /* Unsupported formats default to RGB888 */
 static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
-	0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4 };
+	0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4, 0x9 };
 static const u8 video_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
-	0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3 };
+	0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3, 0x4 };
 
 /**
  * dsi_split_link_setup() - setup dsi split link configurations
@@ -543,10 +543,13 @@ void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 	u32 reg = 0, offset = 0;
 	int pic_width = 0, this_frame_slices = 0, intf_ip_w = 0;
 	u32 pkt_per_line = 0, eol_byte_num = 0, bytes_in_slice = 0;
+	u32 bpp;
 
 	if (roi && (!roi->w || !roi->h))
 		return;
 
+	bpp = dsi_pixel_format_to_bpp(cfg->dst_format);
+
 	if (dsi_dsc_compression_enabled(mode)) {
 		struct msm_display_dsc_info dsc;
 
@@ -580,11 +583,11 @@ void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 		bytes_in_slice = vdc.bytes_in_slice;
 	} else if (roi) {
 		width_final = roi->w;
-		stride_final = roi->w * 3;
+		stride_final = DIV_ROUND_UP(roi->w * bpp, 8);
 		height_final = roi->h;
 	} else {
 		width_final = mode->h_active;
-		stride_final = mode->h_active * 3;
+		stride_final = DIV_ROUND_UP(mode->h_active * bpp, 8);
 		height_final = mode->v_active;
 	}
 
@@ -701,7 +704,7 @@ void dsi_ctrl_hw_cmn_video_engine_setup(struct dsi_ctrl_hw *ctrl,
 	reg |= (cfg->bllp_lp11_en ? BIT(12) : 0);
 	reg |= (cfg->traffic_mode & 0x3) << 8;
 	reg |= (cfg->vc_id & 0x3);
-	reg |= (video_mode_format_map[common_cfg->dst_format] & 0x3) << 4;
+	reg |= (video_mode_format_map[common_cfg->dst_format] & 0x7) << 4;
 	DSI_W32(ctrl, DSI_VIDEO_MODE_CTRL, reg);
 
 	reg = (common_cfg->swap_mode & 0x7) << 12;

+ 4 - 0
msm/dsi/dsi_defs.h

@@ -39,6 +39,7 @@
  * @DSI_PIXEL_FORMAT_RGB111:
  * @DSI_PIXEL_FORMAT_RGB332:
  * @DSI_PIXEL_FORMAT_RGB444:
+ * @DSI_PIXEL_FORMAT_RGB101010:
  * @DSI_PIXEL_FORMAT_MAX:
  */
 enum dsi_pixel_format {
@@ -49,6 +50,7 @@ enum dsi_pixel_format {
 	DSI_PIXEL_FORMAT_RGB111,
 	DSI_PIXEL_FORMAT_RGB332,
 	DSI_PIXEL_FORMAT_RGB444,
+	DSI_PIXEL_FORMAT_RGB101010,
 	DSI_PIXEL_FORMAT_MAX
 };
 
@@ -767,6 +769,8 @@ static inline int dsi_pixel_format_to_bpp(enum dsi_pixel_format fmt)
 		return 8;
 	case DSI_PIXEL_FORMAT_RGB444:
 		return 12;
+	case DSI_PIXEL_FORMAT_RGB101010:
+		return 30;
 	}
 	return 24;
 }
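
The stride changes in the dsi_ctrl_hw_cmn.c hunk above follow from this
helper: RGB101010 packs 30 bits per pixel, so the byte stride has to round
up instead of assuming 3 bytes per pixel. A worked sketch (widths are
illustrative, not from any panel):

	u32 bpp = 30;					/* DSI_PIXEL_FORMAT_RGB101010 */
	u32 stride = DIV_ROUND_UP(1080 * bpp, 8);	/* 32400 bits -> 4050 bytes */

	/* For a 101 px partial-update ROI: 101 * 30 = 3030 bits, so
	 * DIV_ROUND_UP gives 379 bytes, while the old "w * 3" produced
	 * 303 bytes, which is only correct for 24 bpp formats.
	 */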

+ 21 - 2
msm/dsi/dsi_display.c

@@ -706,14 +706,18 @@ static void dsi_display_set_cmd_tx_ctrl_flags(struct dsi_display *display,
 		/*
 		 * Set flags for command scheduling.
 		 * 1) In video mode command DMA scheduling is default.
-		 * 2) In command mode command DMA scheduling depends on message
+		 * 2) In command mode unicast command DMA scheduling depends on message
 		 * flag and TE needs to be running.
+		 * 3) In command mode broadcast command DMA scheduling is default and
+		 * TE needs to be running.
 		 */
 		if (display->panel->panel_mode == DSI_OP_VIDEO_MODE) {
 			flags |= DSI_CTRL_CMD_CUSTOM_DMA_SCHED;
 		} else {
 			if (msg->flags & MIPI_DSI_MSG_CMD_DMA_SCHED)
 				flags |= DSI_CTRL_CMD_CUSTOM_DMA_SCHED;
+			if (flags & DSI_CTRL_CMD_BROADCAST)
+				flags |= DSI_CTRL_CMD_CUSTOM_DMA_SCHED;
 			if (!display->enabled)
 				flags &= ~DSI_CTRL_CMD_CUSTOM_DMA_SCHED;
 		}
@@ -5363,7 +5367,22 @@ int dsi_display_splash_res_cleanup(struct  dsi_display *display)
 
 static int dsi_display_force_update_dsi_clk(struct dsi_display *display)
 {
-	int rc = 0;
+	int rc = 0, i = 0;
+	struct dsi_display_ctrl *ctrl;
+
+	/*
+	 * Force updating the DSI clock is the only clock update path that toggles the state of
+	 * DSI clocks without any ref count protection. With the addition of ASYNC command wait,
+	 * there is a need for adding a check for any queued waits before updating these clocks.
+	 */
+	display_for_each_ctrl(i, display) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || !(ctrl->ctrl->post_tx_queued))
+			continue;
+		flush_workqueue(display->post_cmd_tx_workq);
+		cancel_work_sync(&ctrl->ctrl->post_cmd_tx_work);
+		ctrl->ctrl->post_tx_queued = false;
+	}
 
 	rc = dsi_display_link_clk_force_update_ctrl(display->dsi_clk_handle);
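
The scheduling comment in the first hunk reduces to a small decision rule;
a sketch with the flag names from the diff (the helper and its boolean
parameters are hypothetical, for illustration only):

	static u32 dma_sched_flags(bool video_mode, bool msg_wants_sched,
				   bool broadcast, bool panel_enabled)
	{
		u32 flags = 0;

		/* video mode always schedules; command mode schedules on
		 * request or for broadcast commands */
		if (video_mode || msg_wants_sched || broadcast)
			flags |= DSI_CTRL_CMD_CUSTOM_DMA_SCHED;

		/* command-mode scheduling rides on TE, which only runs
		 * once the display is enabled */
		if (!video_mode && !panel_enabled)
			flags &= ~DSI_CTRL_CMD_CUSTOM_DMA_SCHED;

		return flags;
	}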
 

+ 7 - 1
msm/dsi/dsi_panel.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -945,6 +946,9 @@ static int dsi_panel_parse_pixel_format(struct dsi_host_common_cfg *host,
 	case 18:
 		fmt = DSI_PIXEL_FORMAT_RGB666;
 		break;
+	case 30:
+		fmt = DSI_PIXEL_FORMAT_RGB101010;
+		break;
 	case 24:
 	default:
 		fmt = DSI_PIXEL_FORMAT_RGB888;
@@ -1326,8 +1330,10 @@ static int dsi_panel_parse_qsync_caps(struct dsi_panel *panel,
 	 */
 	qsync_caps->qsync_min_fps_list_len = utils->count_u32_elems(utils->data,
 				  "qcom,dsi-supported-qsync-min-fps-list");
-	if (qsync_caps->qsync_min_fps_list_len < 1)
+	if (qsync_caps->qsync_min_fps_list_len < 1) {
+		qsync_caps->qsync_min_fps_list_len = 0;
 		goto qsync_support;
+	}
 
 	/**
 	 * qcom,dsi-supported-qsync-min-fps-list cannot be defined

+ 11 - 0
msm/dsi/dsi_phy.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/of_device.h>
@@ -71,6 +72,14 @@ static const struct dsi_ver_spec_info dsi_phy_v4_3 = {
 	.timing_cfg_count = 14,
 };
 
+static const struct dsi_ver_spec_info dsi_phy_v4_3_2 = {
+	.version = DSI_PHY_VERSION_4_3_2,
+	.lane_cfg_count = 4,
+	.strength_cfg_count = 2,
+	.regulator_cfg_count = 0,
+	.timing_cfg_count = 14,
+};
+
 static const struct dsi_ver_spec_info dsi_phy_v5_2 = {
 	.version = DSI_PHY_VERSION_5_2,
 	.lane_cfg_count = 4,
@@ -90,6 +99,8 @@ static const struct of_device_id msm_dsi_phy_of_match[] = {
 	  .data = &dsi_phy_v4_2,},
 	{ .compatible = "qcom,dsi-phy-v4.3",
 	  .data = &dsi_phy_v4_3,},
+	{ .compatible = "qcom,dsi-phy-v4.3.2",
+	  .data = &dsi_phy_v4_3_2,},
 	{ .compatible = "qcom,dsi-phy-v5.2",
 	  .data = &dsi_phy_v5_2,},
 	{}

+ 2 - 0
msm/dsi/dsi_phy_hw.h

@@ -35,6 +35,7 @@
  * @DSI_PHY_VERSION_4_1:	7nm
  * @DSI_PHY_VERSION_4_2:        5nm
  * @DSI_PHY_VERSION_4_3:        5nm
+ * @DSI_PHY_VERSION_4_3_2:	4nm (v4.3 specific to SM8475)
  * @DSI_PHY_VERSION_5_2:        4nm
  * @DSI_PHY_VERSION_MAX:
  */
@@ -45,6 +46,7 @@ enum dsi_phy_version {
 	DSI_PHY_VERSION_4_1, /* 7nm */
 	DSI_PHY_VERSION_4_2, /* 5nm */
 	DSI_PHY_VERSION_4_3, /* 5nm */
+	DSI_PHY_VERSION_4_3_2, /* 4nm */
 	DSI_PHY_VERSION_5_2, /* 4nm */
 	DSI_PHY_VERSION_MAX
 };

+ 16 - 3
msm/dsi/dsi_phy_hw_v4_0.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/math64.h>
@@ -247,6 +248,7 @@ static void dsi_phy_hw_cphy_enable(struct dsi_phy_hw *phy,
 	u32 minor_ver = 0;
 	/* For C-PHY, no low power settings for lower clk rate */
 	u32 vreg_ctrl_0 = 0x51;
+	u32 vreg_ctrl_1 = 0x55;
 	u32 glbl_str_swi_cal_sel_ctrl = 0;
 	u32 glbl_hstx_str_ctrl_0 = 0;
 	u32 glbl_rescode_top_ctrl = 0;
@@ -272,6 +274,11 @@ static void dsi_phy_hw_cphy_enable(struct dsi_phy_hw *phy,
 		glbl_rescode_bot_ctrl = 0x3c;
 	}
 
+	if (phy->version == DSI_PHY_VERSION_4_3_2) {
+		vreg_ctrl_0 = 0x45;
+		vreg_ctrl_1 = 0x41;
+	}
+
 	/* de-assert digital and pll power down */
 	data = BIT(6) | BIT(5);
 	DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
@@ -295,7 +302,7 @@ static void dsi_phy_hw_cphy_enable(struct dsi_phy_hw *phy,
 
 	/* Enable LDO */
 	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
-	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_1, 0x55);
+	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_1, vreg_ctrl_1);
 	DSI_W32(phy, DSIPHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
 					glbl_str_swi_cal_sel_ctrl);
 	DSI_W32(phy, DSIPHY_CMN_GLBL_HSTX_STR_CTRL_0, glbl_hstx_str_ctrl_0);
@@ -356,6 +363,7 @@ static void dsi_phy_hw_dphy_enable(struct dsi_phy_hw *phy,
 	u32 minor_ver = 0;
 	bool less_than_1500_mhz = false;
 	u32 vreg_ctrl_0 = 0;
+	u32 vreg_ctrl_1 = 0x5c;
 	u32 glbl_str_swi_cal_sel_ctrl = 0;
 	u32 glbl_hstx_str_ctrl_0 = 0;
 	u32 glbl_rescode_top_ctrl = 0;
@@ -390,6 +398,11 @@ static void dsi_phy_hw_dphy_enable(struct dsi_phy_hw *phy,
 	if (phy->version >= DSI_PHY_VERSION_4_3)
 		glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x01;
 
+	if (phy->version == DSI_PHY_VERSION_4_3_2) {
+		vreg_ctrl_0 = 0x44;
+		vreg_ctrl_1 = 0x19;
+	}
+
 	split_link_enabled = cfg->split_link.enabled;
 	lanes_per_sublink = cfg->split_link.lanes_per_sublink;
 	/* de-assert digital and pll power down */
@@ -418,7 +431,7 @@ static void dsi_phy_hw_dphy_enable(struct dsi_phy_hw *phy,
 
 	/* Enable LDO */
 	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
-	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_1, 0x5c);
+	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_1, vreg_ctrl_1);
 	DSI_W32(phy, DSIPHY_CMN_CTRL_3, 0x00);
 	DSI_W32(phy, DSIPHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
 					glbl_str_swi_cal_sel_ctrl);
@@ -491,7 +504,7 @@ void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy,
 		pr_warn("PLL turned on before configuring PHY\n");
 
 	/* Request for REFGEN ready */
-	if (phy->version == DSI_PHY_VERSION_4_3) {
+	if (phy->version >= DSI_PHY_VERSION_4_3) {
 		DSI_W32(phy, DSIPHY_CMN_GLBL_DIGTOP_SPARE10, 0x1);
 		udelay(500);
 	}
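
Relaxing == to >= here leans on enum ordering (see the dsi_phy_hw.h hunk
below): DSI_PHY_VERSION_4_3_2 and DSI_PHY_VERSION_5_2 both sort after
DSI_PHY_VERSION_4_3, so a single relational check requests REFGEN for the
5nm v4.3 PHY and both 4nm PHYs without enumerating each version.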

+ 3 - 1
msm/dsi/dsi_phy_timing_calc.c

@@ -1,12 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "dsi_phy_timing_calc.h"
 
 static const u32 bits_per_pixel[DSI_PIXEL_FORMAT_MAX] = {
-	16, 18, 18, 24, 3, 8, 12 };
+	16, 18, 18, 24, 3, 8, 12, 30 };
 
 static int dsi_phy_cmn_validate_and_set(struct timing_entry *t,
 	char const *t_name)
@@ -994,6 +995,7 @@ int dsi_phy_timing_calc_init(struct dsi_phy_hw *phy,
 	case DSI_PHY_VERSION_4_1:
 	case DSI_PHY_VERSION_4_2:
 	case DSI_PHY_VERSION_4_3:
+	case DSI_PHY_VERSION_4_3_2:
 	case DSI_PHY_VERSION_5_2:
 		ops->get_default_phy_params =
 			dsi_phy_hw_v4_0_get_default_phy_params;

+ 20 - 5
msm/msm_drv.c

@@ -519,8 +519,10 @@ static int msm_drm_uninit(struct device *dev)
 	drm_irq_uninstall(ddev);
 #endif
 
-	if (kms && kms->funcs)
+	if (kms && kms->funcs) {
 		kms->funcs->destroy(kms);
+		priv->kms = NULL;
+	}
 
 	if (priv->vram.paddr) {
 		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
@@ -687,7 +689,6 @@ static int msm_drm_display_thread_create(struct msm_drm_private *priv, struct dr
 {
 	int i, ret = 0;
 
-	kthread_init_work(&priv->thread_priority_work, msm_drm_display_thread_priority_worker);
 	for (i = 0; i < priv->num_crtcs; i++) {
 		/* initialize display thread */
 		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
@@ -697,7 +698,10 @@ static int msm_drm_display_thread_create(struct msm_drm_private *priv, struct dr
 			kthread_run(kthread_worker_fn,
 				&priv->disp_thread[i].worker,
 				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
+		kthread_init_work(&priv->thread_priority_work,
+				  msm_drm_display_thread_priority_worker);
 		kthread_queue_work(&priv->disp_thread[i].worker, &priv->thread_priority_work);
+		kthread_flush_work(&priv->thread_priority_work);
 
 		if (IS_ERR(priv->disp_thread[i].thread)) {
 			dev_err(dev, "failed to create crtc_commit kthread\n");
@@ -719,7 +723,10 @@ static int msm_drm_display_thread_create(struct msm_drm_private *priv, struct dr
 		 * frame_pending counters beyond 2. This can lead to commit
 		 * failure at crtc commit level.
 		 */
+		kthread_init_work(&priv->thread_priority_work,
+				  msm_drm_display_thread_priority_worker);
 		kthread_queue_work(&priv->event_thread[i].worker, &priv->thread_priority_work);
+		kthread_flush_work(&priv->thread_priority_work);
 
 		if (IS_ERR(priv->event_thread[i].thread)) {
 			dev_err(dev, "failed to create crtc_event kthread\n");
@@ -754,7 +761,9 @@ static int msm_drm_display_thread_create(struct msm_drm_private *priv, struct dr
 	kthread_init_worker(&priv->pp_event_worker);
 	priv->pp_event_thread = kthread_run(kthread_worker_fn,
 			&priv->pp_event_worker, "pp_event");
+	kthread_init_work(&priv->thread_priority_work, msm_drm_display_thread_priority_worker);
 	kthread_queue_work(&priv->pp_event_worker, &priv->thread_priority_work);
+	kthread_flush_work(&priv->thread_priority_work);
 
 	if (IS_ERR(priv->pp_event_thread)) {
 		dev_err(dev, "failed to create pp_event kthread\n");
@@ -908,8 +917,12 @@ static int msm_drm_component_init(struct device *dev)
 
 	/* Bind all our sub-components: */
 	ret = msm_component_bind_all(dev, ddev);
-	if (ret)
+	if (ret == -EPROBE_DEFER) {
+		destroy_workqueue(priv->wq);
+		return ret;
+	} else if (ret) {
 		goto bind_fail;
+	}
 
 	ret = msm_init_vram(ddev);
 	if (ret)
@@ -1104,12 +1117,14 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 static void msm_lastclose(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
+	struct msm_kms *kms;
 	int i, rc;
 
-	if (!kms)
+	if (!priv || !priv->kms)
 		return;
 
+	kms = priv->kms;
+
 	/* check for splash status before triggering cleanup
 	 * if we end up here with splash status ON i.e before first
 	 * commit then ignore the last close call
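
The re-init-plus-flush sequence added in the thread-create hunks above is
load-bearing: one kthread_work object cannot be queued on two workers at
once, so each use re-initializes the work and waits for it to complete
before the next queue reuses it. A minimal sketch of the idiom (worker
names and the priority callback are generic, not from the driver):

	static void priority_fn(struct kthread_work *work)
	{
		sched_set_fifo(current);	/* elevate this worker thread */
	}

	struct kthread_work w;

	kthread_init_work(&w, priority_fn);
	kthread_queue_work(&worker_a, &w);
	kthread_flush_work(&w);			/* wait: &w is idle again */

	kthread_init_work(&w, priority_fn);	/* safe to re-init before reuse */
	kthread_queue_work(&worker_b, &w);
	kthread_flush_work(&w);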

+ 13 - 1
msm/msm_gem.c

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <[email protected]>
@@ -451,6 +452,17 @@ static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 		if ((dev && obj->import_attach) &&
 				((dev != obj->import_attach->dev) ||
 				msm_obj->obj_dirty)) {
+
+			if (of_device_is_compatible(dev->of_node, "qcom,smmu_sde_unsec") &&
+				of_device_is_compatible(obj->import_attach->dev->of_node,
+				"qcom,smmu_sde_sec")) {
+				SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt,
+						 msm_obj->obj_dirty);
+				DRM_ERROR("gem obj found mapped to %s, now requesting map on %s",
+					dev_name(obj->import_attach->dev), dev_name(dev));
+				return -EINVAL;
+			}
+
 			dmabuf = obj->import_attach->dmabuf;
 			dma_map_attrs = obj->import_attach->dma_map_attrs;
 
@@ -653,7 +665,7 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 	args->pitch = align_pitch(args->width, args->bpp);
 	args->size  = PAGE_ALIGN(args->pitch * args->height);
 	return msm_gem_new_handle(dev, file, args->size,
-			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
+			MSM_BO_SCANOUT | MSM_BO_CACHED, &args->handle, "dumb");
 }
 
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
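
The compatible-string check in the first hunk implements the "fail commit
if drm_gem_obj was found attached to a sec CB" change from the commit list:
if an imported buffer is still attached to the secure SMMU context bank
(qcom,smmu_sde_sec) and a mapping is then requested on the non-secure bank
(qcom,smmu_sde_unsec), the request fails with -EINVAL instead of silently
remapping across security domains. The second hunk, from "enable cache
flag for dumb buffer", allocates dumb buffers as MSM_BO_CACHED rather than
write-combined.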

+ 4 - 4
msm/sde/sde_color_processing.c

@@ -4869,7 +4869,7 @@ void sde_cp_crtc_enable(struct drm_crtc *drm_crtc)
 	if (!num_mixers)
 		return;
 	mutex_lock(&crtc->crtc_cp_lock);
-	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+	info = vzalloc(sizeof(struct sde_kms_info));
 	if (info) {
 		for (i = 0; i < ARRAY_SIZE(dspp_cap_update_func); i++)
 			dspp_cap_update_func[i](crtc, info);
@@ -4878,7 +4878,7 @@ void sde_cp_crtc_enable(struct drm_crtc *drm_crtc)
 			info->data, SDE_KMS_INFO_DATALEN(info),
 			CRTC_PROP_DSPP_INFO);
 	}
-	kfree(info);
+	vfree(info);
 	mutex_unlock(&crtc->crtc_cp_lock);
 }
 
@@ -4893,7 +4893,7 @@ void sde_cp_crtc_disable(struct drm_crtc *drm_crtc)
 	}
 	crtc = to_sde_crtc(drm_crtc);
 	mutex_lock(&crtc->crtc_cp_lock);
-	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+	info = vzalloc(sizeof(struct sde_kms_info));
 	if (info)
 		msm_property_set_blob(&crtc->property_info,
 				&crtc->dspp_blob_info,
@@ -4904,7 +4904,7 @@ void sde_cp_crtc_disable(struct drm_crtc *drm_crtc)
 	crtc->skip_blend_plane_h = 0;
 	crtc->skip_blend_plane_w = 0;
 	mutex_unlock(&crtc->crtc_cp_lock);
-	kfree(info);
+	vfree(info);
 }
 
 void sde_cp_clear_state_info(struct drm_crtc_state *state)
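
Per the "use vzalloc for large allocations" commit above, sde_kms_info is
large enough that kzalloc's physically contiguous, high-order allocation
can fail on a fragmented system; vzalloc maps individual pages instead.
For comparison, the generic kernel idiom for large, never-DMA-mapped
buffers is kvzalloc/kvfree, sketched here (not what the driver chose):

	#include <linux/mm.h>

	info = kvzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
	if (!info)
		return;
	/* ... */
	kvfree(info);	/* frees either the kmalloc or vmalloc backing */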

+ 7 - 8
msm/sde/sde_connector.c

@@ -810,6 +810,10 @@ static int _sde_connector_update_bl_scale(struct sde_connector *c_conn)
 	}
 
 	bl_config = &dsi_display->panel->bl_config;
+	bl_config->bl_scale = c_conn->bl_scale > MAX_BL_SCALE_LEVEL ?
+			MAX_BL_SCALE_LEVEL : c_conn->bl_scale;
+	bl_config->bl_scale_sv = c_conn->bl_scale_sv > SV_BL_SCALE_CAP ?
+			SV_BL_SCALE_CAP : c_conn->bl_scale_sv;
 
 	if (!c_conn->allow_bl_update) {
 		c_conn->unset_bl_level = bl_config->bl_level;
@@ -819,11 +823,6 @@ static int _sde_connector_update_bl_scale(struct sde_connector *c_conn)
 	if (c_conn->unset_bl_level)
 		bl_config->bl_level = c_conn->unset_bl_level;
 
-	bl_config->bl_scale = c_conn->bl_scale > MAX_BL_SCALE_LEVEL ?
-			MAX_BL_SCALE_LEVEL : c_conn->bl_scale;
-	bl_config->bl_scale_sv = c_conn->bl_scale_sv > SV_BL_SCALE_CAP ?
-			SV_BL_SCALE_CAP : c_conn->bl_scale_sv;
-
 	SDE_DEBUG("bl_scale = %u, bl_scale_sv = %u, bl_level = %u\n",
 		bl_config->bl_scale, bl_config->bl_scale_sv,
 		bl_config->bl_level);
@@ -2765,7 +2764,7 @@ static void sde_connector_check_status_work(struct work_struct *work)
 	dev = conn->base.dev->dev;
 
 	if (!conn->ops.check_status || dev->power.is_suspended ||
-			(conn->dpms_mode != DRM_MODE_DPMS_ON)) {
+			(conn->lp_mode == SDE_MODE_DPMS_OFF)) {
 		SDE_DEBUG("dpms mode: %d\n", conn->dpms_mode);
 		mutex_unlock(&conn->lock);
 		return;
@@ -2909,7 +2908,7 @@ int sde_connector_set_blob_data(struct drm_connector *conn,
 		return -EINVAL;
 	}
 
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	info = vzalloc(sizeof(*info));
 	if (!info)
 		return -ENOMEM;
 
@@ -2967,7 +2966,7 @@ int sde_connector_set_blob_data(struct drm_connector *conn,
 			SDE_KMS_INFO_DATALEN(info),
 			prop_id);
 exit:
-	kfree(info);
+	vfree(info);
 
 	return rc;
 }
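
Hoisting the clamping above the allow_bl_update early-return latches the
scale properties even while backlight writes are blocked, which is what
lets the "Reset backlight scale when HWC is stopped" change take effect.
The clamp itself just saturates; a worked sketch with an assumed
full-scale constant (illustrative only):

	/* assume MAX_BL_SCALE_LEVEL == 1024 */
	bl_scale = min(c_conn->bl_scale, 1024U);	/* e.g. 4096 -> 1024 */

	/* the effective level then scales linearly:
	 * bl_level * bl_scale / MAX_BL_SCALE_LEVEL
	 * e.g. 255 * 512 / 1024 = 127 (half brightness)
	 */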

+ 69 - 22
msm/sde/sde_crtc.c

@@ -71,6 +71,8 @@ static int sde_crtc_mmrm_interrupt_handler(struct drm_crtc *crtc_drm,
 	bool en, struct sde_irq_callback *idle_irq);
 static int sde_crtc_pm_event_handler(struct drm_crtc *crtc, bool en,
 		struct sde_irq_callback *noirq);
+static int sde_crtc_frame_data_interrupt_handler(struct drm_crtc *crtc_drm,
+	bool en, struct sde_irq_callback *idle_irq);
 static int _sde_crtc_set_noise_layer(struct sde_crtc *sde_crtc,
 				struct sde_crtc_state *cstate,
 				void __user *usr_ptr);
@@ -89,6 +91,7 @@ static struct sde_crtc_custom_events custom_events[] = {
 	{DRM_EVENT_LTM_OFF, sde_cp_ltm_off_event_handler},
 	{DRM_EVENT_MMRM_CB, sde_crtc_mmrm_interrupt_handler},
 	{DRM_EVENT_VM_RELEASE, sde_crtc_vm_release_handler},
+	{DRM_EVENT_FRAME_DATA, sde_crtc_frame_data_interrupt_handler},
 };
 
 /* default input fence timeout, in ms */
@@ -576,9 +579,10 @@ static const struct attribute_group *sde_crtc_attr_groups[] = {
 	NULL,
 };
 
-static void sde_crtc_event_notify(struct drm_crtc *crtc, uint32_t type, uint32_t len, uint64_t val)
+static void sde_crtc_event_notify(struct drm_crtc *crtc, uint32_t type, void *payload, uint32_t len)
 {
 	struct drm_event event;
+	uint32_t *data = (uint32_t *)payload;
 
 	if (!crtc) {
 		SDE_ERROR("invalid crtc\n");
@@ -587,10 +591,12 @@ static void sde_crtc_event_notify(struct drm_crtc *crtc, uint32_t type, uint32_t
 
 	event.type = type;
 	event.length = len;
-	msm_mode_object_event_notify(&crtc->base, crtc->dev, &event, (u8 *)&val);
+	msm_mode_object_event_notify(&crtc->base, crtc->dev, &event, (u8 *)payload);
 
-	SDE_EVT32(DRMID(crtc), type, len, val >> 32, val & 0xFFFFFFFF);
-	SDE_DEBUG("crtc:%d event(%d) value(%llu) notified\n", DRMID(crtc), type, val);
+	SDE_EVT32(DRMID(crtc), type, len, *data,
+			((uint64_t)payload) >> 32, ((uint64_t)payload) & 0xFFFFFFFF);
+	SDE_DEBUG("crtc:%d event(%u) ptr(%pK) value(%u) notified\n",
+			DRMID(crtc), type, payload, *data);
 }
 
 static void sde_crtc_destroy(struct drm_crtc *crtc)
@@ -2495,6 +2501,7 @@ static int _sde_crtc_get_frame_data_buffer(struct drm_crtc *crtc, uint32_t fd)
 		return -ENOMEM;
 
 	sde_crtc->frame_data.buf[cur_buf] = buf;
+	buf->fd = fd;
 	buf->fb = drm_framebuffer_lookup(crtc->dev, NULL, fd);
 	if (!buf->fb) {
 		SDE_ERROR("unable to get fb");
@@ -2568,8 +2575,8 @@ static void _sde_crtc_frame_data_notify(struct drm_crtc *crtc,
 	buf.fd = sde_crtc->frame_data.buf[cur_buf]->fd;
 	buf.offset = msm_gem->offset;
 
-	sde_crtc_event_notify(crtc, DRM_EVENT_FRAME_DATA, sizeof(struct sde_drm_frame_data_buf),
-			(uint64_t)(&buf));
+	sde_crtc_event_notify(crtc, DRM_EVENT_FRAME_DATA, &buf,
+			sizeof(struct sde_drm_frame_data_buf));
 
 	sde_crtc->frame_data.idx = ++sde_crtc->frame_data.idx % sde_crtc->frame_data.cnt;
 }
@@ -2960,6 +2967,10 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc,
 		struct drm_crtc_state *old_state)
 {
 	struct sde_crtc *sde_crtc;
+	struct sde_splash_display *splash_display = NULL;
+	struct sde_kms *sde_kms;
+	bool cont_splash_enabled = false;
+	int i;
 	u32 power_on = 1;
 
 	if (!crtc || !crtc->state) {
@@ -2970,8 +2981,17 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc,
 	sde_crtc = to_sde_crtc(crtc);
 	SDE_EVT32_VERBOSE(DRMID(crtc));
 
-	if (crtc->state->active_changed && crtc->state->active)
-		sde_crtc_event_notify(crtc, DRM_EVENT_CRTC_POWER, sizeof(u32), power_on);
+	sde_kms = _sde_crtc_get_kms(crtc);
+
+	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
+		splash_display = &sde_kms->splash_data.splash_display[i];
+		if (splash_display->cont_splash_enabled &&
+				crtc == splash_display->encoder->crtc)
+			cont_splash_enabled = true;
+	}
+
+	if ((crtc->state->active_changed || cont_splash_enabled) && crtc->state->active)
+		sde_crtc_event_notify(crtc, DRM_EVENT_CRTC_POWER, &power_on, sizeof(u32));
 
 	sde_core_perf_crtc_update(crtc, 0, false);
 }
@@ -3713,6 +3733,9 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
 		_sde_crtc_setup_is_ppsplit(crtc->state);
 		_sde_crtc_setup_lm_bounds(crtc, crtc->state);
 		_sde_crtc_clear_all_blend_stages(sde_crtc);
+	} else if (sde_crtc->num_mixers && sde_crtc->reinit_crtc_mixers) {
+		_sde_crtc_setup_mixers(crtc);
+		sde_crtc->reinit_crtc_mixers = false;
 	}
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -3738,13 +3761,12 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
 	_sde_crtc_dest_scaler_setup(crtc);
 	sde_cp_crtc_apply_noise(crtc, old_state);
 
-	if (crtc->state->mode_changed || sde_kms->perf.catalog->uidle_cfg.dirty) {
+	if (crtc->state->mode_changed || sde_kms->perf.catalog->uidle_cfg.dirty)
 		sde_core_perf_crtc_update_uidle(crtc, true);
-	} else if (!test_bit(SDE_CRTC_DIRTY_UIDLE, &sde_crtc->revalidate_mask) &&
-			sde_kms->perf.uidle_enabled)
-		sde_core_perf_uidle_setup_ctl(crtc, false);
 
-	test_and_clear_bit(SDE_CRTC_DIRTY_UIDLE, &sde_crtc->revalidate_mask);
+	/* update cached_encoder_mask if new conn is added or removed */
+	if (crtc->state->connectors_changed)
+		sde_crtc->cached_encoder_mask = crtc->state->encoder_mask;
 
 	/*
 	 * Since CP properties use AXI buffer to program the
@@ -4434,7 +4456,6 @@ void sde_crtc_reset_sw_state(struct drm_crtc *crtc)
 
 	/* mark other properties which need to be dirty for next update */
 	set_bit(SDE_CRTC_DIRTY_DIM_LAYERS, &sde_crtc->revalidate_mask);
-	set_bit(SDE_CRTC_DIRTY_UIDLE, &sde_crtc->revalidate_mask);
 	if (cstate->num_ds_enabled)
 		set_bit(SDE_CRTC_DIRTY_DEST_SCALER, cstate->dirty);
 }
@@ -4479,7 +4500,7 @@ static void sde_crtc_mmrm_cb_notification(struct drm_crtc *crtc)
 			kms->perf.clk_name);
 
 	/* notify user space the reduced clk rate */
-	sde_crtc_event_notify(crtc, DRM_EVENT_MMRM_CB, sizeof(unsigned long), requested_clk);
+	sde_crtc_event_notify(crtc, DRM_EVENT_MMRM_CB, &requested_clk, sizeof(unsigned long));
 
 	SDE_DEBUG("crtc[%d]: MMRM cb notified clk:%d\n",
 		crtc->base.id, requested_clk);
@@ -4553,7 +4574,7 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg)
 		sde_crtc_reset_sw_state(crtc);
 		sde_cp_crtc_suspend(crtc);
 		power_on = 0;
-		sde_crtc_event_notify(crtc, DRM_EVENT_SDE_POWER, sizeof(u32), power_on);
+		sde_crtc_event_notify(crtc, DRM_EVENT_SDE_POWER, &power_on, sizeof(u32));
 		break;
 	case SDE_POWER_EVENT_MMRM_CALLBACK:
 		sde_crtc_mmrm_cb_notification(crtc);
@@ -4716,7 +4737,7 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
 	sde_cp_crtc_disable(crtc);
 
 	power_on = 0;
-	sde_crtc_event_notify(crtc, DRM_EVENT_CRTC_POWER, sizeof(u32), power_on);
+	sde_crtc_event_notify(crtc, DRM_EVENT_CRTC_POWER, &power_on, sizeof(u32));
 
 	mutex_unlock(&sde_crtc->crtc_lock);
 }
@@ -4738,11 +4759,18 @@ static void sde_crtc_enable(struct drm_crtc *crtc,
 	struct sde_crtc_state *cstate;
 	struct msm_display_mode *msm_mode;
 	enum sde_intf_mode intf_mode;
+	struct sde_kms *kms;
 
 	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
+	kms = _sde_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		SDE_ERROR("invalid kms handle\n");
+		return;
+	}
+
 	priv = crtc->dev->dev_private;
 	cstate = to_sde_crtc_state(crtc->state);
 
@@ -4767,7 +4795,8 @@ static void sde_crtc_enable(struct drm_crtc *crtc,
 		intf_mode = sde_crtc_get_intf_mode(crtc, crtc->state);
 		if ((intf_mode != INTF_MODE_WB_BLOCK) && (intf_mode != INTF_MODE_WB_LINE)) {
 			/* max possible vsync_cnt(atomic_t) soft counter */
-			drm_crtc_set_max_vblank_count(crtc, INT_MAX);
+			if (kms->catalog->has_precise_vsync_ts)
+				drm_crtc_set_max_vblank_count(crtc, INT_MAX);
 			drm_crtc_vblank_on(crtc);
 		}
 	}
@@ -5662,6 +5691,8 @@ static u32 sde_crtc_get_vblank_counter(struct drm_crtc *crtc)
 {
 	struct drm_encoder *encoder;
 	struct sde_crtc *sde_crtc;
+	bool is_built_in;
+	u32 vblank_cnt;
 
 	if (!crtc)
 		return 0;
@@ -5672,7 +5703,14 @@ static u32 sde_crtc_get_vblank_counter(struct drm_crtc *crtc)
 		if (sde_encoder_in_clone_mode(encoder))
 			continue;
 
-		return sde_encoder_get_frame_count(encoder);
+		is_built_in = sde_encoder_is_built_in_display(encoder);
+		vblank_cnt = sde_encoder_get_frame_count(encoder);
+
+		SDE_EVT32(DRMID(crtc), DRMID(encoder), is_built_in, vblank_cnt);
+		SDE_DEBUG("crtc:%d enc:%d is_built_in:%d vblank_cnt:%d\n",
+				DRMID(crtc), DRMID(encoder), is_built_in, vblank_cnt);
+
+		return vblank_cnt;
 	}
 
 	return 0;
@@ -5954,7 +5992,7 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
 		return;
 	}
 
-	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+	info = vzalloc(sizeof(struct sde_kms_info));
 	if (!info) {
 		SDE_ERROR("failed to allocate info memory\n");
 		return;
@@ -6058,7 +6096,7 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
 		msm_property_install_range(&sde_crtc->property_info, "frame_data",
 				0x0, 0, ~0, 0, CRTC_PROP_FRAME_DATA_BUF);
 
-	kfree(info);
+	vfree(info);
 }
 
 static int _sde_crtc_get_output_fence(struct drm_crtc *crtc,
@@ -7610,6 +7648,13 @@ static int sde_crtc_vm_release_handler(struct drm_crtc *crtc_drm,
 {
 	return 0;
 }
+
+static int sde_crtc_frame_data_interrupt_handler(struct drm_crtc *crtc_drm,
+	bool en, struct sde_irq_callback *irq)
+{
+	return 0;
+}
+
 /**
  * sde_crtc_update_cont_splash_settings - update mixer settings
  *	and initial clk during device bootup for cont_splash use case
@@ -7769,5 +7814,7 @@ void sde_crtc_disable_cp_features(struct drm_crtc *crtc)
 
 void _sde_crtc_vm_release_notify(struct drm_crtc *crtc)
 {
-	sde_crtc_event_notify(crtc, DRM_EVENT_VM_RELEASE, sizeof(uint32_t), 1);
+	uint32_t val = 1;
+
+	sde_crtc_event_notify(crtc, DRM_EVENT_VM_RELEASE, &val, sizeof(uint32_t));
 }
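
The sde_crtc_event_notify() signature change is the substantive fix in
this file: the old helper took a u64 value, so struct payloads were passed
as (uint64_t)(&buf) and the event payload became the pointer value itself
rather than the struct contents. Passing pointer plus length lets the
notifier copy the real payload. Old versus new call shape, taken from the
frame-data hunk above:

	/* before: pointer cast into the 64-bit value argument */
	sde_crtc_event_notify(crtc, DRM_EVENT_FRAME_DATA,
			sizeof(struct sde_drm_frame_data_buf), (uint64_t)(&buf));

	/* after: payload pointer + byte length, copied by the notifier */
	sde_crtc_event_notify(crtc, DRM_EVENT_FRAME_DATA, &buf,
			sizeof(struct sde_drm_frame_data_buf));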

+ 2 - 1
msm/sde/sde_crtc.h

@@ -301,6 +301,7 @@ struct sde_frame_data {
  * @ltm_buffer_lock : mutex to protect ltm_buffers allocation and free
  * @ltm_lock        : Spinlock to protect ltm buffer_cnt, hist_en and ltm lists
  * @needs_hw_reset  : Initiate a hw ctl reset
+ * @reinit_crtc_mixers : Reinitialize mixers in crtc
  * @hist_irq_idx    : hist interrupt irq idx
  * @disable_pending_cp : flag tracks pending color processing features force disable
  * @src_bpp         : source bpp used to calculate compression ratio
@@ -400,6 +401,7 @@ struct sde_crtc {
 	struct mutex ltm_buffer_lock;
 	spinlock_t ltm_lock;
 	bool needs_hw_reset;
+	bool reinit_crtc_mixers;
 	int hist_irq_idx;
 	bool disable_pending_cp;
 
@@ -425,7 +427,6 @@ enum sde_crtc_dirty_flags {
 	SDE_CRTC_DIRTY_DEST_SCALER,
 	SDE_CRTC_DIRTY_DIM_LAYERS,
 	SDE_CRTC_NOISE_LAYER,
-	SDE_CRTC_DIRTY_UIDLE,
 	SDE_CRTC_DIRTY_MAX,
 };
 

+ 72 - 10
msm/sde/sde_encoder.c

@@ -142,7 +142,8 @@ void sde_encoder_uidle_enable(struct drm_encoder *drm_enc, bool enable)
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
 		if (phys && phys->hw_ctl && phys->hw_ctl->ops.uidle_enable) {
-			SDE_EVT32(DRMID(drm_enc), enable);
+			if (enable)
+				SDE_EVT32(DRMID(drm_enc), enable);
 			phys->hw_ctl->ops.uidle_enable(phys->hw_ctl, enable);
 		}
 	}
@@ -219,6 +220,36 @@ ktime_t sde_encoder_calc_last_vsync_timestamp(struct drm_encoder *drm_enc)
 	return tvblank;
 }
 
+static void _sde_encoder_control_fal10_veto(struct drm_encoder *drm_enc, bool veto)
+{
+	bool clone_mode;
+	struct sde_kms *sde_kms = sde_encoder_get_kms(drm_enc);
+	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+
+	if (sde_kms->catalog && !sde_kms->catalog->uidle_cfg.uidle_rev)
+		return;
+
+	if (!sde_kms->hw_uidle || !sde_kms->hw_uidle->ops.uidle_fal10_override) {
+		SDE_ERROR("invalid args\n");
+		return;
+	}
+
+	/*
+	 * clone mode is the only scenario where we want to enable software override
+	 * of fal10 veto.
+	 */
+	clone_mode = sde_encoder_in_clone_mode(drm_enc);
+	SDE_EVT32(DRMID(drm_enc), clone_mode, veto);
+
+	if (clone_mode && veto) {
+		sde_kms->hw_uidle->ops.uidle_fal10_override(sde_kms->hw_uidle, veto);
+		sde_enc->fal10_veto_override = true;
+	} else if (sde_enc->fal10_veto_override && !veto) {
+		sde_kms->hw_uidle->ops.uidle_fal10_override(sde_kms->hw_uidle, veto);
+		sde_enc->fal10_veto_override = false;
+	}
+}
+
 static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
@@ -1132,10 +1163,8 @@ static int _sde_encoder_atomic_check_qsync(struct sde_connector *sde_conn,
 	qsync_dirty = msm_property_is_dirty(&sde_conn->property_info,
 			&sde_conn_state->property_state, CONNECTOR_PROP_QSYNC_MODE);
 
-	if (has_modeset && qsync_dirty &&
-			(msm_is_mode_seamless_poms(&sde_conn_state->msm_mode) ||
-			 msm_is_mode_seamless_dms(&sde_conn_state->msm_mode) ||
-			 msm_is_mode_seamless_dyn_clk(&sde_conn_state->msm_mode))) {
+	if (has_modeset && qsync_dirty && (msm_is_mode_seamless_poms(&sde_conn_state->msm_mode) ||
+				msm_is_mode_seamless_dyn_clk(&sde_conn_state->msm_mode))) {
 		SDE_ERROR("invalid qsync update during modeset priv flag:%x\n",
 				sde_conn_state->msm_mode.private_flags);
 		return -EINVAL;
@@ -2451,17 +2480,37 @@ static void _sde_encoder_virt_populate_hw_res(struct drm_encoder *drm_enc)
 }
 
 static int sde_encoder_virt_modeset_rc(struct drm_encoder *drm_enc,
-		struct msm_display_mode *msm_mode, bool pre_modeset)
+	struct drm_display_mode *adj_mode, struct msm_display_mode *msm_mode, bool pre_modeset)
 {
 	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
 	enum sde_intf_mode intf_mode;
+	struct drm_display_mode *old_adj_mode = NULL;
 	int ret;
-	bool is_cmd_mode = false;
+	bool is_cmd_mode = false, res_switch = false;
 
 	if (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE))
 		is_cmd_mode = true;
 
 	if (pre_modeset) {
+		if (sde_enc->cur_master)
+			old_adj_mode = &sde_enc->cur_master->cached_mode;
+		if (old_adj_mode && is_cmd_mode)
+			res_switch = !drm_mode_match(old_adj_mode, adj_mode,
+					DRM_MODE_MATCH_TIMINGS);
+
+		if (res_switch && sde_enc->disp_info.is_te_using_watchdog_timer) {
+			/*
+			 * add tx wait for sim panel to avoid wd timer getting
+			 * updated in middle of frame to avoid early vsync
+			 */
+			ret = sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+			if (ret && ret != -EWOULDBLOCK) {
+				SDE_ERROR_ENC(sde_enc, "wait for idle failed %d\n", ret);
+				SDE_EVT32(DRMID(drm_enc), ret, SDE_EVTLOG_ERROR);
+				return ret;
+			}
+		}
+
 		intf_mode = sde_encoder_get_intf_mode(drm_enc);
 		if (msm_is_mode_seamless_dms(msm_mode) ||
 				(msm_is_mode_seamless_dyn_clk(msm_mode) &&
@@ -2510,6 +2559,7 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 	struct drm_connector *conn;
 	struct sde_connector_state *c_state;
 	struct msm_display_mode *msm_mode;
+	struct sde_crtc *sde_crtc;
 	int i = 0, ret;
 	int num_lm, num_intf, num_pp_per_intf;
 
@@ -2541,6 +2591,7 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 	}
 
 	sde_enc->crtc = drm_enc->crtc;
+	sde_crtc = to_sde_crtc(drm_enc->crtc);
 	sde_crtc_set_qos_dirty(drm_enc->crtc);
 
 	/* get and store the mode_info */
@@ -2566,7 +2617,7 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 
 	/* release resources before seamless mode change */
 	msm_mode = &c_state->msm_mode;
-	ret = sde_encoder_virt_modeset_rc(drm_enc, msm_mode, true);
+	ret = sde_encoder_virt_modeset_rc(drm_enc, adj_mode, msm_mode, true);
 	if (ret)
 		return;
 
@@ -2600,12 +2651,13 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 			phys->hw_pp = sde_enc->hw_pp[i * num_pp_per_intf];
 			phys->connector = conn;
 			if (phys->ops.mode_set)
-				phys->ops.mode_set(phys, mode, adj_mode);
+				phys->ops.mode_set(phys, mode, adj_mode,
+				&sde_crtc->reinit_crtc_mixers);
 		}
 	}
 
 	/* update resources after seamless mode change */
-	sde_encoder_virt_modeset_rc(drm_enc, msm_mode, false);
+	sde_encoder_virt_modeset_rc(drm_enc, adj_mode, msm_mode, false);
 }
 
 void sde_encoder_control_te(struct drm_encoder *drm_enc, bool enable)
@@ -2802,6 +2854,7 @@ static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 
 	memset(&sde_enc->prv_conn_roi, 0, sizeof(sde_enc->prv_conn_roi));
 	memset(&sde_enc->cur_conn_roi, 0, sizeof(sde_enc->cur_conn_roi));
+	_sde_encoder_control_fal10_veto(drm_enc, true);
 }
 
 static void _sde_encoder_setup_dither(struct sde_encoder_phys *phys)
@@ -3065,6 +3118,8 @@ void sde_encoder_virt_reset(struct drm_encoder *drm_enc)
 	struct sde_kms *sde_kms = sde_encoder_get_kms(drm_enc);
 	int i = 0;
 
+	_sde_encoder_control_fal10_veto(drm_enc, false);
+
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		if (sde_enc->phys_encs[i]) {
 			sde_enc->phys_encs[i]->cont_splash_enabled = false;
@@ -3089,6 +3144,7 @@ void sde_encoder_virt_reset(struct drm_encoder *drm_enc)
 static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
+	struct sde_connector *sde_conn;
 	struct sde_kms *sde_kms;
 	enum sde_intf_mode intf_mode;
 	int ret, i = 0;
@@ -3110,6 +3166,11 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!sde_enc->cur_master) {
+		SDE_ERROR("Invalid cur_master\n");
+		return;
+	}
+	sde_conn = to_sde_connector(sde_enc->cur_master->connector);
 	SDE_DEBUG_ENC(sde_enc, "\n");
 
 	sde_kms = sde_encoder_get_kms(&sde_enc->base);
@@ -3126,6 +3187,7 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
 
 	_sde_encoder_input_handler_unregister(drm_enc);
 
+	flush_delayed_work(&sde_conn->status_work);
 	/*
 	 * For primary command mode and video mode encoders, execute the
 	 * resource control pre-stop operations before the physical encoders

+ 3 - 1
msm/sde/sde_encoder.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <[email protected]>
@@ -170,6 +170,7 @@ enum sde_enc_rc_states {
  * @cur_conn_roi:		current connector roi
  * @prv_conn_roi:		previous connector roi to optimize if unchanged
  * @crtc			pointer to drm_crtc
+ * @fal10_veto_override:	software override for micro idle fal10 veto
  * @recovery_events_enabled:	status of hw recovery feature enable by client
  * @elevated_ahb_vote:		increase AHB bus speed for the first frame
  *				after power collapse
@@ -244,6 +245,7 @@ struct sde_encoder_virt {
 	struct sde_rect prv_conn_roi;
 	struct drm_crtc *crtc;
 
+	bool fal10_veto_override;
 	bool recovery_events_enabled;
 	bool elevated_ahb_vote;
 	struct dev_pm_qos_request pm_qos_cpu_req[NR_CPUS];

+ 1 - 1
msm/sde/sde_encoder_phys.h

@@ -148,7 +148,7 @@ struct sde_encoder_phys_ops {
 			struct drm_display_mode *adjusted_mode);
 	void (*mode_set)(struct sde_encoder_phys *encoder,
 			struct drm_display_mode *mode,
-			struct drm_display_mode *adjusted_mode);
+			struct drm_display_mode *adjusted_mode, bool *reinit_mixers);
 	void (*cont_splash_mode_set)(struct sde_encoder_phys *encoder,
 			struct drm_display_mode *adjusted_mode);
 	void (*enable)(struct sde_encoder_phys *encoder);

+ 9 - 3
msm/sde/sde_encoder_phys_cmd.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -431,7 +431,7 @@ static void sde_encoder_phys_cmd_cont_splash_mode_set(
 static void sde_encoder_phys_cmd_mode_set(
 		struct sde_encoder_phys *phys_enc,
 		struct drm_display_mode *mode,
-		struct drm_display_mode *adj_mode)
+		struct drm_display_mode *adj_mode, bool *reinit_mixers)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 		to_sde_encoder_phys_cmd(phys_enc);
@@ -452,8 +452,14 @@ static void sde_encoder_phys_cmd_mode_set(
 	/* Retrieve previously allocated HW Resources. Shouldn't fail */
 	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
 	for (i = 0; i <= instance; i++) {
-		if (sde_rm_get_hw(rm, &iter))
+		if (sde_rm_get_hw(rm, &iter)) {
+			if (phys_enc->hw_ctl && phys_enc->hw_ctl != to_sde_hw_ctl(iter.hw)) {
+				*reinit_mixers = true;
+				SDE_EVT32(phys_enc->hw_ctl->idx,
+						to_sde_hw_ctl(iter.hw)->idx);
+			}
 			phys_enc->hw_ctl = to_sde_hw_ctl(iter.hw);
+		}
 	}
 
 	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {

+ 42 - 2
msm/sde/sde_encoder_phys_vid.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -606,7 +607,7 @@ static void sde_encoder_phys_vid_cont_splash_mode_set(
 static void sde_encoder_phys_vid_mode_set(
 		struct sde_encoder_phys *phys_enc,
 		struct drm_display_mode *mode,
-		struct drm_display_mode *adj_mode)
+		struct drm_display_mode *adj_mode, bool *reinit_mixers)
 {
 	struct sde_rm *rm;
 	struct sde_rm_hw_iter iter;
@@ -632,8 +633,14 @@ static void sde_encoder_phys_vid_mode_set(
 	/* Retrieve previously allocated HW Resources. Shouldn't fail */
 	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
 	for (i = 0; i <= instance; i++) {
-		if (sde_rm_get_hw(rm, &iter))
+		if (sde_rm_get_hw(rm, &iter)) {
+			if (phys_enc->hw_ctl && phys_enc->hw_ctl != to_sde_hw_ctl(iter.hw)) {
+				*reinit_mixers = true;
+				SDE_EVT32(phys_enc->hw_ctl->idx,
+						to_sde_hw_ctl(iter.hw)->idx);
+			}
 			phys_enc->hw_ctl = to_sde_hw_ctl(iter.hw);
+		}
 	}
 	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
 		SDE_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
@@ -1122,12 +1129,41 @@ exit:
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 }
 
+static int sde_encoder_phys_vid_poll_for_active_region(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+	struct intf_timing_params *timing;
+	u32 line_cnt, v_inactive, poll_time_us, trial = 0;
+
+	if (!phys_enc || !phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
+		return -EINVAL;
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	timing = &vid_enc->timing_params;
+
+	/* if programmable fetch is not enabled return early */
+	if (!programmable_fetch_get_num_lines(vid_enc, timing))
+		return 0;
+
+	poll_time_us = DIV_ROUND_UP(1000000, timing->vrefresh) / MAX_POLL_CNT;
+	v_inactive = timing->v_front_porch + timing->v_back_porch + timing->vsync_pulse_width;
+
+	do {
+		usleep_range(poll_time_us, poll_time_us + 5);
+		line_cnt = phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
+		trial++;
+	} while ((trial < MAX_POLL_CNT) && (line_cnt < v_inactive));
+
+	return (trial >= MAX_POLL_CNT) ? -ETIMEDOUT : 0;
+}
+
 static void sde_encoder_phys_vid_handle_post_kickoff(
 		struct sde_encoder_phys *phys_enc)
 {
 	unsigned long lock_flags;
 	struct sde_encoder_phys_vid *vid_enc;
 	u32 avr_mode;
+	u32 ret;
 
 	if (!phys_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -1150,6 +1186,10 @@ static void sde_encoder_phys_vid_handle_post_kickoff(
 				1);
 			spin_unlock_irqrestore(phys_enc->enc_spinlock,
 				lock_flags);
+
+			ret = sde_encoder_phys_vid_poll_for_active_region(phys_enc);
+			if (ret)
+				SDE_DEBUG_VIDENC(vid_enc, "poll for active failed ret:%d\n", ret);
 		}
 		phys_enc->enable_state = SDE_ENC_ENABLED;
 	}
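
For scale on the poll budget: at 60 Hz, DIV_ROUND_UP(1000000, 60) is
16667 us per frame, so with a hypothetical MAX_POLL_CNT of 10 each trial
sleeps about 1666 us and the full loop spans roughly one frame, which is
enough time for the line counter to climb past the vertical-inactive
region (v_front_porch + v_back_porch + vsync_pulse_width) once the timing
engine is actually running.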

+ 16 - 7
msm/sde/sde_encoder_phys_wb.c

@@ -447,7 +447,8 @@ static void _sde_encoder_phys_wb_setup_out_cfg(struct sde_encoder_phys *phys_enc
 			wb_cfg->dest.plane_addr[0], wb_cfg->dest.plane_size[0],
 			wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_size[1],
 			wb_cfg->dest.plane_addr[2], wb_cfg->dest.plane_size[2],
-			wb_cfg->dest.plane_addr[3], wb_cfg->dest.plane_size[3]);
+			wb_cfg->dest.plane_addr[3], wb_cfg->dest.plane_size[3],
+			wb_cfg->roi.x, wb_cfg->roi.y, wb_cfg->roi.w, wb_cfg->roi.h);
 		hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
 	}
 }
@@ -902,9 +903,10 @@ static int _sde_enc_phys_wb_validate_cwb(struct sde_encoder_phys *phys_enc,
 	}
 
 	if (((wb_roi.w < out_width) || (wb_roi.h < out_height)) &&
-			(wb_roi.w * wb_roi.h * fmt->bpp) % 256) {
-		SDE_ERROR("invalid stride w = %d h = %d bpp =%d out_width = %d, out_height = %d\n",
-				wb_roi.w, wb_roi.h, fmt->bpp, out_width, out_height);
+			((wb_roi.w * wb_roi.h * fmt->bpp) % 256) && !SDE_FORMAT_IS_LINEAR(fmt)) {
+		SDE_ERROR("invalid stride w=%d h=%d bpp=%d out_width=%d, out_height=%d lin=%d\n",
+				wb_roi.w, wb_roi.h, fmt->bpp, out_width, out_height,
+				SDE_FORMAT_IS_LINEAR(fmt));
 		return -EINVAL;
 	}
 
@@ -1646,8 +1648,10 @@ static void sde_encoder_phys_wb_irq_ctrl(struct sde_encoder_phys *phys, bool ena
  * @mode:	Pointer to requested display mode
  * @adj_mode:	Pointer to adjusted display mode
  */
-static void sde_encoder_phys_wb_mode_set(struct sde_encoder_phys *phys_enc,
-		struct drm_display_mode *mode, struct drm_display_mode *adj_mode)
+static void sde_encoder_phys_wb_mode_set(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode, bool *reinit_mixers)
 {
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	struct sde_rm *rm = &phys_enc->sde_kms->rm;
@@ -1669,8 +1673,13 @@ static void sde_encoder_phys_wb_mode_set(struct sde_encoder_phys *phys_enc,
 	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
 	for (i = 0; i <= instance; i++) {
 		sde_rm_get_hw(rm, &iter);
-		if (i == instance)
+		if (i == instance) {
+			if (phys_enc->hw_ctl && phys_enc->hw_ctl != to_sde_hw_ctl(iter.hw)) {
+				*reinit_mixers = true;
+				SDE_EVT32(phys_enc->hw_ctl->idx, to_sde_hw_ctl(iter.hw)->idx);
+			}
 			phys_enc->hw_ctl = to_sde_hw_ctl(iter.hw);
+		}
 	}
 
 	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {

+ 43 - 20
msm/sde/sde_fence.c

@@ -1,17 +1,20 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 #include <linux/sync_file.h>
 #include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
 #include "msm_drv.h"
 #include "sde_kms.h"
 #include "sde_fence.h"
 
 #define TIMELINE_VAL_LENGTH		128
 #define SPEC_FENCE_FLAG_FENCE_ARRAY	0x10
+#define SPEC_FENCE_FLAG_ARRAY_BIND	0x11
 
 void *sde_sync_get(uint64_t fd)
 {
@@ -25,11 +28,47 @@ void sde_sync_put(void *fence)
 		dma_fence_put(fence);
 }
 
+void sde_fence_dump(struct dma_fence *fence)
+{
+	char timeline_str[TIMELINE_VAL_LENGTH] = {0};
+
+	if (fence->ops->timeline_value_str)
+		fence->ops->timeline_value_str(fence, timeline_str, TIMELINE_VAL_LENGTH);
+
+	SDE_ERROR(
+		"fence drv name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x status:%d flags:0x%x\n",
+		fence->ops->get_driver_name(fence),
+		fence->ops->get_timeline_name(fence),
+		fence->seqno, timeline_str,
+		fence->ops->signaled ?
+		fence->ops->signaled(fence) : 0xffffffff,
+		dma_fence_get_status(fence), fence->flags);
+}
+
+static void sde_fence_dump_user_fds_info(struct dma_fence *base_fence)
+{
+	struct dma_fence_array *array;
+	struct dma_fence *user_fence;
+	int i;
+
+	array = container_of(base_fence, struct dma_fence_array, base);
+	if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &base_fence->flags) &&
+		test_bit(SPEC_FENCE_FLAG_ARRAY_BIND, &base_fence->flags)) {
+		for (i = 0; i < array->num_fences; i++) {
+			user_fence = array->fences[i];
+			if (user_fence) {
+				dma_fence_get(user_fence);
+				sde_fence_dump(user_fence);
+				dma_fence_put(user_fence);
+			}
+		}
+	}
+}
+
 signed long sde_sync_wait(void *fnc, long timeout_ms)
 {
 	struct dma_fence *fence = fnc;
 	int rc, status = 0;
-	char timeline_str[TIMELINE_VAL_LENGTH];
 
 	if (!fence)
 		return -EINVAL;
@@ -39,10 +78,6 @@ signed long sde_sync_wait(void *fnc, long timeout_ms)
 	rc = dma_fence_wait_timeout(fence, true,
 				msecs_to_jiffies(timeout_ms));
 	if (!rc || (rc == -EINVAL) || fence->error) {
-		if (fence->ops->timeline_value_str)
-			fence->ops->timeline_value_str(fence,
-					timeline_str, TIMELINE_VAL_LENGTH);
-
 		status = dma_fence_get_status(fence);
 		if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence->flags)) {
 			if (status == -EINVAL) {
@@ -51,23 +86,11 @@ signed long sde_sync_wait(void *fnc, long timeout_ms)
 			} else if (fence->ops->signaled && fence->ops->signaled(fence)) {
 				SDE_INFO("spec fence status:%d\n", status);
 			} else {
-				SDE_ERROR(
-					"fence driver name:%s timeline name:%s signaled:0x%x status:%d flags:0x%x rc:%d\n",
-					fence->ops->get_driver_name(fence),
-					fence->ops->get_timeline_name(fence),
-					fence->ops->signaled ?
-					fence->ops->signaled(fence) : 0xffffffff,
-					status, fence->flags, rc);
+				sde_fence_dump(fence);
+				sde_fence_dump_user_fds_info(fence);
 			}
 		} else {
-			SDE_ERROR(
-				"fence driver name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x status:%d\n",
-				fence->ops->get_driver_name(fence),
-				fence->ops->get_timeline_name(fence),
-				fence->seqno, timeline_str,
-				fence->ops->signaled ?
-				fence->ops->signaled(fence) : 0xffffffff,
-				status);
+			sde_fence_dump(fence);
 		}
 	}
 

+ 3 - 0
msm/sde/sde_hw_catalog.c

@@ -3195,6 +3195,8 @@ static int sde_dsc_parse_dt(struct device_node *np,
 						&dsc->features);
 			if (SDE_HW_MAJOR(sde_cfg->hw_rev) >= SDE_HW_MAJOR(SDE_HW_VER_900))
 				set_bit(SDE_DSC_4HS, &dsc->features);
+			if (sde_cfg->has_reduced_ob_max)
+				set_bit(SDE_DSC_REDUCED_OB_MAX, &dsc->features);
 		} else {
 			set_bit(SDE_DSC_HW_REV_1_1, &dsc->features);
 		}
@@ -5094,6 +5096,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		set_bit(SDE_FEATURE_CWB_CROP, sde_cfg->features);
 		set_bit(SDE_FEATURE_QSYNC, sde_cfg->features);
 		sde_cfg->perf.min_prefill_lines = 40;
+		sde_cfg->has_reduced_ob_max = true;
 		sde_cfg->vbif_qos_nlvl = 8;
 		sde_cfg->ts_prefill_rev = 2;
 		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;

+ 6 - 0
msm/sde/sde_hw_catalog.h

@@ -491,6 +491,7 @@ enum {
  * @SDE_DSC_HW_REV_1_1          dsc block supports dsc 1.1 only
  * @SDE_DSC_HW_REV_1_2          dsc block supports dsc 1.1 and 1.2
  * @SDE_DSC_NATIVE_422_EN,      Supports native422 and native420 encoding
+ * @SDE_DSC_REDUCED_OB_MAX,     DSC output buffer (OB) size is limited to 10k
  * @SDE_DSC_ENC,                DSC encoder sub block
  * @SDE_DSC_CTL,                DSC ctl sub block
  * @SDE_DSC_4HS,                Dedicated DSC 4HS config registers
@@ -501,6 +502,7 @@ enum {
 	SDE_DSC_HW_REV_1_1,
 	SDE_DSC_HW_REV_1_2,
 	SDE_DSC_NATIVE_422_EN,
+	SDE_DSC_REDUCED_OB_MAX,
 	SDE_DSC_ENC,
 	SDE_DSC_CTL,
 	SDE_DSC_4HS,
@@ -1729,6 +1731,8 @@ struct sde_perf_cfg {
  * @qseed_hw_rev        qseed HW block version
  * @smart_dma_rev       smartDMA block version
  * @ctl_rev             control path block version
+ * @has_precise_vsync_ts  indicates if HW has vsync timestamp logging capability
+ * @has_reduced_ob_max    indicates if DSC output buffer size is limited to 10k
  * @ts_prefill_rev      prefill traffic shaper feature revision
  * @true_inline_rot_rev inline rotator feature revision
  * @dnsc_blur_rev       downscale blur HW block version
@@ -1830,6 +1834,8 @@ struct sde_mdss_cfg {
 	u32 qseed_hw_rev;
 	u32 smart_dma_rev;
 	u32 ctl_rev;
+	bool has_precise_vsync_ts;
+	bool has_reduced_ob_max;
 	u32 ts_prefill_rev;
 	u32 true_inline_rot_rev;
 	u32 dnsc_blur_rev;

+ 11 - 1
msm/sde/sde_hw_ds.c

@@ -20,8 +20,18 @@
 static void sde_hw_ds_setup_opmode(struct sde_hw_ds *hw_ds, u32 op_mode)
 {
 	struct sde_hw_blk_reg_map *hw = &hw_ds->hw;
+	u32 op_mode_val;
 
-	SDE_REG_WRITE(hw, DEST_SCALER_OP_MODE, op_mode);
+	op_mode_val = SDE_REG_READ(hw, DEST_SCALER_OP_MODE);
+
+	if (op_mode)
+		op_mode_val |= op_mode;
+	else if (!op_mode && (op_mode_val & SDE_DS_OP_MODE_DUAL))
+		op_mode_val = 0;
+	else
+		op_mode_val &= ~BIT(hw_ds->idx - DS_0);
+
+	SDE_REG_WRITE(hw, DEST_SCALER_OP_MODE, op_mode_val);
 }
 
 static void sde_hw_ds_setup_opmode_v1(struct sde_hw_ds *hw_ds, u32 op_mode)

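The helper is now a read-modify-write, so enabling DS_1 no longer clobbers DS_0 when two independent displays each own a destination scaler. A host-side model of the three branches, with an assumed position for the dual-mode bit:

#include <assert.h>
#include <stdint.h>

#define DS_OP_MODE_DUAL (1u << 16)	/* assumed bit position */

static uint32_t ds_opmode(uint32_t cur, uint32_t op_mode, int ds_bit)
{
	if (op_mode)
		return cur | op_mode;		/* enable: OR in the new bits */
	if (cur & DS_OP_MODE_DUAL)
		return 0;			/* leaving dual mode: clear all */
	return cur & ~(1u << ds_bit);		/* disable only this scaler */
}

int main(void)
{
	uint32_t reg = 0;

	reg = ds_opmode(reg, 1u << 0, 0);	/* DS_0 on */
	reg = ds_opmode(reg, 1u << 1, 1);	/* DS_1 on: two independent displays */
	assert(reg == 0x3);
	reg = ds_opmode(reg, 0, 1);		/* DS_1 off leaves DS_0 untouched */
	assert(reg == 0x1);
	return 0;
}
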
+ 8 - 6
msm/sde/sde_hw_dsc_1_2.c

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
- * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2020-2022, The Linux Foundation. All rights reserved.
  */
 
 #include "sde_hw_mdss.h"
@@ -62,19 +62,21 @@
 static int _dsc_calc_ob_max_addr(struct sde_hw_dsc *hw_dsc, int num_ss)
 {
 	enum sde_dsc idx;
+	bool reduced_ob_max;
 
 	idx = hw_dsc->idx;
+	reduced_ob_max = hw_dsc->caps->features & BIT(SDE_DSC_REDUCED_OB_MAX);
 
 	if (!(hw_dsc->caps->features & BIT(SDE_DSC_NATIVE_422_EN))) {
 		if (num_ss == 1)
-			return 2399;
+			return reduced_ob_max ? 1199 : 2399;
 		else if (num_ss == 2)
-			return 1199;
+			return reduced_ob_max ? 599 : 1199;
 	} else {
 		if (num_ss == 1)
-			return 1199;
+			return reduced_ob_max ? 599 : 1199;
 		else if (num_ss == 2)
-			return 599;
+			return reduced_ob_max ? 299 : 599;
 	}
 	return 0;
 }

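Each reduced value halves the output-buffer depth of its default counterpart: reduced_max_addr = ((default_max_addr + 1) / 2) - 1, the return value being a last-valid-address. Summarized:

	num_ss	native 4:2:2	default		reduced (10k limit)
	1	no		2399		1199
	2	no		1199		 599
	1	yes		1199		 599
	2	yes		 599		 299
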
+ 11 - 2
msm/sde/sde_hw_intf.c

@@ -625,10 +625,12 @@ static int sde_hw_intf_setup_te_config(struct sde_hw_intf *intf,
 {
 	struct sde_hw_blk_reg_map *c;
 	u32 cfg = 0;
+	spinlock_t tearcheck_spinlock;
 
 	if (!intf)
 		return -EINVAL;
 
+	spin_lock_init(&tearcheck_spinlock);
 	c = &intf->hw;
 
 	if (te->hw_vsync_mode)
@@ -636,6 +638,14 @@ static int sde_hw_intf_setup_te_config(struct sde_hw_intf *intf,
 
 	cfg |= te->vsync_count;
 
+	/*
+	 * Acquire the local spinlock to disable preemption, as the
+	 * register programming below must complete within 2^16 vsync
+	 * clock cycles.
+	 */
+	spin_lock(&tearcheck_spinlock);
+	SDE_REG_WRITE(c, INTF_TEAR_SYNC_WRCOUNT,
+			(te->start_pos + te->sync_threshold_start + 1));
 	SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);
 	wmb(); /* disable vsync counter before updating single buffer registers */
 	SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
@@ -646,10 +656,9 @@ static int sde_hw_intf_setup_te_config(struct sde_hw_intf *intf,
 	SDE_REG_WRITE(c, INTF_TEAR_SYNC_THRESH,
 			((te->sync_threshold_continue << 16) |
 			 te->sync_threshold_start));
-	SDE_REG_WRITE(c, INTF_TEAR_SYNC_WRCOUNT,
-			(te->start_pos + te->sync_threshold_start + 1));
 	cfg |= BIT(19); /* VSYNC_COUNTER_EN */
 	SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);
+	spin_unlock(&tearcheck_spinlock);
 
 	return 0;
 }

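Two things changed here: INTF_TEAR_SYNC_WRCOUNT is now written before the vsync counter is enabled, and the whole sequence runs under a lock. Note the lock is function-local, so it only disables preemption on the calling CPU; it does not serialize concurrent callers. The ordering being enforced, as read from the hunk:

/*
 * Assumed programming order:
 *
 *   1. TEAR_SYNC_WRCOUNT               written while the counter is disabled
 *   2. TEAR_SYNC_CONFIG_VSYNC          cfg without VSYNC_COUNTER_EN
 *   3. wmb()                           settle before single-buffered registers
 *   4. height / vsync-init / thresholds
 *   5. TEAR_SYNC_CONFIG_VSYNC | BIT(19)  counter enabled last
 *
 * Preemption anywhere in 1..5 could blow the 2^16-vsync-clk budget, which
 * is what holding the lock across the sequence guards against.
 */
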
+ 20 - 1
msm/sde/sde_hw_uidle.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  *
  */
@@ -162,7 +163,7 @@ void sde_hw_uidle_setup_ctl(struct sde_hw_uidle *uidle,
 {
 	struct sde_hw_blk_reg_map *c = &uidle->hw;
 	bool enable = false;
-	u32 reg_val;
+	u32 reg_val, fal10_veto_regval = 0;
 
 	reg_val = SDE_REG_READ(c, UIDLE_CTL);
 
@@ -183,6 +184,10 @@ void sde_hw_uidle_setup_ctl(struct sde_hw_uidle *uidle,
 		FAL10_EXIT_CNT_MSK);
 
 	SDE_REG_WRITE(c, UIDLE_CTL, reg_val);
+	if (!enable)
+		fal10_veto_regval |= (BIT(31) | BIT(0));
+
+	SDE_REG_WRITE(c, UIDLE_FAL10_VETO_OVERRIDE, fal10_veto_regval);
 }
 
 static void sde_hw_uilde_active_override(struct sde_hw_uidle *uidle,
@@ -197,6 +202,19 @@ static void sde_hw_uilde_active_override(struct sde_hw_uidle *uidle,
 	SDE_REG_WRITE(c, UIDLE_QACTIVE_HF_OVERRIDE, reg_val);
 }
 
+static void sde_hw_uidle_fal10_override(struct sde_hw_uidle *uidle,
+		bool enable)
+{
+	struct sde_hw_blk_reg_map *c = &uidle->hw;
+	u32 reg_val = 0;
+
+	if (enable)
+		reg_val = BIT(0) | BIT(31);
+
+	SDE_REG_WRITE(c, UIDLE_FAL10_VETO_OVERRIDE, reg_val);
+	wmb(); /* ensure the veto override reaches the hw before returning */
+}
+
 static inline void _setup_uidle_ops(struct sde_hw_uidle_ops *ops,
 		unsigned long cap)
 {
@@ -207,6 +225,7 @@ static inline void _setup_uidle_ops(struct sde_hw_uidle_ops *ops,
 	ops->uidle_get_status = sde_hw_uidle_get_status;
 	if (cap & BIT(SDE_UIDLE_QACTIVE_OVERRIDE))
 		ops->active_override_enable = sde_hw_uilde_active_override;
+	ops->uidle_fal10_override = sde_hw_uidle_fal10_override;
 }
 
 struct sde_hw_uidle *sde_hw_uidle_init(enum sde_uidle idx,

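setup_ctl and the new op are now the only writers of UIDLE_FAL10_VETO_OVERRIDE, and they agree on the value. Net register state per path (treating BIT(0) | BIT(31) as "veto override asserted" is an assumption; the patch does not name the bits):

	setup_ctl, uidle disabled:           0x80000001  (veto held)
	setup_ctl, uidle enabled:            0x00000000  (veto released)
	uidle_fal10_override(uidle, true):   0x80000001
	uidle_fal10_override(uidle, false):  0x00000000
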
+ 7 - 0
msm/sde/sde_hw_uidle.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  *
  */
@@ -118,6 +119,12 @@ struct sde_hw_uidle_ops {
 	 */
 	void (*active_override_enable)(struct sde_hw_uidle *uidle,
 			bool enable);
+	/**
+	 * uidle_fal10_override - enable/disable fal10 override
+	 * @uidle: uidle context driver
+	 * @enable: enable/disable
+	 */
+	void (*uidle_fal10_override)(struct sde_hw_uidle *uidle, bool enable);
 };
 
 struct sde_hw_uidle {

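A hedged call-site sketch for the new op, e.g. bracketing concurrent-writeback enable as the "software override for fal10 in cwb enable" change suggests; the sde_kms field names are assumptions:

if (sde_kms->hw_uidle && sde_kms->hw_uidle->ops.uidle_fal10_override) {
	/* hold fal10 in veto while CWB is being brought up */
	sde_kms->hw_uidle->ops.uidle_fal10_override(sde_kms->hw_uidle, true);

	/* ... program and kick off CWB ... */

	sde_kms->hw_uidle->ops.uidle_fal10_override(sde_kms->hw_uidle, false);
}
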
+ 4 - 7
msm/sde/sde_hw_wb.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -437,12 +438,7 @@ static void sde_hw_wb_program_cwb_dither_ctrl(struct sde_hw_wb *ctx,
 		return;
 	}
 
-	/* map to pp_id from dcwb id */
-	if (dcwb_idx == DCWB_0) {
-		pp_id = PINGPONG_CWB_0;
-	} else if (dcwb_idx == DCWB_1) {
-		pp_id = PINGPONG_CWB_1;
-	} else {
+	if (dcwb_idx >= DCWB_MAX) {
 		DRM_ERROR("Invalid dcwb_idx %d\n", dcwb_idx);
 		return;
 	}
@@ -450,7 +446,8 @@ static void sde_hw_wb_program_cwb_dither_ctrl(struct sde_hw_wb *ctx,
 	/* find pp blk with pp_id */
 	for (idx = 0; idx < DCWB_MAX - DCWB_0; ++idx) {
 		pp = &ctx->dcwb_pp_hw[idx];
-		if (pp && pp->idx == pp_id) {
+		if (pp && dcwb_idx == idx + 1) {
+			pp_id = pp->idx;
 			found = true;
 			break;
 		}

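The hardcoded DCWB-to-pingpong switch becomes an index walk, and pp_id is read back from the matched block. Assuming DCWB_0 == 1 in the sde enum (which is what dcwb_idx == idx + 1 implies), the mapping is:

	dcwb_idx DCWB_0  ->  dcwb_pp_hw[0]  ->  pp_id PINGPONG_CWB_0
	dcwb_idx DCWB_1  ->  dcwb_pp_hw[1]  ->  pp_id PINGPONG_CWB_1

New DCWB instances then only need a dcwb_pp_hw[] entry instead of another switch arm.
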
+ 24 - 9
msm/sde/sde_kms.c

@@ -1337,9 +1337,11 @@ int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
 	/* if vm_req is enabled, once CRTC on the commit is guaranteed */
 	sde_kms_wait_for_frame_transfer_complete(&sde_kms->base, crtc);
 
+	sde_dbg_set_hw_ownership_status(false);
+
 	sde_kms_cancel_delayed_work(crtc);
 
-	/* disable SDE irq's */
+	/* disable SDE encoder IRQs */
 	drm_for_each_encoder_mask(encoder, crtc->dev,
 					crtc->state->encoder_mask) {
 		if (sde_encoder_in_clone_mode(encoder))
@@ -1349,8 +1351,6 @@ int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
 	}
 
 	if (is_primary) {
-		/* disable IRQ line */
-		sde_irq_update(&sde_kms->base, false);
 
 		/* disable vblank events */
 		drm_crtc_vblank_off(crtc);
@@ -1359,8 +1359,6 @@ int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
 		sde_crtc_reset_sw_state(crtc);
 	}
 
-	sde_dbg_set_hw_ownership_status(false);
-
 	return rc;
 }
 
@@ -1444,17 +1442,22 @@ int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
 	/* properly handoff color processing features */
 	sde_cp_crtc_vm_primary_handoff(crtc);
 
+	sde_vm_lock(sde_kms);
+
 	/* handle non-SDE clients pre-release */
 	if (vm_ops->vm_client_pre_release) {
 		rc = vm_ops->vm_client_pre_release(sde_kms);
 		if (rc) {
 			SDE_ERROR("sde vm client pre_release failed, rc=%d\n",
 					rc);
+			sde_vm_unlock(sde_kms);
 			goto exit;
 		}
 	}
 
-	sde_vm_lock(sde_kms);
+	/* disable IRQ line */
+	sde_irq_update(&sde_kms->base, false);
+
 	/* release HW */
 	if (vm_ops->vm_release) {
 		rc = vm_ops->vm_release(sde_kms);
@@ -2858,7 +2861,7 @@ static int sde_kms_check_vm_request(struct msm_kms *kms,
 	struct sde_vm_ops *vm_ops;
 	enum sde_crtc_vm_req old_vm_req = VM_REQ_NONE, new_vm_req = VM_REQ_NONE;
 	int i, rc = 0;
-	bool vm_req_active = false;
+	bool vm_req_active = false, prev_vm_req = false;
 	bool vm_owns_hw;
 
 	if (!kms || !state)
@@ -2872,6 +2875,14 @@ static int sde_kms_check_vm_request(struct msm_kms *kms,
 	if (!vm_ops->vm_request_valid || !vm_ops->vm_owns_hw || !vm_ops->vm_acquire)
 		return -EINVAL;
 
+	drm_for_each_crtc(crtc, state->dev) {
+		if (crtc->state && (sde_crtc_get_property(to_sde_crtc_state(crtc->state),
+				CRTC_PROP_VM_REQ_STATE) == VM_REQ_RELEASE)) {
+			prev_vm_req = true;
+			break;
+		}
+	}
+
 	/* check for an active vm request */
 	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
 		struct sde_crtc_state *old_state = NULL, *new_state = NULL;
@@ -2885,8 +2896,12 @@ static int sde_kms_check_vm_request(struct msm_kms *kms,
 		old_state = to_sde_crtc_state(old_cstate);
 		old_vm_req = sde_crtc_get_property(old_state, CRTC_PROP_VM_REQ_STATE);
 
-		/* No active request if the transition is from VM_REQ_NONE to VM_REQ_NONE */
-		if (old_vm_req || new_vm_req) {
+		/*
+		 * The VM request must be validated in the following use cases:
+		 * - there is a vm request (other than VM_REQ_NONE) on the
+		 *   current or previous crtc state
+		 * - a vm transition has previously taken place on one of the crtcs
+		 */
+		if (old_vm_req || new_vm_req || prev_vm_req) {
 			if (!vm_req_active) {
 				sde_vm_lock(sde_kms);
 				vm_owns_hw = sde_vm_owns_hw(sde_kms);

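prev_vm_req widens the check to commits that follow a VM release even when they carry no VM property themselves. The ownership flow being guarded, reconstructed from these hunks (trusted-VM naming assumed):

	VM_REQ_NONE     primary VM owns the display HW; normal commits
	VM_REQ_RELEASE  commit hands the HW off (encoder IRQs, vblank, sw state torn down)
	VM_REQ_ACQUIRE  commit reclaims the HW after the peer VM releases it

Between RELEASE and ACQUIRE, vm_owns_hw is false, so every intervening commit must be validated; the prev_vm_req scan is what catches those.
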
+ 44 - 14
msm/sde/sde_plane.c

@@ -107,6 +107,7 @@ struct sde_plane {
 	struct sde_csc_cfg *csc_usr_ptr;
 	struct sde_csc_cfg *csc_ptr;
 
+	uint32_t cached_lut_flag;
 	struct sde_hw_scaler3_cfg scaler3_cfg;
 	struct sde_hw_pixel_ext pixel_ext;
 
@@ -1992,7 +1993,9 @@ static int sde_plane_prepare_fb(struct drm_plane *plane,
 		ret = msm_framebuffer_prepare(fb,
 				pstate->aspace);
 		if (ret) {
-			SDE_ERROR("failed to prepare framebuffer\n");
+			SDE_ERROR("failed to prepare framebuffer fb:%d plane:%d pipe:%d ret:%d\n",
+				 fb->base.id, plane->base.id, psde->pipe, ret);
+			SDE_EVT32(fb->base.id, plane->base.id, psde->pipe, ret, SDE_EVTLOG_ERROR);
 			return ret;
 		}
 	}
@@ -3266,6 +3269,20 @@ static void _sde_plane_update_properties(struct drm_plane *plane,
 	pstate->dirty = 0x0;
 }
 
+static void _sde_plane_check_lut_dirty(struct sde_plane *psde,
+	struct sde_plane_state *pstate)
+{
+	/*
+	 * The configuration is valid if the scaler is disabled or the
+	 * LUT flag is set; otherwise fall back to the cached flag.
+	 */
+	if (pstate->scaler3_cfg.lut_flag || !pstate->scaler3_cfg.enable)
+		return;
+
+	pstate->scaler3_cfg.lut_flag = psde->cached_lut_flag;
+	SDE_EVT32(DRMID(&psde->base), pstate->scaler3_cfg.lut_flag, SDE_EVTLOG_ERROR);
+}
+
 static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
 				struct drm_plane_state *old_state)
 {
@@ -3317,10 +3334,15 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
 			state->crtc_w, state->crtc_h,
 			state->crtc_x, state->crtc_y);
 
+	/* cache the valid LUT flag in the sde plane */
+	if (pstate->scaler3_cfg.enable && pstate->scaler3_cfg.lut_flag)
+		psde->cached_lut_flag = pstate->scaler3_cfg.lut_flag;
+
 	/* force reprogramming of all the parameters, if the flag is set */
 	if (psde->revalidate) {
 		SDE_DEBUG("plane:%d - reconfigure all the parameters\n",
 				plane->base.id);
+		_sde_plane_check_lut_dirty(psde, pstate);
 		pstate->dirty = SDE_PLANE_DIRTY_ALL | SDE_PLANE_DIRTY_CP;
 		psde->revalidate = false;
 	}
@@ -3845,7 +3867,7 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
 	psde->catalog = catalog;
 	is_master = !psde->is_virtual;
 
-	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+	info = vzalloc(sizeof(struct sde_kms_info));
 	if (!info) {
 		SDE_ERROR("failed to allocate info memory\n");
 		return;
@@ -3927,7 +3949,7 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
 	if (psde->pipe_hw->ops.set_ubwc_stats_roi)
 		msm_property_install_range(&psde->property_info, "ubwc_stats_roi",
 				0, 0, 0xFFFFFFFF, 0, PLANE_PROP_UBWC_STATS_ROI);
-	kfree(info);
+	vfree(info);
 }
 
 static inline void _sde_plane_set_csc_v1(struct sde_plane *psde,
@@ -4143,27 +4165,34 @@ static void _sde_plane_set_excl_rect_v1(struct sde_plane *psde,
 }
 
 static void _sde_plane_set_ubwc_stats_roi(struct sde_plane *psde,
-		struct sde_plane_state *pstate, uint64_t roi)
+		struct sde_plane_state *pstate, void __user *usr_ptr)
 {
-	uint16_t y0, y1;
+	struct sde_drm_ubwc_stats_roi roi = {0};
 
 	if (!psde || !pstate) {
 		SDE_ERROR("invalid argument(s)\n");
 		return;
 	}
 
-	y0 = roi & 0xFFFF;
-	y1 = (roi >> 0x10) & 0xFFFF;
+	if (!usr_ptr) {
+		SDE_DEBUG_PLANE(psde, "ubwc roi disabled");
+		goto end;
+	}
+
+	if (copy_from_user(&roi, usr_ptr, sizeof(roi))) {
+		SDE_ERROR_PLANE(psde, "failed to copy ubwc stats roi");
+		return;
+	}
 
-	if (y0 > psde->pipe_cfg.src_rect.h || y1 > psde->pipe_cfg.src_rect.h) {
+	if (roi.y_coord0 > psde->pipe_cfg.src_rect.h || roi.y_coord1 > psde->pipe_cfg.src_rect.h) {
 		SDE_ERROR_PLANE(psde, "invalid ubwc roi y0 0x%x, y1 0x%x, src height 0x%x",
-				y0, y1, psde->pipe_cfg.src_rect.h);
-		y0 = 0;
-		y1 = 0;
+				roi.y_coord0, roi.y_coord1, psde->pipe_cfg.src_rect.h);
+		memset(&roi, 0, sizeof(roi));
 	}
 
-	pstate->ubwc_stats_roi.y_coord0 = y0;
-	pstate->ubwc_stats_roi.y_coord1 = y1;
+end:
+	SDE_EVT32(DRMID(&psde->base), roi.y_coord0, roi.y_coord1);
+	memcpy(&pstate->ubwc_stats_roi, &roi, sizeof(struct sde_drm_ubwc_stats_roi));
 }
 
 static int sde_plane_atomic_set_property(struct drm_plane *plane,
@@ -4208,7 +4237,8 @@ static int sde_plane_atomic_set_property(struct drm_plane *plane,
 						(void *)(uintptr_t)val);
 				break;
 			case PLANE_PROP_UBWC_STATS_ROI:
-				_sde_plane_set_ubwc_stats_roi(psde, pstate, val);
+				_sde_plane_set_ubwc_stats_roi(psde, pstate,
+						(void __user *)(uintptr_t)val);
 				break;
 			default:
 				/* nothing to do */

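PLANE_PROP_UBWC_STATS_ROI now carries a user pointer to struct sde_drm_ubwc_stats_roi rather than y0/y1 packed into the 64-bit property value. A hypothetical userspace sketch using the libdrm atomic API (req, plane_id and the property-id lookup are assumed):

struct sde_drm_ubwc_stats_roi roi = {
	.y_coord0 = 0,
	.y_coord1 = 1080,
};

/* the driver copy_from_user()s the struct, so pass its address as the value */
drmModeAtomicAddProperty(req, plane_id, ubwc_stats_roi_prop_id,
		(uint64_t)(uintptr_t)&roi);
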
+ 14 - 12
msm/sde_dbg.c

@@ -905,6 +905,7 @@ static void _sde_dbg_vbif_disable_block(void __iomem *mem_base, u32 wr_addr)
 			MMSS_VBIF_TEST_BUS2_CTRL0 : MMSS_VBIF_TEST_BUS1_CTRL0;
 	writel_relaxed(0, mem_base + disable_addr);
 	writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+	wmb(); /* update test bus */
 }
 
 static u32 _sde_dbg_vbif_read_test_point(void __iomem *mem_base, u32 wr_addr, u32 rd_addr,
@@ -920,6 +921,7 @@ static void _sde_dbg_vbif_clear_test_point(void __iomem *mem_base, u32 wr_addr)
 {
 	writel_relaxed(0, mem_base + wr_addr);
 	writel_relaxed(0, mem_base + wr_addr + 0x4);
+	wmb(); /* update test point clear */
 }
 
 static u32 _sde_dbg_sde_read_test_point(void __iomem *mem_base, u32 wr_addr, u32 rd_addr,
@@ -1120,7 +1122,7 @@ static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus, u32 ena
 	SDE_DBG_LOG_MARKER(name, SDE_DBG_LOG_START, in_log);
 
 	if ((in_mem || in_dump) && (!(*dump_mem))) {
-		*dump_mem = kvzalloc(list_size, GFP_KERNEL);
+		*dump_mem = vzalloc(list_size);
 		bus->cmn.content_size = list_size / sizeof(u32);
 	}
 	dump_addr = *dump_mem;
@@ -1165,7 +1167,7 @@ static void _sde_dbg_dump_dsi_dbg_bus(struct sde_dbg_sde_debug_bus *bus, u32 ena
 
 	mutex_lock(&sde_dbg_dsi_mutex);
 	if ((in_mem || in_dump) && (!(*dump_mem))) {
-		*dump_mem = kvzalloc(list_size, GFP_KERNEL);
+		*dump_mem = vzalloc(list_size);
 		bus->cmn.content_size = list_size / sizeof(u32);
 	}
 	dump_addr = *dump_mem;
@@ -1259,7 +1261,7 @@ static void _sde_dump_array(bool do_panic, const char *name, bool dump_secure, u
 
 	reg_dump_size =  _sde_dbg_get_reg_dump_size();
 	if (!dbg_base->reg_dump_base)
-		dbg_base->reg_dump_base = kvzalloc(reg_dump_size, GFP_KERNEL);
+		dbg_base->reg_dump_base = vzalloc(reg_dump_size);
 
 	dbg_base->reg_dump_addr =  dbg_base->reg_dump_base;
 
@@ -1834,7 +1836,7 @@ static ssize_t sde_recovery_regdump_read(struct file *file, char __user *ubuf,
 
 	if (!rbuf->dump_done && !rbuf->cur_blk) {
 		if (!rbuf->buf)
-			rbuf->buf = kvzalloc(DUMP_BUF_SIZE, GFP_KERNEL);
+			rbuf->buf = vzalloc(DUMP_BUF_SIZE);
 		if (!rbuf->buf) {
 			len =  -ENOMEM;
 			goto err;
@@ -2647,7 +2649,7 @@ static void sde_dbg_reg_base_destroy(void)
 		list_del(&blk_base->reg_base_head);
 		kfree(blk_base);
 	}
-	kvfree(dbg_base->reg_dump_base);
+	vfree(dbg_base->reg_dump_base);
 }
 
 static void sde_dbg_dsi_ctrl_destroy(void)
@@ -2666,12 +2668,12 @@ static void sde_dbg_buses_destroy(void)
 {
 	struct sde_dbg_base *dbg_base = &sde_dbg_base;
 
-	kvfree(dbg_base->dbgbus_sde.cmn.dumped_content);
-	kvfree(dbg_base->dbgbus_vbif_rt.cmn.dumped_content);
-	kvfree(dbg_base->dbgbus_dsi.cmn.dumped_content);
-	kvfree(dbg_base->dbgbus_lutdma.cmn.dumped_content);
-	kvfree(dbg_base->dbgbus_rsc.cmn.dumped_content);
-	kvfree(dbg_base->dbgbus_dp.cmn.dumped_content);
+	vfree(dbg_base->dbgbus_sde.cmn.dumped_content);
+	vfree(dbg_base->dbgbus_vbif_rt.cmn.dumped_content);
+	vfree(dbg_base->dbgbus_dsi.cmn.dumped_content);
+	vfree(dbg_base->dbgbus_lutdma.cmn.dumped_content);
+	vfree(dbg_base->dbgbus_rsc.cmn.dumped_content);
+	vfree(dbg_base->dbgbus_dp.cmn.dumped_content);
 }
 
 /**
@@ -2679,7 +2681,7 @@ static void sde_dbg_buses_destroy(void)
  */
 void sde_dbg_destroy(void)
 {
-	kvfree(sde_dbg_base.regbuf.buf);
+	vfree(sde_dbg_base.regbuf.buf);
 	memset(&sde_dbg_base.regbuf, 0, sizeof(sde_dbg_base.regbuf));
 	_sde_dbg_debugfs_destroy();
 	sde_dbg_base_evtlog = NULL;

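These dump buffers are large, long-lived and CPU-only, so moving from kvzalloc to vzalloc keeps them out of the physically-contiguous page allocator entirely. The pairing rule after this change, sketched:

buf = vzalloc(size);	/* was kvzalloc(size, GFP_KERNEL) */
if (!buf)
	return -ENOMEM;
/* ... */
vfree(buf);		/* was kvfree(buf); a plain kfree() here would be a bug */
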
+ 5 - 5
msm/sde_dbg_evtlog.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -227,7 +227,7 @@ struct sde_dbg_evtlog *sde_evtlog_init(void)
 {
 	struct sde_dbg_evtlog *evtlog;
 
-	evtlog = kvzalloc(sizeof(*evtlog), GFP_KERNEL);
+	evtlog = vzalloc(sizeof(*evtlog));
 	if (!evtlog)
 		return ERR_PTR(-ENOMEM);
 
@@ -245,7 +245,7 @@ struct sde_dbg_reglog *sde_reglog_init(void)
 {
 	struct sde_dbg_reglog *reglog;
 
-	reglog = kvzalloc(sizeof(*reglog), GFP_KERNEL);
+	reglog = vzalloc(sizeof(*reglog));
 	if (!reglog)
 		return ERR_PTR(-ENOMEM);
 
@@ -353,7 +353,7 @@ void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog)
 		list_del(&filter_node->list);
 		kfree(filter_node);
 	}
-	kvfree(evtlog);
+	vfree(evtlog);
 }
 
 void sde_reglog_destroy(struct sde_dbg_reglog *reglog)
@@ -361,5 +361,5 @@ void sde_reglog_destroy(struct sde_dbg_reglog *reglog)
 	if (!reglog)
 		return;
 
-	kvfree(reglog);
+	vfree(reglog);
 }