Przeglądaj źródła

Merge remote-tracking branch 'quic/display-kernel.lnx.4.19' into display-kernel.lnx.1.0

* quic/display-kernel.lnx.4.19:
  disp: msm: dsi: commit DSI PHY timings after update
  disp: msm: add cpu number to the sde event logs
  drm/msm/dsi: allocate current mode memory early for DMS
  drm/msm/dsi: fix panel physical dimensions updated to connector
  disp: msm: sde: fix handling the missing pp-done interrupt cases
  drm/msm/dsi: add flag for mode switch with fps
  disp: msm: dp: reset combo phy if peer_usb_comm is disabled
  Revert "disp: msm: sde: Add LTM sw fuse check support"
  disp: msm: dp: Check if DP version supports FEC and DSC
  disp: msm: sde: add support to handle mdp limits property
  disp: msm: dp: Ensure sink supports DSC decoding of selected BPC
  disp: msm: sde: add spinlocks to handle_frame_done call
  drm/msm/dsi-staging: update mdp transfer time preference
  drm/msm/dsi-staging: use usleep_range instead of msleep in dsi enable

Change-Id: I47253fcbf9c27a5ad477ca6506170a114a704f1d
Signed-off-by: Alisha Thapaliya <[email protected]>
Alisha Thapaliya 5 lat temu
rodzic
commit
9006990186

+ 1 - 0
msm/dp/dp_catalog.c

@@ -1349,6 +1349,7 @@ static void dp_catalog_ctrl_usb_reset(struct dp_catalog_ctrl *ctrl, bool flip)
 
 	io_data = catalog->io.usb3_dp_com;
 
+	DP_DEBUG("Program PHYMODE to DP only\n");
 	dp_write(USB3_DP_COM_RESET_OVRD_CTRL, 0x0a);
 	dp_write(USB3_DP_COM_PHY_MODE_CTRL, 0x02);
 	dp_write(USB3_DP_COM_SW_RESET, 0x01);

+ 2 - 1
msm/dp/dp_display.c

@@ -781,7 +781,8 @@ static void dp_display_host_init(struct dp_display_private *dp)
 	if (dp->hpd->orientation == ORIENTATION_CC2)
 		flip = true;
 
-	reset = dp->debug->sim_mode ? false : !dp->hpd->multi_func;
+	reset = dp->debug->sim_mode ? false :
+		(!dp->hpd->multi_func || !dp->hpd->peer_usb_comm);
 
 	dp->power->init(dp->power, flip);
 	dp->hpd->host_init(dp->hpd, &dp->catalog->hpd);

+ 1 - 0
msm/dp/dp_hpd.h

@@ -63,6 +63,7 @@ struct dp_hpd {
 	bool hpd_irq;
 	bool alt_mode_cfg_done;
 	bool multi_func;
+	bool peer_usb_comm;
 
 	void (*isr)(struct dp_hpd *dp_hpd);
 	int (*register_hpd)(struct dp_hpd *dp_hpd);

+ 26 - 4
msm/dp/dp_panel.c

@@ -2023,7 +2023,9 @@ static void dp_panel_decode_dsc_dpcd(struct dp_panel *dp_panel)
 		dp_panel->sink_dsc_caps.dsc_capable = true;
 		dp_panel->sink_dsc_caps.version = dp_panel->dsc_dpcd[1];
 		dp_panel->sink_dsc_caps.block_pred_en =
-			dp_panel->dsc_dpcd[6] ? true : false;
+				dp_panel->dsc_dpcd[6] ? true : false;
+		dp_panel->sink_dsc_caps.color_depth =
+				dp_panel->dsc_dpcd[10];
 
 		if (dp_panel->sink_dsc_caps.version >= 0x11)
 			dp_panel->dsc_en = true;
@@ -2152,7 +2154,8 @@ static int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
 	dp_panel->fec_en = false;
 	dp_panel->dsc_en = false;
 
-	if (dp_panel->fec_feature_enable) {
+	if (dp_panel->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
+			dp_panel->fec_feature_enable) {
 		dp_panel_read_sink_fec_caps(dp_panel);
 
 		if (dp_panel->dsc_feature_enable && dp_panel->fec_en)
@@ -2181,12 +2184,31 @@ static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
 	link_info = &dp_panel->link_info;
 	data_rate_khz = link_info->num_lanes * link_info->rate * 8;
 
-	while (bpp > min_supported_bpp) {
+	for (; bpp > min_supported_bpp; bpp -= 6) {
+		if (dp_panel->dsc_en) {
+			if (bpp == 36 && !(dp_panel->sink_dsc_caps.color_depth
+					& DP_DSC_12_BPC))
+				continue;
+			else if (bpp == 30 &&
+					!(dp_panel->sink_dsc_caps.color_depth &
+					DP_DSC_10_BPC))
+				continue;
+			else if (bpp == 24 &&
+					!(dp_panel->sink_dsc_caps.color_depth &
+					DP_DSC_8_BPC))
+				continue;
+		}
+
 		if (mode_pclk_khz * bpp <= data_rate_khz)
 			break;
-		bpp -= 6;
 	}
 
+	if (bpp < min_supported_bpp)
+		DP_ERR("bpp %d is below minimum supported bpp %d\n", bpp,
+				min_supported_bpp);
+	if (dp_panel->dsc_en && bpp != 24 && bpp != 30 && bpp != 36)
+		DP_ERR("bpp %d is not supported when dsc is enabled\n", bpp);
+
 	return bpp;
 }
 

+ 1 - 0
msm/dp/dp_panel.h

@@ -81,6 +81,7 @@ struct dp_dsc_caps {
 	bool dsc_capable;
 	u8 version;
 	bool block_pred_en;
+	u8 color_depth;
 };
 
 struct dp_audio;

+ 4 - 2
msm/dp/dp_usbpd.c

@@ -235,7 +235,8 @@ static void dp_usbpd_send_event(struct dp_usbpd_private *pd,
 	}
 }
 
-static void dp_usbpd_connect_cb(struct usbpd_svid_handler *hdlr)
+static void dp_usbpd_connect_cb(struct usbpd_svid_handler *hdlr,
+		bool peer_usb_comm)
 {
 	struct dp_usbpd_private *pd;
 
@@ -245,7 +246,8 @@ static void dp_usbpd_connect_cb(struct usbpd_svid_handler *hdlr)
 		return;
 	}
 
-	DP_DEBUG("\n");
+	DP_DEBUG("peer_usb_comm: %d\n", peer_usb_comm);
+	pd->dp_usbpd.base.peer_usb_comm = peer_usb_comm;
 	dp_usbpd_send_event(pd, DP_USBPD_EVT_DISCOVER);
 }
 

+ 1 - 0
msm/dsi/dsi_catalog.c

@@ -262,6 +262,7 @@ static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy)
 	phy->ops.dyn_refresh_ops.cache_phy_timings =
 		dsi_phy_hw_v4_0_cache_phy_timings;
 	phy->ops.set_continuous_clk = dsi_phy_hw_v4_0_set_continuous_clk;
+	phy->ops.commit_phy_timing = dsi_phy_hw_v4_0_commit_phy_timing;
 }
 
 /**

+ 2 - 0
msm/dsi/dsi_catalog.h

@@ -116,6 +116,8 @@ int dsi_phy_hw_v4_0_lane_reset(struct dsi_phy_hw *phy);
 void dsi_phy_hw_v4_0_toggle_resync_fifo(struct dsi_phy_hw *phy);
 void dsi_phy_hw_v4_0_reset_clk_en_sel(struct dsi_phy_hw *phy);
 void dsi_phy_hw_v4_0_set_continuous_clk(struct dsi_phy_hw *phy, bool enable);
+void dsi_phy_hw_v4_0_commit_phy_timing(struct dsi_phy_hw *phy,
+		struct dsi_phy_per_lane_cfgs *timing);
 
 /* DSI controller common ops */
 u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl);

+ 2 - 0
msm/dsi/dsi_defs.h

@@ -96,6 +96,7 @@ enum dsi_op_mode {
  * @DSI_MODE_FLAG_POMS:
  *         Seamless transition is dynamic panel operating mode switch
  * @DSI_MODE_FLAG_DYN_CLK: Seamless transition is dynamic clock change
+ * @DSI_MODE_FLAG_DMS_FPS: Seamless fps only transition in Dynamic Mode Switch
  */
 enum dsi_mode_flags {
 	DSI_MODE_FLAG_SEAMLESS			= BIT(0),
@@ -105,6 +106,7 @@ enum dsi_mode_flags {
 	DSI_MODE_FLAG_VRR			= BIT(4),
 	DSI_MODE_FLAG_POMS			= BIT(5),
 	DSI_MODE_FLAG_DYN_CLK			= BIT(6),
+	DSI_MODE_FLAG_DMS_FPS                   = BIT(7),
 };
 
 /**

+ 46 - 15
msm/dsi/dsi_display.c

@@ -4387,6 +4387,30 @@ static bool dsi_display_validate_mode_seamless(struct dsi_display *display,
 	return rc;
 }
 
+static void dsi_display_validate_dms_fps(struct dsi_display_mode *cur_mode,
+		struct dsi_display_mode *to_mode)
+{
+	u32 cur_fps, to_fps;
+	u32 cur_h_active, to_h_active;
+	u32 cur_v_active, to_v_active;
+
+	cur_fps = cur_mode->timing.refresh_rate;
+	to_fps = to_mode->timing.refresh_rate;
+	cur_h_active = cur_mode->timing.h_active;
+	cur_v_active = cur_mode->timing.v_active;
+	to_h_active = to_mode->timing.h_active;
+	to_v_active = to_mode->timing.v_active;
+
+	if ((cur_h_active == to_h_active) && (cur_v_active == to_v_active) &&
+			(cur_fps != to_fps)) {
+		to_mode->dsi_mode_flags |= DSI_MODE_FLAG_DMS_FPS;
+		DSI_DEBUG("DMS Modeset with FPS change\n");
+	} else {
+		to_mode->dsi_mode_flags &= ~DSI_MODE_FLAG_DMS_FPS;
+	}
+}
+
+
 static int dsi_display_set_mode_sub(struct dsi_display *display,
 				    struct dsi_display_mode *mode,
 				    u32 flags)
@@ -4395,6 +4419,7 @@ static int dsi_display_set_mode_sub(struct dsi_display *display,
 	int i;
 	struct dsi_display_ctrl *ctrl;
 	struct dsi_display_mode_priv_info *priv_info;
+	bool commit_phy_timing = false;
 
 	priv_info = mode->priv_info;
 	if (!priv_info) {
@@ -4403,6 +4428,7 @@ static int dsi_display_set_mode_sub(struct dsi_display *display,
 		return -EINVAL;
 	}
 
+	SDE_EVT32(mode->dsi_mode_flags);
 	if (mode->dsi_mode_flags & DSI_MODE_FLAG_POMS) {
 		display->config.panel_mode = mode->panel_mode;
 		display->panel->panel_mode = mode->panel_mode;
@@ -4462,15 +4488,20 @@ static int dsi_display_set_mode_sub(struct dsi_display *display,
 	}
 
 	if ((mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) &&
-			(display->panel->panel_mode == DSI_OP_CMD_MODE))
+			(display->panel->panel_mode == DSI_OP_CMD_MODE)) {
+		commit_phy_timing = true;
 		atomic_set(&display->clkrate_change_pending, 1);
 
+		dsi_display_validate_dms_fps(display->panel->cur_mode, mode);
+	}
+
 	if (priv_info->phy_timing_len) {
 		display_for_each_ctrl(i, display) {
 			ctrl = &display->ctrl[i];
 			 rc = dsi_phy_set_timing_params(ctrl->phy,
 				priv_info->phy_timing_val,
-				priv_info->phy_timing_len);
+				priv_info->phy_timing_len,
+				commit_phy_timing);
 			if (rc)
 				DSI_ERR("failed to add DSI PHY timing params\n");
 		}
@@ -5930,9 +5961,7 @@ int dsi_display_get_modes(struct dsi_display *display,
 				display_mode.timing.min_dsi_clk_hz;
 
 			display_mode.priv_info->mdp_transfer_time_us =
-				display_mode.priv_info->dsi_transfer_time_us;
-			display_mode.timing.mdp_transfer_time_us =
-				display_mode.timing.dsi_transfer_time_us;
+				display_mode.timing.mdp_transfer_time_us;
 		}
 
 		is_split_link = host->split_link.split_link_enabled;
@@ -6280,6 +6309,15 @@ int dsi_display_set_mode(struct dsi_display *display,
 	timing = adj_mode.timing;
 	adjust_timing_by_ctrl_count(display, &adj_mode);
 
+	if (!display->panel->cur_mode) {
+		display->panel->cur_mode =
+			kzalloc(sizeof(struct dsi_display_mode), GFP_KERNEL);
+		if (!display->panel->cur_mode) {
+			rc = -ENOMEM;
+			goto error;
+		}
+	}
+
 	/*For dynamic DSI setting, use specified clock rate */
 	if (display->cached_clk_rate > 0)
 		adj_mode.priv_info->clk_rate_hz = display->cached_clk_rate;
@@ -6296,15 +6334,6 @@ int dsi_display_set_mode(struct dsi_display *display,
 		goto error;
 	}
 
-	if (!display->panel->cur_mode) {
-		display->panel->cur_mode =
-			kzalloc(sizeof(struct dsi_display_mode), GFP_KERNEL);
-		if (!display->panel->cur_mode) {
-			rc = -ENOMEM;
-			goto error;
-		}
-	}
-
 	DSI_INFO("mdp_transfer_time_us=%d us\n",
 			adj_mode.priv_info->mdp_transfer_time_us);
 	DSI_INFO("hactive= %d,vactive= %d,fps=%d\n",
@@ -7136,7 +7165,9 @@ int dsi_display_enable(struct dsi_display *display)
 		}
 	}
 
-	if (mode->priv_info->dsc_enabled) {
+	/* Block sending pps command if modeset is due to fps difference */
+	if ((mode->priv_info->dsc_enabled) &&
+			!(mode->dsi_mode_flags & DSI_MODE_FLAG_DMS_FPS)) {
 		mode->priv_info->dsc.pic_width *= display->ctrl_count;
 		rc = dsi_panel_update_pps(display->panel);
 		if (rc) {

+ 10 - 0
msm/dsi/dsi_drm.c

@@ -799,6 +799,8 @@ int dsi_connector_get_modes(struct drm_connector *connector, void *data,
 	struct drm_display_mode drm_mode;
 	struct dsi_display *display = data;
 	struct edid edid;
+	u8 width_mm = connector->display_info.width_mm;
+	u8 height_mm = connector->display_info.height_mm;
 	const u8 edid_buf[EDID_LENGTH] = {
 		0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x44, 0x6D,
 		0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x1B, 0x10, 0x01, 0x03,
@@ -860,6 +862,14 @@ int dsi_connector_get_modes(struct drm_connector *connector, void *data,
 	rc =  drm_connector_update_edid_property(connector, &edid);
 	if (rc)
 		count = 0;
+	/*
+	 * DRM EDID structure maintains panel physical dimensions in
+	 * centimeters, we will be losing the precision anything below cm.
+	 * Changing DRM framework will effect other clients at this
+	 * moment, overriding the values back to millimeter.
+	 */
+	connector->display_info.width_mm = width_mm;
+	connector->display_info.height_mm = height_mm;
 end:
 	DSI_DEBUG("MODE COUNT =%d\n\n", count);
 	return count;

+ 8 - 0
msm/dsi/dsi_panel.c

@@ -3646,6 +3646,14 @@ void dsi_panel_calc_dsi_transfer_time(struct dsi_host_common_cfg *config,
 			frame_threshold_us;
 	}
 
+	timing->mdp_transfer_time_us = timing->dsi_transfer_time_us;
+
+	/* Force update mdp xfer time to hal,if clk and mdp xfer time is set */
+	if (mode->priv_info->mdp_transfer_time_us && timing->clk_rate_hz) {
+		timing->mdp_transfer_time_us =
+			mode->priv_info->mdp_transfer_time_us;
+	}
+
 	/* Calculate pclk_khz to update modeinfo */
 	pclk_rate_hz = mult_frac(min_bitclk_hz, frame_time_us,
 			timing->dsi_transfer_time_us);

+ 7 - 1
msm/dsi/dsi_phy.c

@@ -1071,6 +1071,8 @@ int dsi_phy_set_clk_freq(struct msm_dsi_phy *phy,
  * @phy:          DSI PHY handle
  * @timing:       array holding timing params.
  * @size:         size of the array.
+ * @commit:		  boolean to indicate if programming PHY HW registers is
+ *				  required
  *
  * When PHY timing calculator is not implemented, this array will be used to
  * pass PHY timing information.
@@ -1078,7 +1080,7 @@ int dsi_phy_set_clk_freq(struct msm_dsi_phy *phy,
  * Return: error code.
  */
 int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
-			      u32 *timing, u32 size)
+			      u32 *timing, u32 size, bool commit)
 {
 	int rc = 0;
 
@@ -1091,9 +1093,13 @@ int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
 
 	if (phy->hw.ops.phy_timing_val)
 		rc = phy->hw.ops.phy_timing_val(&phy->cfg.timing, timing, size);
+
 	if (!rc)
 		phy->cfg.is_phy_timing_present = true;
 
+	if (phy->hw.ops.commit_phy_timing && commit)
+		phy->hw.ops.commit_phy_timing(&phy->hw, &phy->cfg.timing);
+
 	mutex_unlock(&phy->phy_lock);
 	return rc;
 }

+ 3 - 1
msm/dsi/dsi_phy.h

@@ -244,6 +244,8 @@ int dsi_phy_set_clk_freq(struct msm_dsi_phy *phy,
  * @phy:          DSI PHY handle
  * @timing:       array holding timing params.
  * @size:         size of the array.
+ * @commit:		  boolean to indicate if programming PHY HW registers is
+ *				  required
  *
  * When PHY timing calculator is not implemented, this array will be used to
  * pass PHY timing information.
@@ -251,7 +253,7 @@ int dsi_phy_set_clk_freq(struct msm_dsi_phy *phy,
  * Return: error code.
  */
 int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
-			      u32 *timing, u32 size);
+			      u32 *timing, u32 size, bool commit);
 
 /**
  * dsi_phy_lane_reset() - Reset DSI PHY lanes in case of error

+ 8 - 0
msm/dsi/dsi_phy_hw.h

@@ -321,6 +321,14 @@ struct dsi_phy_hw_ops {
 	 */
 	void (*set_continuous_clk)(struct dsi_phy_hw *phy, bool enable);
 
+	/**
+	 * commit_phy_timing() - Commit PHY timing
+	 * @phy:	Pointer to DSI PHY hardware object.
+	 * @timing: Pointer to PHY timing array
+	 */
+	void (*commit_phy_timing)(struct dsi_phy_hw *phy,
+			struct dsi_phy_per_lane_cfgs *timing);
+
 	void *timing_ops;
 	struct phy_ulps_config_ops ulps_ops;
 	struct phy_dyn_refresh_ops dyn_refresh_ops;

+ 21 - 14
msm/dsi/dsi_phy_hw_v4_0.c

@@ -183,6 +183,26 @@ static void dsi_phy_hw_v4_0_lane_settings(struct dsi_phy_hw *phy,
 
 }
 
+void dsi_phy_hw_v4_0_commit_phy_timing(struct dsi_phy_hw *phy,
+		struct dsi_phy_per_lane_cfgs *timing)
+{
+	/* Commit DSI PHY timings */
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_0, timing->lane_v4[0]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_1, timing->lane_v4[1]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_2, timing->lane_v4[2]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_3, timing->lane_v4[3]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_4, timing->lane_v4[4]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_5, timing->lane_v4[5]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_6, timing->lane_v4[6]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_7, timing->lane_v4[7]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_8, timing->lane_v4[8]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_9, timing->lane_v4[9]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_10, timing->lane_v4[10]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_11, timing->lane_v4[11]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_12, timing->lane_v4[12]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_13, timing->lane_v4[13]);
+}
+
 /**
  * enable() - Enable PHY hardware
  * @phy:      Pointer to DSI PHY hardware object.
@@ -290,20 +310,7 @@ void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy,
 	DSI_W32(phy, DSIPHY_CMN_CLK_CFG1, (data << 2)); /* set PLL src */
 
 	/* DSI PHY timings */
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_0, timing->lane_v4[0]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_1, timing->lane_v4[1]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_2, timing->lane_v4[2]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_3, timing->lane_v4[3]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_4, timing->lane_v4[4]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_5, timing->lane_v4[5]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_6, timing->lane_v4[6]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_7, timing->lane_v4[7]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_8, timing->lane_v4[8]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_9, timing->lane_v4[9]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_10, timing->lane_v4[10]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_11, timing->lane_v4[11]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_12, timing->lane_v4[12]);
-	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_13, timing->lane_v4[13]);
+	dsi_phy_hw_v4_0_commit_phy_timing(phy, timing);
 
 	/* DSI lane settings */
 	dsi_phy_hw_v4_0_lane_settings(phy, cfg);

+ 29 - 10
msm/dsi/dsi_pwr.c

@@ -126,12 +126,18 @@ static int dsi_pwr_enable_vregs(struct dsi_regulator_info *regs, bool enable)
 	int rc = 0, i = 0;
 	struct dsi_vreg *vreg;
 	int num_of_v = 0;
+	u32 pre_on_ms, post_on_ms;
+	u32 pre_off_ms, post_off_ms;
 
 	if (enable) {
 		for (i = 0; i < regs->count; i++) {
 			vreg = &regs->vregs[i];
+			pre_on_ms = vreg->pre_on_sleep;
+			post_on_ms = vreg->post_on_sleep;
+
 			if (vreg->pre_on_sleep)
-				msleep(vreg->pre_on_sleep);
+				usleep_range((pre_on_ms * 1000),
+						(pre_on_ms * 1000) + 10);
 
 			rc = regulator_set_load(vreg->vreg,
 						vreg->enable_load);
@@ -160,12 +166,18 @@ static int dsi_pwr_enable_vregs(struct dsi_regulator_info *regs, bool enable)
 			}
 
 			if (vreg->post_on_sleep)
-				msleep(vreg->post_on_sleep);
+				usleep_range((post_on_ms * 1000),
+						(post_on_ms * 1000) + 10);
 		}
 	} else {
 		for (i = (regs->count - 1); i >= 0; i--) {
-			if (regs->vregs[i].pre_off_sleep)
-				msleep(regs->vregs[i].pre_off_sleep);
+			vreg = &regs->vregs[i];
+			pre_off_ms = vreg->pre_off_sleep;
+			post_off_ms = vreg->post_off_sleep;
+
+			if (pre_off_ms)
+				usleep_range((pre_off_ms * 1000),
+						(pre_off_ms * 1000) + 10);
 
 			if (regs->vregs[i].off_min_voltage)
 				(void)regulator_set_voltage(regs->vregs[i].vreg,
@@ -176,8 +188,9 @@ static int dsi_pwr_enable_vregs(struct dsi_regulator_info *regs, bool enable)
 						regs->vregs[i].disable_load);
 			(void)regulator_disable(regs->vregs[i].vreg);
 
-			if (regs->vregs[i].post_off_sleep)
-				msleep(regs->vregs[i].post_off_sleep);
+			if (post_off_ms)
+				usleep_range((post_off_ms * 1000),
+						(post_off_ms * 1000) + 10);
 		}
 	}
 
@@ -192,8 +205,13 @@ error_disable_voltage:
 					    0, regs->vregs[i].max_voltage);
 error:
 	for (i--; i >= 0; i--) {
-		if (regs->vregs[i].pre_off_sleep)
-			msleep(regs->vregs[i].pre_off_sleep);
+		vreg = &regs->vregs[i];
+		pre_off_ms = vreg->pre_off_sleep;
+		post_off_ms = vreg->post_off_sleep;
+
+		if (pre_off_ms)
+			usleep_range((pre_off_ms * 1000),
+					(pre_off_ms * 1000) + 10);
 
 		(void)regulator_set_load(regs->vregs[i].vreg,
 					 regs->vregs[i].disable_load);
@@ -205,8 +223,9 @@ error:
 
 		(void)regulator_disable(regs->vregs[i].vreg);
 
-		if (regs->vregs[i].post_off_sleep)
-			msleep(regs->vregs[i].post_off_sleep);
+		if (post_off_ms)
+			usleep_range((post_off_ms * 1000),
+					(post_off_ms * 1000) + 10);
 	}
 
 	return rc;

+ 1 - 13
msm/sde/sde_color_processing.c

@@ -1883,21 +1883,9 @@ static void dspp_ltm_install_property(struct drm_crtc *crtc)
 	char feature_name[256];
 	struct sde_kms *kms = NULL;
 	struct sde_mdss_cfg *catalog = NULL;
-	u32 version = 0, ltm_sw_fuse = 0;
+	u32 version;
 
 	kms = get_kms(crtc);
-	if (!kms || !kms->hw_sw_fuse) {
-		DRM_ERROR("!kms = %d\n", !kms);
-		return;
-	}
-
-	ltm_sw_fuse = sde_hw_get_ltm_sw_fuse_value(kms->hw_sw_fuse);
-	DRM_DEBUG_DRIVER("ltm_sw_fuse value: 0x%x\n", ltm_sw_fuse);
-	if (ltm_sw_fuse != SW_FUSE_ENABLE) {
-		pr_info("ltm_sw_fuse is not enabled: 0x%x\n", ltm_sw_fuse);
-		return;
-	}
-
 	catalog = kms->catalog;
 	version = catalog->dspp[0].sblk->ltm.version >> 16;
 	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",

+ 33 - 0
msm/sde/sde_crtc.c

@@ -4708,6 +4708,8 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
 	struct drm_device *dev;
 	struct sde_kms_info *info;
 	struct sde_kms *sde_kms;
+	int i, j;
+
 	static const struct drm_prop_enum_list e_secure_level[] = {
 		{SDE_DRM_SEC_NON_SEC, "sec_and_non_sec"},
 		{SDE_DRM_SEC_ONLY, "sec_only"},
@@ -4925,6 +4927,37 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
 	if (sde_kms->perf.max_core_clk_rate)
 		sde_kms_info_add_keyint(info, "max_mdp_clk",
 				sde_kms->perf.max_core_clk_rate);
+
+	for (i = 0; i < catalog->limit_count; i++) {
+		sde_kms_info_add_keyint(info,
+			catalog->limit_cfg[i].name,
+			catalog->limit_cfg[i].lmt_case_cnt);
+
+		for (j = 0; j < catalog->limit_cfg[i].lmt_case_cnt; j++) {
+			sde_kms_info_add_keyint(info,
+				catalog->limit_cfg[i].vector_cfg[j].usecase,
+				catalog->limit_cfg[i].vector_cfg[j].value);
+		}
+
+		if (!strcmp(catalog->limit_cfg[i].name,
+			"sspp_linewidth_usecases"))
+			sde_kms_info_add_keyint(info,
+				"sspp_linewidth_values",
+				catalog->limit_cfg[i].lmt_vec_cnt);
+		else if (!strcmp(catalog->limit_cfg[i].name,
+			"sde_bwlimit_usecases"))
+			sde_kms_info_add_keyint(info,
+				"sde_bwlimit_values",
+				catalog->limit_cfg[i].lmt_vec_cnt);
+
+		for (j = 0; j < catalog->limit_cfg[i].lmt_vec_cnt; j++) {
+			sde_kms_info_add_keyint(info, "limit_usecase",
+				catalog->limit_cfg[i].value_cfg[j].use_concur);
+			sde_kms_info_add_keyint(info, "limit_value",
+				catalog->limit_cfg[i].value_cfg[j].value);
+		}
+	}
+
 	sde_kms_info_add_keystr(info, "core_ib_ff",
 			catalog->perf.core_ib_ff);
 	sde_kms_info_add_keystr(info, "core_clk_ff",

+ 59 - 17
msm/sde/sde_encoder_phys_cmd.c

@@ -186,8 +186,10 @@ static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
 	    atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0)) {
 		event = SDE_ENCODER_FRAME_EVENT_DONE |
 			SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
+		spin_lock(phys_enc->enc_spinlock);
 		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
 				phys_enc, event);
+		spin_unlock(phys_enc->enc_spinlock);
 	}
 
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
@@ -285,9 +287,12 @@ static void sde_encoder_phys_cmd_wr_ptr_irq(void *arg, int irq_idx)
 
 	if (atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0)) {
 		event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
-		if (phys_enc->parent_ops.handle_frame_done)
+		if (phys_enc->parent_ops.handle_frame_done) {
+			spin_lock(phys_enc->enc_spinlock);
 			phys_enc->parent_ops.handle_frame_done(
 					phys_enc->parent, phys_enc, event);
+			spin_unlock(phys_enc->enc_spinlock);
+		}
 	}
 
 	sde_encoder_helper_get_pp_line_count(phys_enc->parent, info);
@@ -467,6 +472,7 @@ static int _sde_encoder_phys_cmd_handle_ppdone_timeout(
 	struct drm_connector *conn;
 	int event;
 	u32 pending_kickoff_cnt;
+	unsigned long lock_flags;
 
 	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
 		return -EINVAL;
@@ -523,9 +529,12 @@ static int _sde_encoder_phys_cmd_handle_ppdone_timeout(
 	phys_enc->enable_state = SDE_ENC_ERR_NEEDS_HW_RESET;
 
 exit:
-	if (phys_enc->parent_ops.handle_frame_done)
+	if (phys_enc->parent_ops.handle_frame_done) {
+		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
 		phys_enc->parent_ops.handle_frame_done(
 				phys_enc->parent, phys_enc, frame_event);
+		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+	}
 
 	return -ETIMEDOUT;
 }
@@ -654,27 +663,23 @@ static bool _sde_encoder_phys_cmd_is_ongoing_pptx(
 	return false;
 }
 
-static int _sde_encoder_phys_cmd_wait_for_idle(
+static bool _sde_encoder_phys_cmd_is_scheduler_idle(
 		struct sde_encoder_phys *phys_enc)
 {
+	bool wr_ptr_wait_success = true;
+	unsigned long lock_flags;
+	bool ret = false;
 	struct sde_encoder_phys_cmd *cmd_enc =
 			to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_encoder_wait_info wait_info = {0};
-	bool recovery_events;
-	int ret;
-	struct sde_hw_ctl *ctl;
-	bool wr_ptr_wait_success = true;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	ctl = phys_enc->hw_ctl;
+	struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
 
 	if (sde_encoder_phys_cmd_is_master(phys_enc))
 		wr_ptr_wait_success = cmd_enc->wr_ptr_wait_success;
 
+	/*
+	 * Handle cases where a pp-done interrupt is missed
+	 * due to irq latency with POSTED start
+	 */
 	if (wr_ptr_wait_success &&
 	    (phys_enc->frame_trigger_mode == FRAME_DONE_WAIT_POSTED_START) &&
 	    ctl->ops.get_scheduler_status &&
@@ -682,11 +687,36 @@ static int _sde_encoder_phys_cmd_wait_for_idle(
 	    atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0) &&
 	    phys_enc->parent_ops.handle_frame_done) {
 
+		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
 		phys_enc->parent_ops.handle_frame_done(
 			phys_enc->parent, phys_enc,
 			SDE_ENCODER_FRAME_EVENT_DONE |
 			SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE);
-		return 0;
+		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+		SDE_EVT32(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			phys_enc->hw_intf->idx - INTF_0,
+			atomic_read(&phys_enc->pending_kickoff_cnt));
+
+		ret = true;
+	}
+
+	return ret;
+}
+
+static int _sde_encoder_phys_cmd_wait_for_idle(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_encoder_wait_info wait_info = {0};
+	bool recovery_events;
+	int ret;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
 	}
 
 	if (atomic_read(&phys_enc->pending_kickoff_cnt) > 1)
@@ -702,9 +732,15 @@ static int _sde_encoder_phys_cmd_wait_for_idle(
 	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
 		return 0;
 
+	if (_sde_encoder_phys_cmd_is_scheduler_idle(phys_enc))
+		return 0;
+
 	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
 			&wait_info);
 	if (ret == -ETIMEDOUT) {
+		if (_sde_encoder_phys_cmd_is_scheduler_idle(phys_enc))
+			return 0;
+
 		_sde_encoder_phys_cmd_handle_ppdone_timeout(phys_enc,
 				recovery_events);
 	} else if (!ret) {
@@ -1466,6 +1502,7 @@ static int _sde_encoder_phys_cmd_handle_wr_ptr_timeout(
 			to_sde_encoder_phys_cmd(phys_enc);
 	bool switch_te;
 	int ret = -ETIMEDOUT;
+	unsigned long lock_flags;
 
 	switch_te = _sde_encoder_phys_cmd_needs_vsync_change(
 				phys_enc, profile_timestamp);
@@ -1495,10 +1532,15 @@ static int _sde_encoder_phys_cmd_handle_wr_ptr_timeout(
 		SDE_EVT32(DRMID(phys_enc->parent), switch_te, SDE_EVTLOG_ERROR);
 
 		if (sde_encoder_phys_cmd_is_master(phys_enc) &&
-		  atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0))
+			atomic_add_unless(
+			&phys_enc->pending_retire_fence_cnt, -1, 0)) {
+			spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
 			phys_enc->parent_ops.handle_frame_done(
 				phys_enc->parent, phys_enc,
 				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
+			spin_unlock_irqrestore(phys_enc->enc_spinlock,
+				lock_flags);
+		}
 	}
 
 	cmd_enc->wr_ptr_wait_success = (ret == 0) ? true : false;

+ 170 - 0
msm/sde/sde_hw_catalog.c

@@ -185,6 +185,7 @@ enum sde_prop {
 	UBWC_BW_CALC_VERSION,
 	PIPE_ORDER_VERSION,
 	SEC_SID_MASK,
+	SDE_LIMITS,
 	SDE_PROP_MAX,
 };
 
@@ -279,6 +280,14 @@ enum {
 	INTF_PROP_MAX,
 };
 
+enum {
+	LIMIT_NAME,
+	LIMIT_USECASE,
+	LIMIT_ID,
+	LIMIT_VALUE,
+	LIMIT_PROP_MAX,
+};
+
 enum {
 	PP_OFF,
 	PP_LEN,
@@ -479,6 +488,7 @@ static struct sde_prop_type sde_prop[] = {
 	{PIPE_ORDER_VERSION, "qcom,sde-pipe-order-version", false,
 			PROP_TYPE_U32},
 	{SEC_SID_MASK, "qcom,sde-secure-sid-mask", false, PROP_TYPE_U32_ARRAY},
+	{SDE_LIMITS, "qcom,sde-limits", false, PROP_TYPE_NODE},
 };
 
 static struct sde_prop_type sde_perf_prop[] = {
@@ -765,6 +775,14 @@ static struct sde_prop_type qdss_prop[] = {
 	{HW_LEN, "qcom,sde-qdss-size", false, PROP_TYPE_U32},
 };
 
+static struct sde_prop_type limit_usecase_prop[] = {
+	{LIMIT_NAME, "qcom,sde-limit-name", false, PROP_TYPE_STRING},
+	{LIMIT_USECASE, "qcom,sde-limit-cases", false, PROP_TYPE_STRING_ARRAY},
+	{LIMIT_ID, "qcom,sde-limit-ids", false, PROP_TYPE_U32_ARRAY},
+	{LIMIT_VALUE, "qcom,sde-limit-values", false,
+			PROP_TYPE_BIT_OFFSET_ARRAY},
+};
+
 /*************************************************************
  * static API list
  *************************************************************/
@@ -3146,6 +3164,149 @@ static int _sde_parse_prop_check(struct sde_mdss_cfg *cfg,
 	return 0;
 }
 
+static int sde_read_limit_node(struct device_node *snp,
+	struct sde_prop_value *lmt_val, struct sde_mdss_cfg *cfg)
+{
+	int j, i = 0, rc = 0;
+	const char *type = NULL;
+	struct device_node *node = NULL;
+
+	for_each_child_of_node(snp, node) {
+		cfg->limit_cfg[i].vector_cfg =
+			kcalloc(cfg->limit_cfg[i].lmt_case_cnt,
+				sizeof(struct limit_vector_cfg), GFP_KERNEL);
+		if (!cfg->limit_cfg[i].vector_cfg) {
+			rc = -ENOMEM;
+			goto error;
+		}
+
+		for (j = 0; j < cfg->limit_cfg[i].lmt_case_cnt; j++) {
+			of_property_read_string_index(node,
+				limit_usecase_prop[LIMIT_USECASE].prop_name,
+				j, &type);
+			cfg->limit_cfg[i].vector_cfg[j].usecase = type;
+			cfg->limit_cfg[i].vector_cfg[j].value =
+				PROP_VALUE_ACCESS(&lmt_val[i * LIMIT_PROP_MAX],
+				LIMIT_ID, j);
+		}
+
+		cfg->limit_cfg[i].value_cfg =
+				kcalloc(cfg->limit_cfg[i].lmt_vec_cnt,
+				sizeof(struct limit_value_cfg), GFP_KERNEL);
+
+		if (!cfg->limit_cfg[i].value_cfg) {
+			rc = -ENOMEM;
+			goto error;
+		}
+
+		for (j = 0; j < cfg->limit_cfg[i].lmt_vec_cnt; j++) {
+			cfg->limit_cfg[i].value_cfg[j].use_concur =
+				PROP_BITVALUE_ACCESS(
+					&lmt_val[i * LIMIT_PROP_MAX],
+					LIMIT_VALUE, j, 0);
+			cfg->limit_cfg[i].value_cfg[j].value =
+				PROP_BITVALUE_ACCESS(
+					&lmt_val[i * LIMIT_PROP_MAX],
+					LIMIT_VALUE, j, 1);
+
+		}
+		i++;
+	}
+
+	return 0;
+error:
+	for (j = 0; j < cfg->limit_count; j++) {
+		kfree(cfg->limit_cfg[j].vector_cfg);
+		kfree(cfg->limit_cfg[j].value_cfg);
+	}
+
+	cfg->limit_count = 0;
+	return rc;
+}
+
+static int sde_validate_limit_node(struct device_node *snp,
+	struct sde_prop_value *sde_limit_value, struct sde_mdss_cfg *cfg)
+{
+	int i = 0, rc = 0;
+	struct device_node *node = NULL;
+	int limit_value_count[LIMIT_PROP_MAX];
+	bool limit_value_exists[LIMIT_SUBBLK_COUNT_MAX][LIMIT_PROP_MAX];
+	const char *type = NULL;
+
+	for_each_child_of_node(snp, node) {
+		rc = _validate_dt_entry(node, limit_usecase_prop,
+			ARRAY_SIZE(limit_usecase_prop),
+			limit_value_count, NULL);
+		if (rc)
+			goto end;
+
+		rc = _read_dt_entry(node, limit_usecase_prop,
+			ARRAY_SIZE(limit_usecase_prop), limit_value_count,
+			&limit_value_exists[i][0],
+			&sde_limit_value[i * LIMIT_PROP_MAX]);
+		if (rc)
+			goto end;
+
+		cfg->limit_cfg[i].lmt_case_cnt =
+				limit_value_count[LIMIT_ID];
+
+		cfg->limit_cfg[i].lmt_vec_cnt =
+				limit_value_count[LIMIT_VALUE];
+		of_property_read_string(node,
+			limit_usecase_prop[LIMIT_NAME].prop_name, &type);
+		cfg->limit_cfg[i].name = type;
+
+		if (!limit_value_count[LIMIT_ID] ||
+				!limit_value_count[LIMIT_VALUE]) {
+			rc = -EINVAL;
+			goto end;
+		}
+		i++;
+	}
+	return 0;
+end:
+	cfg->limit_count = 0;
+	return rc;
+}
+
+static int sde_limit_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
+{
+	struct device_node *snp = NULL;
+	struct sde_prop_value  *sde_limit_value = NULL;
+	int rc = 0;
+
+	snp = of_get_child_by_name(np, sde_prop[SDE_LIMITS].prop_name);
+	if (!snp)
+		goto end;
+
+	cfg->limit_count = of_get_child_count(snp);
+	if (cfg->limit_count < 0) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	sde_limit_value = kzalloc(cfg->limit_count * LIMIT_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!sde_limit_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = sde_validate_limit_node(snp, sde_limit_value, cfg);
+	if (rc) {
+		SDE_ERROR("validating limit node failed\n");
+		goto end;
+	}
+
+	rc = sde_read_limit_node(snp, sde_limit_value, cfg);
+	if (rc)
+		SDE_ERROR("reading limit node failed\n");
+
+end:
+	kfree(sde_limit_value);
+	return rc;
+}
+
 static int sde_top_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
 {
 	int rc, i, dma_rc, len, prop_count[SDE_PROP_MAX];
@@ -3256,6 +3417,10 @@ static int sde_top_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
 	cfg->has_idle_pc = PROP_VALUE_ACCESS(prop_value, IDLE_PC, 0);
 	cfg->pipe_order_type = PROP_VALUE_ACCESS(prop_value,
 		PIPE_ORDER_VERSION, 0);
+
+	rc = sde_limit_parse_dt(np, cfg);
+	if (rc)
+		SDE_DEBUG("parsing of sde limit failed\n");
 end:
 	kfree(prop_value);
 	return rc;
@@ -4176,6 +4341,11 @@ void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
 			kfree(sde_cfg->vbif[i].qos_tbl[j].priority_lvl);
 	}
 
+	for (i = 0; i < sde_cfg->limit_count; i++) {
+		kfree(sde_cfg->limit_cfg[i].vector_cfg);
+		kfree(sde_cfg->limit_cfg[i].value_cfg);
+	}
+
 	for (i = 0; i < SDE_QOS_LUT_USAGE_MAX; i++) {
 		kfree(sde_cfg->perf.sfe_lut_tbl[i].entries);
 		kfree(sde_cfg->perf.qos_lut_tbl[i].entries);

+ 40 - 0
msm/sde/sde_hw_catalog.h

@@ -80,6 +80,7 @@
 
 #define MAX_XIN_COUNT 16
 #define SSPP_SUBBLK_COUNT_MAX 2
+#define LIMIT_SUBBLK_COUNT_MAX 10
 
 #define SDE_CTL_CFG_VERSION_1_0_0       0x100
 #define MAX_INTF_PER_CTL_V1                 2
@@ -1181,6 +1182,42 @@ struct sde_perf_cfg {
 	u32 num_mnoc_ports;
 };
 
+/**
+ * struct limit_vector_cfg - information on the usecase for each limit
+ * @usecase: usecase for each limit
+ * @value: id corresponding to each usecase
+ */
+struct limit_vector_cfg {
+	const char *usecase;
+	u32 value;
+};
+
+/**
+ * struct limit_value_cfg - information on the value of usecase
+ * @use_concur: usecase for each limit
+ * @value: value corresponding to usecase for each limit
+ */
+struct limit_value_cfg {
+	u32 use_concur;
+	u32 value;
+};
+
+/**
+ * struct sde_limit_cfg - information on different mdp limits
+ * @name: name of the limit property
+ * @lmt_vec_cnt: number of vector values for each limit
+ * @lmt_case_cnt: number of usecases for each limit
+ * @vector_cfg: pointer to the vector entries containing info on usecase
+ * @value_cfg: pointer to the value of each vector entry
+ */
+struct sde_limit_cfg {
+	const char *name;
+	u32 lmt_vec_cnt;
+	u32 lmt_case_cnt;
+	struct limit_vector_cfg *vector_cfg;
+	struct limit_value_cfg *value_cfg;
+};
+
 /**
  * struct sde_mdss_cfg - information of MDSS HW
  * This is the main catalog data structure representing
@@ -1366,6 +1403,9 @@ struct sde_mdss_cfg {
 	u32 qdss_count;
 	struct sde_qdss_cfg qdss[MAX_BLOCKS];
 
+	u32 limit_count;
+	struct sde_limit_cfg limit_cfg[LIMIT_SUBBLK_COUNT_MAX];
+
 	/* Add additional block data structures here */
 
 	struct sde_perf_cfg perf;

+ 0 - 32
msm/sde/sde_hw_top.c

@@ -47,8 +47,6 @@
 #define MDP_WD_TIMER_4_CTL2               0x444
 #define MDP_WD_TIMER_4_LOAD_VALUE         0x448
 
-#define LTM_SW_FUSE_OFFSET 0x10
-
 #define MDP_TICK_COUNT                    16
 #define XO_CLK_RATE                       19200
 #define MS_TICKS_IN_SEC                   1000
@@ -622,33 +620,3 @@ void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp)
 	kfree(mdp);
 }
 
-struct sde_hw_sw_fuse *sde_hw_sw_fuse_init(void __iomem *addr,
-	u32 sw_fuse_len, const struct sde_mdss_cfg *m)
-{
-	struct sde_hw_sw_fuse *c;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	c->hw.base_off = addr;
-	c->hw.blk_off = 0;
-	c->hw.length = sw_fuse_len;
-	c->hw.hwversion = m->hwversion;
-
-	return c;
-}
-
-void sde_hw_sw_fuse_destroy(struct sde_hw_sw_fuse *sw_fuse)
-{
-	kfree(sw_fuse);
-}
-
-u32 sde_hw_get_ltm_sw_fuse_value(struct sde_hw_sw_fuse *sw_fuse)
-{
-	u32 ltm_sw_fuse = 0;
-
-	if (sw_fuse)
-		ltm_sw_fuse = SDE_REG_READ(&sw_fuse->hw, LTM_SW_FUSE_OFFSET);
-	return ltm_sw_fuse;
-}

+ 0 - 25
msm/sde/sde_hw_top.h

@@ -234,12 +234,6 @@ struct sde_hw_sid {
 	struct sde_hw_blk_reg_map hw;
 };
 
-#define SW_FUSE_ENABLE 0x1
-struct sde_hw_sw_fuse {
-	/* sw fuse base */
-	struct sde_hw_blk_reg_map hw;
-};
-
 /**
  * sde_hw_sid_rotator_set - initialize the sid blk reg map
  * @addr: Mapped register io address
@@ -277,23 +271,4 @@ struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
 
 void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp);
 
-/**
- * sde_hw_sw_fuse_init - initialize the sw fuse blk reg map
- * @addr: Mapped register io address
- * @sw_fuse_len: Length of block
- * @m: Pointer to mdss catalog data
- */
-struct sde_hw_sw_fuse *sde_hw_sw_fuse_init(void __iomem *addr,
-		u32 sw_fuse_len, const struct sde_mdss_cfg *m);
-/**
- * sde_hw_sw_fuse_destroy - free memory for sw fuse
- * @sw_fuse: sde_hw_sw_fuse
- */
-void sde_hw_sw_fuse_destroy(struct sde_hw_sw_fuse *sw_fuse);
-
-/**
- * sde_hw_get_ltm_sw_fuse_value - read LTM sw fuse register value
- * @sw_fuse: sde_hw_sw_fuse
- */
-u32 sde_hw_get_ltm_sw_fuse_value(struct sde_hw_sw_fuse *sw_fuse);
 #endif /*_SDE_HW_TOP_H */

+ 0 - 29
msm/sde/sde_kms.c

@@ -1707,11 +1707,6 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
 		msm_iounmap(pdev, sde_kms->sid);
 	sde_kms->sid = NULL;
 
-	if (sde_kms->sw_fuse)
-		msm_iounmap(pdev, sde_kms->sw_fuse);
-	sde_hw_sw_fuse_destroy(sde_kms->sw_fuse);
-	sde_kms->sw_fuse = NULL;
-
 	if (sde_kms->reg_dma)
 		msm_iounmap(pdev, sde_kms->reg_dma);
 	sde_kms->reg_dma = NULL;
@@ -3188,19 +3183,6 @@ static int _sde_kms_hw_init_ioremap(struct sde_kms *sde_kms,
 	if (rc)
 		SDE_ERROR("dbg base register sid failed: %d\n", rc);
 
-	sde_kms->sw_fuse = msm_ioremap(platformdev, "swfuse_phys",
-					"swfuse_phys");
-	if (IS_ERR(sde_kms->sw_fuse)) {
-		sde_kms->sw_fuse = NULL;
-		SDE_DEBUG("sw_fuse is not defined");
-	} else {
-		sde_kms->sw_fuse_len = msm_iomap_size(platformdev,
-							"swfuse_phys");
-		rc =  sde_dbg_reg_register_base("sw_fuse", sde_kms->sw_fuse,
-						sde_kms->sw_fuse_len);
-		if (rc)
-			SDE_ERROR("dbg base register sw_fuse failed: %d\n", rc);
-	}
 error:
 	return rc;
 }
@@ -3385,17 +3367,6 @@ static int _sde_kms_hw_init_blocks(struct sde_kms *sde_kms,
 		goto perf_err;
 	}
 
-	if (sde_kms->sw_fuse) {
-		sde_kms->hw_sw_fuse = sde_hw_sw_fuse_init(sde_kms->sw_fuse,
-				sde_kms->sw_fuse_len, sde_kms->catalog);
-		if (IS_ERR(sde_kms->hw_sw_fuse)) {
-			SDE_ERROR("failed to init sw_fuse %ld\n",
-					PTR_ERR(sde_kms->hw_sw_fuse));
-			sde_kms->hw_sw_fuse = NULL;
-		}
-	} else {
-		sde_kms->hw_sw_fuse = NULL;
-	}
 	/*
 	 * _sde_kms_drm_obj_init should create the DRM related objects
 	 * i.e. CRTCs, planes, encoders, connectors and so forth

+ 1 - 3
msm/sde/sde_kms.h

@@ -246,9 +246,8 @@ struct sde_kms {
 	struct dentry *debugfs_vbif;
 
 	/* io/register spaces: */
-	void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma, *sid, *sw_fuse;
+	void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma, *sid;
 	unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len, sid_len;
-	unsigned long sw_fuse_len;
 
 	struct regulator *vdd;
 	struct regulator *mmagic;
@@ -274,7 +273,6 @@ struct sde_kms {
 	struct sde_hw_mdp *hw_mdp;
 	struct sde_hw_uidle *hw_uidle;
 	struct sde_hw_sid *hw_sid;
-	struct sde_hw_sw_fuse *hw_sw_fuse;
 	int dsi_display_count;
 	void **dsi_displays;
 	int wb_display_count;

+ 1 - 0
msm/sde_dbg.h

@@ -80,6 +80,7 @@ struct sde_dbg_evtlog_log {
 	u32 data[SDE_EVTLOG_MAX_DATA];
 	u32 data_cnt;
 	int pid;
+	u8 cpu;
 };
 
 /**

+ 3 - 2
msm/sde_dbg_evtlog.c

@@ -80,6 +80,7 @@ void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line,
 	log->line = line;
 	log->data_cnt = 0;
 	log->pid = current->pid;
+	log->cpu = current->cpu;
 
 	va_start(args, flag);
 	for (i = 0; i < SDE_EVTLOG_MAX_DATA; i++) {
@@ -165,8 +166,8 @@ ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
 	}
 
 	off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
-		"=>[%-8d:%-11llu:%9llu][%-4d]:", evtlog->first,
-		log->time, (log->time - prev_log->time), log->pid);
+		"=>[%-8d:%-11llu:%9llu][%-4d]:[%-4d]:", evtlog->first,
+		log->time, (log->time - prev_log->time), log->pid, log->cpu);
 
 	for (i = 0; i < log->data_cnt; i++)
 		off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),