
Merge "disp: msm: sde: use wr_ptr interrupt instead of ctl_start"

qctecmdr committed 6 years ago (commit 9c05197ef9)

+ 2 - 1
msm/dp/dp_display.c

@@ -2438,7 +2438,8 @@ static int dp_display_mst_connector_update_link_info(
 	memcpy(&dp_panel->link_info, &dp->panel->link_info,
 			sizeof(dp_panel->link_info));
 
-	DP_MST_DEBUG("dp mst connector:%d link info updated\n");
+	DP_MST_DEBUG("dp mst connector:%d link info updated\n",
+		connector->base.id);
 
 	return rc;
 }
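
Aside: the bug fixed above is a format string that consumed a %d with no matching argument, which is undefined behavior at the varargs level. A minimal sketch (wrapper name hypothetical, not from this tree) of how such mismatches can be caught at compile time by annotating the logging wrapper with the printf format attribute:

	/* hypothetical wrapper; the real DP_MST_DEBUG macro may expand
	 * differently */
	__attribute__((format(printf, 1, 2)))
	static void dp_mst_debug(const char *fmt, ...);

	/* gcc/clang now warn: format '%d' expects a matching 'int' argument */
	dp_mst_debug("dp mst connector:%d link info updated\n");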

+ 2 - 2
msm/dp/dp_panel.c

@@ -1201,7 +1201,7 @@ static void dp_panel_dsc_prepare_pps_packet(struct dp_panel *dp_panel)
 static void _dp_panel_dsc_get_num_extra_pclk(struct msm_display_dsc_info *dsc,
 				enum msm_display_compression_ratio ratio)
 {
-	unsigned int dto_n, dto_d, remainder;
+	unsigned int dto_n = 0, dto_d = 0, remainder;
 	int ack_required, last_few_ack_required, accum_ack;
 	int last_few_pclk, last_few_pclk_required;
 	int start, temp, line_width = dsc->pic_width/2;
@@ -2819,7 +2819,7 @@ static bool dp_panel_read_mst_cap(struct dp_panel *dp_panel)
 
 	if (!dp_panel) {
 		pr_err("invalid input\n");
-		goto end;
+		return 0;
 	}
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
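
Aside: the dto_n/dto_d zero-initialization above guards the case where the compression-ratio switch in this function has no matching case, in which event the variables would otherwise be read uninitialized. A minimal sketch of the pattern (enum values assumed from msm_drv.h, illustrative only):

	unsigned int dto_n = 0, dto_d = 0;

	switch (ratio) {
	case MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1:
		dto_n = 1;
		dto_d = 2;
		break;
	case MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1:
		dto_n = 1;
		dto_d = 3;
		break;
	default:
		/* dto_n/dto_d stay 0 instead of holding stack garbage */
		break;
	}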

+ 2 - 0
msm/msm_drv.c

@@ -1959,6 +1959,8 @@ static int msm_pdev_probe(struct platform_device *pdev)
 	ret = add_display_components(&pdev->dev, &match);
 	if (ret)
 		return ret;
+	if (!match)
+		return -ENODEV;
 
 	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
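
Aside: add_display_components() can return success without appending any component when no display nodes are present, leaving match NULL; rather than registering a master that can never bind, the probe now fails early. A sketch of the resulting flow (mirrors the hunk above):

	struct component_match *match = NULL;

	ret = add_display_components(&pdev->dev, &match);
	if (ret)
		return ret;
	if (!match)		/* nothing to bind against */
		return -ENODEV;

	return component_master_add_with_match(&pdev->dev, &msm_drm_ops,
			match);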

+ 4 - 4
msm/msm_drv.h

@@ -65,11 +65,11 @@ struct msm_gem_address_space;
 struct msm_gem_vma;
 
 #define NUM_DOMAINS    4    /* one for KMS, then one per gpu core (?) */
-#define MAX_CRTCS      8
+#define MAX_CRTCS      16
 #define MAX_PLANES     20
-#define MAX_ENCODERS   8
-#define MAX_BRIDGES    8
-#define MAX_CONNECTORS 8
+#define MAX_ENCODERS   16
+#define MAX_BRIDGES    16
+#define MAX_CONNECTORS 16
 
 #define TEARDOWN_DEADLOCK_RETRY_MAX 5

+ 2 - 3
msm/msm_gem_prime.c

@@ -95,7 +95,6 @@ struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
 	struct msm_drm_private *priv;
 	struct msm_kms *kms;
 	int ret;
-	u32 domain;
 
 	if (!dma_buf || !dev->dev_private)
 		return ERR_PTR(-EINVAL);
@@ -142,14 +141,14 @@ struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
 				|| (flags & ION_FLAG_CP_CAMERA_PREVIEW))
 			attach_dev = dev->dev;
 		else
-			DRM_ERROR("invalid ion secure flag: 0x%x\n", flags);
+			DRM_ERROR("invalid ion secure flag: 0x%lx\n", flags);
 	} else {
 		attach_dev = kms->funcs->get_address_space_device(kms,
 						MSM_SMMU_DOMAIN_UNSECURE);
 	}
 
 	if (!attach_dev) {
-		DRM_ERROR("aspace device not found for domain:%d\n", domain);
+		DRM_ERROR("aspace device not found for domain\n");
 		ret = -EINVAL;
 		goto fail_put;
 	}

+ 3 - 0
msm/sde/sde_color_processing.c

@@ -2898,6 +2898,9 @@ static void sde_cp_ltm_hist_interrupt_cb(void *arg, int irq_idx)
 		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
 		return;
 	}
+
+	memset(&phase, 0, sizeof(phase));
+
 	/* read intr_status register value */
 	num_mixers = sde_crtc->num_mixers;
 	if (!num_mixers)
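
Aside: the memset() above matters because phase is a stack struct that is only partially populated per mixer further down; without zeroing, any member the later code skips would carry stack garbage into the LTM histogram handling. Generic form of the pattern (hypothetical struct and helper, not the driver's exact types):

	struct ltm_phase_example {
		u32 init_h[2];
		u32 init_v;
		bool merge_en;
	};
	struct ltm_phase_example phase;

	/* zero everything first; later code may set only some fields */
	memset(&phase, 0, sizeof(phase));
	phase.init_v = compute_init_v();	/* hypothetical helper */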

+ 1 - 1
msm/sde/sde_connector.c

@@ -1154,7 +1154,7 @@ static int _sde_connector_set_ext_hdr_info(
 
 	/* verify 1st header byte, programmed in DP Infoframe SDP header */
 	if (payload_size < 1 || (payload[0] != HDR10_PLUS_VSIF_TYPE_CODE)) {
-		SDE_ERROR_CONN(c_conn, "invalid payload detected, size: %d\n",
+		SDE_ERROR_CONN(c_conn, "invalid payload detected, size: %zd\n",
 				payload_size);
 		rc = -EINVAL;
 		goto end;

+ 0 - 4
msm/sde/sde_core_perf.c

@@ -216,10 +216,6 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
 		return 0;
 	}
 
-	/* we only need bandwidth check on real-time clients (interfaces) */
-	if (sde_crtc_get_client_type(crtc) == NRT_CLIENT)
-		return 0;
-
 	sde_cstate = to_sde_crtc_state(state);
 
 	/* obtain new values */

+ 9 - 0
msm/sde/sde_crtc.c

@@ -886,6 +886,11 @@ static u32 _sde_crtc_get_displays_affected(struct drm_crtc *crtc,
 	u32 disp_bitmask = 0;
 	int i;
 
+	if (!crtc || !state) {
+		pr_err("Invalid crtc or state\n");
+		return 0;
+	}
+
 	sde_crtc = to_sde_crtc(crtc);
 	crtc_state = to_sde_crtc_state(state);
 
@@ -5006,6 +5011,10 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
 			catalog->perf.amortizable_threshold);
 	sde_kms_info_add_keyint(info, "min_prefill_lines",
 			catalog->perf.min_prefill_lines);
+	sde_kms_info_add_keyint(info, "num_mnoc_ports",
+			catalog->perf.num_mnoc_ports);
+	sde_kms_info_add_keyint(info, "axi_bus_width",
+			catalog->perf.axi_bus_width);
 	sde_kms_info_add_keyint(info, "sec_ui_blendstage",
 			catalog->sui_supported_blendstage);
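
Aside: num_mnoc_ports and axi_bus_width are published so userspace compositors can reproduce the kernel's bus-bandwidth math. A sketch of how such a figure is typically derived (the formula is an assumption for illustration, not taken from this commit):

	/* bytes/sec the fabric can move: each mnoc port carries
	 * axi_bus_width bytes per MDP clock cycle */
	static u64 example_bus_bw(u64 mdp_clk_hz, u32 axi_bus_width,
			u32 num_mnoc_ports)
	{
		return mdp_clk_hz * axi_bus_width * num_mnoc_ports;
	}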

+ 13 - 0
msm/sde/sde_crtc.h

@@ -218,6 +218,7 @@ struct sde_crtc_misr_info {
  * @output_fence  : output release fence context
  * @stage_cfg     : H/w mixer stage configuration
  * @debugfs_root  : Parent of debugfs node
+ * @priv_handle   : Pointer to external private handle, if present
  * @vblank_cb_count : count of vblank callback since last reset
  * @play_count    : frame count between crtc enable and disable
  * @vblank_cb_time  : ktime at vblank count reset
@@ -286,6 +287,7 @@ struct sde_crtc {
 
 	struct sde_hw_stage_cfg stage_cfg;
 	struct dentry *debugfs_root;
+	void *priv_handle;
 
 	u32 vblank_cb_count;
 	u64 play_count;
@@ -479,6 +481,17 @@ static inline int sde_crtc_get_mixer_height(struct sde_crtc *sde_crtc,
 			cstate->ds_cfg[0].lm_height : mode->vdisplay);
 }
 
+/**
+ * sde_crtc_get_num_datapath - get the number of datapath active
+ * @crtc: Pointer to drm crtc object
+ */
+static inline int sde_crtc_get_num_datapath(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+
+	return sde_crtc ? sde_crtc->num_mixers : 0;
+}
+
 /**
  * sde_crtc_frame_pending - retun the number of pending frames
  * @crtc: Pointer to drm crtc object
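
Aside: a typical caller of the new helper, splitting a crtc-wide figure across the active mixers (the caller sketch is hypothetical):

	int num_dp = sde_crtc_get_num_datapath(crtc);

	if (num_dp)
		per_pipe_bw = div_u64(total_bw, num_dp);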

+ 35 - 5
msm/sde/sde_encoder.c

@@ -166,6 +166,7 @@ enum sde_enc_rc_states {
 * @enc_spin_lock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
 * @bus_scaling_client:	Client handle to the bus scaling interface
 * @te_source:		vsync source pin information
+ * @ops:		Encoder ops from init function
 * @num_phys_encs:	Actual number of physical encoders contained.
 * @phys_encs:		Container of physical encoders managed.
 * @phys_vid_encs:		Video physical encoders for panel mode switch.
@@ -234,6 +235,8 @@ struct sde_encoder_virt {
 	uint32_t display_num_of_h_tiles;
 	uint32_t te_source;
 
+	struct sde_encoder_ops ops;
+
 	unsigned int num_phys_encs;
 	struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
 	struct sde_encoder_phys *phys_vid_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
@@ -2849,7 +2852,7 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
 		if (phys) {
-			if (!sde_enc->hw_pp[i]) {
+			if (!sde_enc->hw_pp[i] && sde_enc->topology.num_intf) {
 				SDE_ERROR_ENC(sde_enc,
 				    "invalid pingpong block for the encoder\n");
 				return;
@@ -3315,8 +3318,7 @@ void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
 	struct sde_encoder_virt *sde_enc;
 
 	if (wb_enc) {
-		if (sde_encoder_helper_reset_mixers(phys_enc,
-				wb_enc->fb_disable))
+		if (sde_encoder_helper_reset_mixers(phys_enc, NULL))
 			return;
 
 		if (wb_enc->hw_wb->ops.bind_pingpong_blk) {
@@ -5357,6 +5359,23 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
 		SDE_DEBUG("h_tile_instance %d = %d, split_role %d\n",
 				i, controller_id, phys_params.split_role);
 
+		if (sde_enc->ops.phys_init) {
+			struct sde_encoder_phys *enc;
+
+			enc = sde_enc->ops.phys_init(intf_type,
+					controller_id,
+					&phys_params);
+			if (enc) {
+				sde_enc->phys_encs[sde_enc->num_phys_encs] =
+					enc;
+				++sde_enc->num_phys_encs;
+			} else
+				SDE_ERROR_ENC(sde_enc,
+						"failed to add phys encs\n");
+
+			continue;
+		}
+
 		if (intf_type == INTF_WB) {
 			phys_params.intf_idx = INTF_MAX;
 			phys_params.wb_idx = sde_encoder_get_wb(
@@ -5429,9 +5448,10 @@ static const struct drm_encoder_funcs sde_encoder_funcs = {
 		.early_unregister = sde_encoder_early_unregister,
 };
 
-struct drm_encoder *sde_encoder_init(
+struct drm_encoder *sde_encoder_init_with_ops(
 		struct drm_device *dev,
-		struct msm_display_info *disp_info)
+		struct msm_display_info *disp_info,
+		const struct sde_encoder_ops *ops)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct sde_kms *sde_kms = to_sde_kms(priv->kms);
@@ -5448,6 +5468,9 @@ struct drm_encoder *sde_encoder_init(
 		goto fail;
 	}
 
+	if (ops)
+		sde_enc->ops = *ops;
+
 	mutex_init(&sde_enc->enc_lock);
 	ret = sde_encoder_setup_display(sde_enc, sde_kms, disp_info,
 			&drm_enc_mode);
@@ -5519,6 +5542,13 @@ fail:
 	return ERR_PTR(ret);
 }
 
+struct drm_encoder *sde_encoder_init(
+		struct drm_device *dev,
+		struct msm_display_info *disp_info)
+{
+	return sde_encoder_init_with_ops(dev, disp_info, NULL);
+}
+
 int sde_encoder_wait_for_event(struct drm_encoder *drm_enc,
 	enum msm_event_wait event)
 {
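
Aside: sde_encoder_init_with_ops() lets an external module supply its own physical-encoder constructor; sde_encoder_setup_display() then calls ops.phys_init once per h-tile and skips the built-in vid/cmd/wb init paths. A hypothetical user (the my_* names are illustrative, not from this tree):

	static void *my_phys_init(enum sde_intf_type type, u32 controller_id,
			void *phys_init_params)
	{
		/* phys_init_params points at the same init-params struct
		 * the built-in paths receive */
		return my_alloc_phys_enc(type, controller_id,
				phys_init_params);	/* hypothetical */
	}

	static const struct sde_encoder_ops my_enc_ops = {
		.phys_init = my_phys_init,
	};

	encoder = sde_encoder_init_with_ops(dev, &disp_info, &my_enc_ops);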

+ 30 - 2
msm/sde/sde_encoder.h

@@ -70,6 +70,22 @@ struct sde_encoder_kickoff_params {
 	enum frame_trigger_mode_type frame_trigger_mode;
 };
 
+/**
+ * struct sde_encoder_ops - callback functions for generic sde encoder
+ * Individual callbacks documented below.
+ */
+struct sde_encoder_ops {
+	/**
+	 * phys_init - phys initialization function
+	 * @type: controller type
+	 * @controller_id: controller id
+	 * @phys_init_params: Pointer of structure sde_enc_phys_init_params
+	 * Returns: Pointer of sde_encoder_phys, NULL if failed
+	 */
+	void *(*phys_init)(enum sde_intf_type type,
+			u32 controller_id, void *phys_init_params);
+};
+
 /**
  * sde_encoder_get_hw_resources - Populate table of required hardware resources
  * @encoder:	encoder pointer
@@ -147,11 +163,11 @@ void sde_encoder_kickoff(struct drm_encoder *encoder, bool is_error);
  * @encoder:	encoder pointer
  * @event:      event to wait for
  * MSM_ENC_COMMIT_DONE -  Wait for hardware to have flushed the current pending
- *                        frames to hardware at a vblank or ctl_start
+ *                        frames to hardware at a vblank or wr_ptr_start
  *                        Encoders will map this differently depending on the
  *                        panel type.
 *	                  vid mode -> vsync_irq
- *                        cmd mode -> ctl_start
+ *                        cmd mode -> wr_ptr_start_irq
 * MSM_ENC_TX_COMPLETE -  Wait for the hardware to transfer all the pixels to
 *                        the panel. Encoders will map this differently
 *                        depending on the panel type.
@@ -220,6 +236,18 @@ struct drm_encoder *sde_encoder_init(
 		struct drm_device *dev,
 		struct msm_display_info *disp_info);
 
+/**
+ * sde_encoder_init_with_ops - initialize virtual encoder object with init ops
+ * @dev:        Pointer to drm device structure
+ * @disp_info:  Pointer to display information structure
+ * @ops:        Pointer to encoder ops structure
+ * Returns:     Pointer to newly created drm encoder
+ */
+struct drm_encoder *sde_encoder_init_with_ops(
+		struct drm_device *dev,
+		struct msm_display_info *disp_info,
+		const struct sde_encoder_ops *ops);
+
 /**
  * sde_encoder_destroy - destroy previously initialized virtual encoder
  * @drm_enc:    Pointer to previously created drm encoder structure

+ 7 - 15
msm/sde/sde_encoder_phys.h

@@ -189,16 +189,18 @@ struct sde_encoder_phys_ops {
 /**
 * enum sde_intr_idx - sde encoder interrupt index
 * @INTR_IDX_VSYNC:    Vsync interrupt for video mode panel
- * @INTR_IDX_PINGPONG: Pingpong done unterrupt for cmd mode panel
- * @INTR_IDX_UNDERRUN: Underrun unterrupt for video and cmd mode panel
- * @INTR_IDX_RDPTR:    Readpointer done unterrupt for cmd mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_RDPTR:    Readpointer done interrupt for cmd mode panel
 * @INTR_IDX_WB_DONE:  Writeback done interrupt for WB
+ * @INTR_IDX_PP1_OVFL: Pingpong overflow interrupt on PP1 for Concurrent WB
 * @INTR_IDX_PP2_OVFL: Pingpong overflow interrupt on PP2 for Concurrent WB
 * @INTR_IDX_PP3_OVFL: Pingpong overflow interrupt on PP3 for Concurrent WB
 * @INTR_IDX_PP4_OVFL: Pingpong overflow interrupt on PP4 for Concurrent WB
 * @INTR_IDX_PP5_OVFL: Pingpong overflow interrupt on PP5 for Concurrent WB
 * @INTR_IDX_AUTOREFRESH_DONE:  Autorefresh done for cmd mode panel meaning
 *                              autorefresh has triggered a double buffer flip
+ * @INTR_IDX_WRPTR:    Writepointer start interrupt for cmd mode panel
 */
 enum sde_intr_idx {
 	INTR_IDX_VSYNC,
@@ -208,10 +210,12 @@ enum sde_intr_idx {
 	INTR_IDX_RDPTR,
 	INTR_IDX_AUTOREFRESH_DONE,
 	INTR_IDX_WB_DONE,
+	INTR_IDX_PP1_OVFL,
 	INTR_IDX_PP2_OVFL,
 	INTR_IDX_PP3_OVFL,
 	INTR_IDX_PP4_OVFL,
 	INTR_IDX_PP5_OVFL,
+	INTR_IDX_WRPTR,
 	INTR_IDX_MAX,
 };
 
@@ -272,8 +276,6 @@ struct sde_encoder_irq {
 *				vs. the number of done/vblank irqs. Should hover
 *				between 0-2 Incremented when a new kickoff is
 *				scheduled. Decremented in irq handler
- * @pending_ctlstart_cnt:	Atomic counter tracking the number of ctl start
- *                              pending.
 * @pending_retire_fence_cnt:   Atomic counter tracking the pending retire
 *                              fences that have to be signalled.
 * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
@@ -317,7 +319,6 @@ struct sde_encoder_phys {
 	atomic_t wbirq_refcount;
 	atomic_t vsync_cnt;
 	atomic_t underrun_cnt;
-	atomic_t pending_ctlstart_cnt;
 	atomic_t pending_kickoff_cnt;
 	atomic_t pending_retire_fence_cnt;
 	wait_queue_head_t pending_kickoff_wq;
@@ -332,7 +333,6 @@ struct sde_encoder_phys {
 
 static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
 {
-	atomic_inc_return(&phys->pending_ctlstart_cnt);
 	return atomic_inc_return(&phys->pending_kickoff_cnt);
 }
 
@@ -369,24 +369,16 @@ struct sde_encoder_phys_cmd_autorefresh {
 * @stream_sel:	Stream selection for multi-stream interfaces
 * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
 * @autorefresh: autorefresh feature state
- * @pending_rd_ptr_cnt: atomic counter to indicate if retire fence can be
- *                      signaled at the next rd_ptr_irq
- * @rd_ptr_timestamp: last rd_ptr_irq timestamp
 * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
 * @pending_vblank_wq: Wait queue for blocking until VBLANK received
- * @ctl_start_threshold: A threshold in microseconds allows command mode
- *   engine to trigger the retire fence without waiting for rd_ptr.
 */
 struct sde_encoder_phys_cmd {
 	struct sde_encoder_phys base;
 	int stream_sel;
 	int pp_timeout_report_cnt;
 	struct sde_encoder_phys_cmd_autorefresh autorefresh;
-	atomic_t pending_rd_ptr_cnt;
-	ktime_t rd_ptr_timestamp;
 	atomic_t pending_vblank_cnt;
 	wait_queue_head_t pending_vblank_wq;
-	u32 ctl_start_threshold;
};
 
 /**
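
Aside: with pending_ctlstart_cnt gone, commit-done waits key off pending_retire_fence_cnt and the new WRPTR interrupt. The wait pattern, as used by _sde_encoder_phys_cmd_wait_for_wr_ptr() further below in this commit:

	struct sde_encoder_wait_info wait_info;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_retire_fence_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	/* blocks until the wr_ptr irq handler decrements the counter
	 * and wakes the queue, or the timeout expires */
	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_WRPTR,
			&wait_info);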

+ 62 - 140
msm/sde/sde_encoder_phys_cmd.c

@@ -35,12 +35,6 @@
 
 #define SDE_ENC_WR_PTR_START_TIMEOUT_US 20000
 
-/*
- * Threshold for signalling retire fences in cases where
- * CTL_START_IRQ is received just after RD_PTR_IRQ
- */
-#define SDE_ENC_CTL_START_THRESHOLD_US 500
-
 #define SDE_ENC_MAX_POLL_TIMEOUT_US	2000
 
 static inline int _sde_encoder_phys_cmd_get_idle_timeout(
@@ -203,23 +197,6 @@ static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
 			phys_enc->hw_pp->idx - PINGPONG_0, new_cnt, event);
 
-	/*
-	 * Reduce the refcount for the retire fence as well as for the ctl_start
-	 * if the counters are greater than zero. Signal retire fence if there
-	 * was a retire fence count pending and kickoff count is zero.
-	 */
-	if (sde_encoder_phys_cmd_is_master(phys_enc) && (new_cnt == 0)) {
-		while (atomic_add_unless(&phys_enc->pending_retire_fence_cnt,
-				-1, 0)) {
-			if (phys_enc->parent_ops.handle_frame_done)
-				phys_enc->parent_ops.handle_frame_done(
-					phys_enc->parent, phys_enc,
-				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
-			atomic_add_unless(&phys_enc->pending_ctlstart_cnt,
-				-1, 0);
-		}
-	}
-
 	/* Signal any waiting atomic commit thread */
 	wake_up_all(&phys_enc->pending_kickoff_wq);
 	SDE_ATRACE_END("pp_done_irq");
@@ -254,7 +231,7 @@ static void sde_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
 {
 	struct sde_encoder_phys *phys_enc = arg;
 	struct sde_encoder_phys_cmd *cmd_enc;
-	u32 event = 0, scheduler_status = INVALID_CTL_STATUS;
+	u32 scheduler_status = INVALID_CTL_STATUS;
 	struct sde_hw_ctl *ctl;
 	struct sde_hw_pp_vsync_info info[MAX_CHANNELS_PER_ENC] = {{0}};
 
@@ -265,101 +242,53 @@ static void sde_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
 	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
 	ctl = phys_enc->hw_ctl;
 
-	/**
-	 * signal only for master, when the ctl_start irq is
-	 * done and incremented the pending_rd_ptr_cnt.
-	 */
-	if (sde_encoder_phys_cmd_is_master(phys_enc)
-		    && atomic_add_unless(&cmd_enc->pending_rd_ptr_cnt, -1, 0)
-		    && atomic_add_unless(
-				&phys_enc->pending_retire_fence_cnt, -1, 0)) {
-
-		event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
-		if (phys_enc->parent_ops.handle_frame_done)
-			phys_enc->parent_ops.handle_frame_done(
-				phys_enc->parent, phys_enc, event);
-	}
-
 	if (ctl && ctl->ops.get_scheduler_status)
 		scheduler_status = ctl->ops.get_scheduler_status(ctl);
 
 	sde_encoder_helper_get_pp_line_count(phys_enc->parent, info);
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
 		info[0].pp_idx, info[0].intf_idx, info[0].wr_ptr_line_count,
-		event, scheduler_status,
-		info[1].pp_idx, info[1].intf_idx, info[1].wr_ptr_line_count);
+		info[1].pp_idx, info[1].intf_idx, info[1].wr_ptr_line_count,
+		scheduler_status);
 
 	if (phys_enc->parent_ops.handle_vblank_virt)
 		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
 			phys_enc);
 
-	cmd_enc->rd_ptr_timestamp = ktime_get();
-
 	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
 	wake_up_all(&cmd_enc->pending_vblank_wq);
 	SDE_ATRACE_END("rd_ptr_irq");
 }
 
-static void sde_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+static void sde_encoder_phys_cmd_wr_ptr_irq(void *arg, int irq_idx)
 {
 	struct sde_encoder_phys *phys_enc = arg;
-	struct sde_encoder_phys_cmd *cmd_enc;
 	struct sde_hw_ctl *ctl;
 	u32 event = 0;
-	s64 time_diff_us;
 	struct sde_hw_pp_vsync_info info[MAX_CHANNELS_PER_ENC] = {{0}};
 
 	if (!phys_enc || !phys_enc->hw_ctl)
 		return;
 
-	SDE_ATRACE_BEGIN("ctl_start_irq");
-	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
-
+	SDE_ATRACE_BEGIN("wr_ptr_irq");
 	ctl = phys_enc->hw_ctl;
-	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
-
-	time_diff_us = ktime_us_delta(ktime_get(), cmd_enc->rd_ptr_timestamp);
-
-	/* handle retire fence based on only master */
-	if (sde_encoder_phys_cmd_is_master(phys_enc)
-			&& atomic_read(&phys_enc->pending_retire_fence_cnt)) {
-		/**
-		 * Handle rare cases where the ctl_start_irq is received
-		 * after rd_ptr_irq. If it falls within a threshold, it is
-		 * guaranteed the frame would be picked up in the current TE.
-		 * Signal retire fence immediately in such case. The threshold
-		 * timer adds extra line time duration based on lowest panel
-		 * fps for qsync enabled case.
-		 */
-		if ((time_diff_us <= cmd_enc->ctl_start_threshold)
-			    && atomic_add_unless(
-				&phys_enc->pending_retire_fence_cnt, -1, 0)) {
-
-			event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
 
-			if (phys_enc->parent_ops.handle_frame_done)
-				phys_enc->parent_ops.handle_frame_done(
+	if (atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0)) {
+		event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
+		if (phys_enc->parent_ops.handle_frame_done)
+			phys_enc->parent_ops.handle_frame_done(
					phys_enc->parent, phys_enc, event);
-
-		/**
-		 * In ideal cases, ctl_start_irq is received before the
-		 * rd_ptr_irq, so set the atomic flag to indicate the event
-		 * and rd_ptr_irq will handle signalling the retire fence
-		 */
-		} else {
-			atomic_inc(&cmd_enc->pending_rd_ptr_cnt);
-		}
 	}
 
 	sde_encoder_helper_get_pp_line_count(phys_enc->parent, info);
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
-		ctl->idx - CTL_0, time_diff_us, event,
+		ctl->idx - CTL_0, event,
 		info[0].pp_idx, info[0].intf_idx, info[0].wr_ptr_line_count,
 		info[1].pp_idx, info[1].intf_idx, info[1].wr_ptr_line_count);
 
-	/* Signal any waiting ctl start interrupt */
+	/* Signal any waiting wr_ptr start interrupt */
 	wake_up_all(&phys_enc->pending_kickoff_wq);
-	SDE_ATRACE_END("ctl_start_irq");
+	SDE_ATRACE_END("wr_ptr_irq");
 }
 
 static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
@@ -415,6 +344,14 @@ static void _sde_encoder_phys_cmd_setup_irq_hw_idx(
 		irq->hw_idx = phys_enc->hw_intf->idx;
 	else
 		irq->hw_idx = phys_enc->hw_pp->idx;
+
+	irq = &phys_enc->irq[INTR_IDX_WRPTR];
+	irq->irq_idx = -EINVAL;
+	if (phys_enc->has_intf_te)
+		irq->hw_idx = phys_enc->hw_intf->idx;
+	else
+		irq->hw_idx = phys_enc->hw_pp->idx;
+
 }
 
 static void sde_encoder_phys_cmd_cont_splash_mode_set(
@@ -532,17 +469,6 @@ static int _sde_encoder_phys_cmd_handle_ppdone_timeout(
 	cmd_enc->pp_timeout_report_cnt++;
 	pending_kickoff_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
 
-	if (sde_encoder_phys_cmd_is_master(phys_enc)) {
-		 /* trigger the retire fence if it was missed */
-		if (atomic_add_unless(&phys_enc->pending_retire_fence_cnt,
-				-1, 0))
-			phys_enc->parent_ops.handle_frame_done(
-				phys_enc->parent,
-				phys_enc,
-				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
-		atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
-	}
-
 	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
 			cmd_enc->pp_timeout_report_cnt,
 			pending_kickoff_cnt,
@@ -877,12 +803,11 @@ void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc,
 
 	if (enable) {
 		sde_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
-		sde_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
 		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
 
 		if (sde_encoder_phys_cmd_is_master(phys_enc)) {
 			sde_encoder_helper_register_irq(phys_enc,
-					INTR_IDX_CTL_START);
+					INTR_IDX_WRPTR);
 			sde_encoder_helper_register_irq(phys_enc,
 					INTR_IDX_AUTOREFRESH_DONE);
 		}
@@ -890,12 +815,11 @@ void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc,
 	} else {
 		if (sde_encoder_phys_cmd_is_master(phys_enc)) {
 			sde_encoder_helper_unregister_irq(phys_enc,
-					INTR_IDX_CTL_START);
+					INTR_IDX_WRPTR);
 			sde_encoder_helper_unregister_irq(phys_enc,
 					INTR_IDX_AUTOREFRESH_DONE);
 		}
 
-		sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
 		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
 		sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
 	}
@@ -1051,33 +975,31 @@ static void sde_encoder_phys_cmd_tearcheck_config(
 	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
 	tc_cfg.start_pos = mode->vdisplay;
 	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
-
-	cmd_enc->ctl_start_threshold = (extra_frame_trigger_time / 1000) +
-			SDE_ENC_CTL_START_THRESHOLD_US;
+	tc_cfg.wr_ptr_irq = 1;
 
 	SDE_DEBUG_CMDENC(cmd_enc,
-		"tc %d intf %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+	  "tc %d intf %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
 		phys_enc->hw_pp->idx - PINGPONG_0,
 		phys_enc->hw_intf->idx - INTF_0,
 		vsync_hz, mode->vtotal, mode->vrefresh);
 	SDE_DEBUG_CMDENC(cmd_enc,
-		"tc %d intf %d enable %u start_pos %u rd_ptr_irq %u\n",
+	  "tc %d intf %d enable %u start_pos %u rd_ptr_irq %u wr_ptr_irq %u\n",
 		phys_enc->hw_pp->idx - PINGPONG_0,
 		phys_enc->hw_intf->idx - INTF_0,
-		tc_enable, tc_cfg.start_pos, tc_cfg.rd_ptr_irq);
+		tc_enable, tc_cfg.start_pos, tc_cfg.rd_ptr_irq,
+		tc_cfg.wr_ptr_irq);
 	SDE_DEBUG_CMDENC(cmd_enc,
-		"tc %d intf %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+	  "tc %d intf %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
 		phys_enc->hw_pp->idx - PINGPONG_0,
 		phys_enc->hw_intf->idx - INTF_0,
 		tc_cfg.hw_vsync_mode, tc_cfg.vsync_count,
 		tc_cfg.vsync_init_val);
 	SDE_DEBUG_CMDENC(cmd_enc,
-		"tc %d intf %d cfgheight %u thresh_start %u thresh_cont %u ctl_start_threshold:%d\n",
+	  "tc %d intf %d cfgheight %u thresh_start %u thresh_cont %u\n",
 		phys_enc->hw_pp->idx - PINGPONG_0,
 		phys_enc->hw_intf->idx - INTF_0,
 		tc_cfg.sync_cfg_height,
-		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue,
-		cmd_enc->ctl_start_threshold);
+		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
 
 	if (phys_enc->has_intf_te) {
 		phys_enc->hw_intf->ops.setup_tearcheck(phys_enc->hw_intf,
@@ -1360,6 +1282,7 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff(
 	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
 			atomic_read(&phys_enc->pending_kickoff_cnt),
 			atomic_read(&cmd_enc->autorefresh.kickoff_cnt));
+	phys_enc->frame_trigger_mode = params->frame_trigger_mode;
 
 	if (phys_enc->frame_trigger_mode == FRAME_DONE_WAIT_DEFAULT) {
 		/*
@@ -1387,12 +1310,7 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff(
 		else if (phys_enc->hw_pp->ops.update_tearcheck)
 			phys_enc->hw_pp->ops.update_tearcheck(
 					phys_enc->hw_pp, &tc_cfg);
-
-		cmd_enc->ctl_start_threshold =
-			(extra_frame_trigger_time / 1000) +
-				SDE_ENC_CTL_START_THRESHOLD_US;
-		SDE_EVT32(DRMID(phys_enc->parent),
-		    tc_cfg.sync_threshold_start, cmd_enc->ctl_start_threshold);
+		SDE_EVT32(DRMID(phys_enc->parent), tc_cfg.sync_threshold_start);
 	}
 
 	SDE_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
@@ -1401,7 +1319,7 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff(
 	return ret;
 }
 
-static int _sde_encoder_phys_cmd_wait_for_ctl_start(
+static int _sde_encoder_phys_cmd_wait_for_wr_ptr(
 		struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
@@ -1418,14 +1336,14 @@ static int _sde_encoder_phys_cmd_wait_for_ctl_start(
 	ctl = phys_enc->hw_ctl;
 
 	wait_info.wq = &phys_enc->pending_kickoff_wq;
-	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+	wait_info.atomic_cnt = &phys_enc->pending_retire_fence_cnt;
 	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
 
 	/* slave encoder doesn't enable for ppsplit */
 	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
 		return 0;
 
-	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_WRPTR,
 			&wait_info);
 	if (ret == -ETIMEDOUT) {
 		struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
@@ -1435,27 +1353,24 @@ static int _sde_encoder_phys_cmd_wait_for_ctl_start(
 
 		if (frame_pending)
 			SDE_ERROR_CMDENC(cmd_enc,
-					"ctl start interrupt wait failed\n");
+				"wr_ptrt start interrupt wait failed\n");
 		else
 			ret = 0;
 
-		if (sde_encoder_phys_cmd_is_master(phys_enc)) {
-			/*
-			 * Signaling the retire fence at ctl start timeout
-			 * to allow the next commit and avoid device freeze.
-			 * As ctl start timeout can occurs due to no read ptr,
-			 * updating pending_rd_ptr_cnt here may not cover all
-			 * cases. Hence signaling the retire fence.
-			 */
-			if (atomic_add_unless(
-			 &phys_enc->pending_retire_fence_cnt, -1, 0))
-				phys_enc->parent_ops.handle_frame_done(
-				 phys_enc->parent,
-				 phys_enc,
-				 SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
-			atomic_add_unless(
-				&phys_enc->pending_ctlstart_cnt, -1, 0);
-		}
+		/*
+		 * Signaling the retire fence at wr_ptr timeout
+		 * to allow the next commit and avoid device freeze.
+		 * As wr_ptr timeout can occurs due to no read ptr,
+		 * updating pending_rd_ptr_cnt here may not cover all
+		 * cases. Hence signaling the retire fence.
+		 */
+		if (sde_encoder_phys_cmd_is_master(phys_enc) &&
+			atomic_add_unless(&phys_enc->pending_retire_fence_cnt,
+				-1, 0))
+			phys_enc->parent_ops.handle_frame_done(
+				phys_enc->parent, phys_enc,
+				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
+
	} else if ((ret == 0) &&
 	    (phys_enc->frame_trigger_mode == FRAME_DONE_WAIT_POSTED_START) &&
 	    atomic_read(&phys_enc->pending_kickoff_cnt) &&
@@ -1507,7 +1422,7 @@ static int sde_encoder_phys_cmd_wait_for_commit_done(
 
 	/* only required for master controller */
 	if (sde_encoder_phys_cmd_is_master(phys_enc))
-		rc = _sde_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+		rc = _sde_encoder_phys_cmd_wait_for_wr_ptr(phys_enc);
 
 	if (!rc && sde_encoder_phys_cmd_is_master(phys_enc) &&
 			cmd_enc->autorefresh.cfg.enable)
@@ -1742,7 +1657,6 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
 	phys_enc->enc_spinlock = p->enc_spinlock;
 	phys_enc->vblank_ctl_lock = p->vblank_ctl_lock;
 	cmd_enc->stream_sel = 0;
-	cmd_enc->ctl_start_threshold = SDE_ENC_CTL_START_THRESHOLD_US;
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 	sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
 	phys_enc->comp_type = p->comp_type;
@@ -1764,7 +1678,7 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
 	irq->name = "ctl_start";
 	irq->intr_type = SDE_IRQ_TYPE_CTL_START;
 	irq->intr_idx = INTR_IDX_CTL_START;
-	irq->cb.func = sde_encoder_phys_cmd_ctl_start_irq;
+	irq->cb.func = NULL;
 
 	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
 	irq->name = "pp_done";
@@ -1800,11 +1714,19 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
 	irq->intr_idx = INTR_IDX_AUTOREFRESH_DONE;
 	irq->cb.func = sde_encoder_phys_cmd_autorefresh_done_irq;
 
+	irq = &phys_enc->irq[INTR_IDX_WRPTR];
+	irq->intr_idx = INTR_IDX_WRPTR;
+	irq->name = "wr_ptr";
+
+	if (phys_enc->has_intf_te)
+		irq->intr_type = SDE_IRQ_TYPE_INTF_TEAR_WR_PTR;
+	else
+		irq->intr_type = SDE_IRQ_TYPE_PING_PONG_WR_PTR;
+	irq->cb.func = sde_encoder_phys_cmd_wr_ptr_irq;
+
 	atomic_set(&phys_enc->vblank_refcount, 0);
 	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
-	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
 	atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
-	atomic_set(&cmd_enc->pending_rd_ptr_cnt, 0);
 	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
 	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
 	init_waitqueue_head(&cmd_enc->pending_vblank_wq);
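
Aside: the net effect of this file's changes is that retire fences are signalled in exactly one place, the wr_ptr handler, instead of being juggled across ctl_start/rd_ptr/pp_done with a timing threshold. The guard is the usual signal-once idiom:

	/* atomic_add_unless() decrements only if the counter is non-zero
	 * and returns false otherwise, so a racing irq cannot signal the
	 * same retire fence twice */
	if (atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0))
		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
				phys_enc,
				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);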

+ 13 - 3
msm/sde/sde_encoder_phys_wb.c

@@ -24,7 +24,7 @@
 
 #define TO_S15D16(_x_)	((_x_) << 7)
 
-static const u32 cwb_irq_tbl[PINGPONG_MAX] = {SDE_NONE, SDE_NONE,
+static const u32 cwb_irq_tbl[PINGPONG_MAX] = {SDE_NONE, INTR_IDX_PP1_OVFL,
 	INTR_IDX_PP2_OVFL, INTR_IDX_PP3_OVFL, INTR_IDX_PP4_OVFL,
 	INTR_IDX_PP5_OVFL, SDE_NONE, SDE_NONE};
 
@@ -142,7 +142,7 @@ static void sde_encoder_phys_wb_set_qos_remap(
 	qos_params.client_type = phys_enc->in_clone_mode ?
 					VBIF_CWB_CLIENT : VBIF_NRT_CLIENT;
 
-	SDE_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d rt:%d clone:%d\n",
+	SDE_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d clone:%d\n",
 			qos_params.num,
 			qos_params.vbif_idx,
 			qos_params.xin_id, qos_params.client_type);
@@ -1572,7 +1572,7 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
 		goto exit;
 	}
 
-	if (sde_encoder_helper_reset_mixers(phys_enc, wb_enc->fb_disable))
+	if (sde_encoder_helper_reset_mixers(phys_enc, NULL))
 		goto exit;
 
 	phys_enc->enable_state = SDE_ENC_DISABLING;
@@ -1808,6 +1808,16 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init(
 	irq->cb.arg = wb_enc;
 	irq->cb.func = sde_encoder_phys_wb_done_irq;
 
+	irq = &phys_enc->irq[INTR_IDX_PP1_OVFL];
+	INIT_LIST_HEAD(&irq->cb.list);
+	irq->name = "pp1_overflow";
+	irq->hw_idx = CWB_1;
+	irq->irq_idx = -1;
+	irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
+	irq->intr_idx = INTR_IDX_PP1_OVFL;
+	irq->cb.arg = wb_enc;
+	irq->cb.func = sde_encoder_phys_cwb_ovflow;
+
 	irq = &phys_enc->irq[INTR_IDX_PP2_OVFL];
 	INIT_LIST_HEAD(&irq->cb.list);
 	irq->name = "pp2_overflow";

+ 95 - 11
msm/sde/sde_hw_catalog.c

@@ -130,6 +130,8 @@
 		"NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25"
 #define DEFAULT_MAX_PER_PIPE_BW			2400000
 #define DEFAULT_AMORTIZABLE_THRESHOLD		25
+#define DEFAULT_MNOC_PORTS			2
+#define DEFAULT_AXI_BUS_WIDTH			32
 #define DEFAULT_CPU_MASK			0
 #define DEFAULT_CPU_DMA_LATENCY			PM_QOS_DEFAULT_VALUE
 
@@ -213,6 +215,8 @@ enum {
 	PERF_CPU_DMA_LATENCY,
 	PERF_QOS_LUT_MACROTILE_QSEED,
 	PERF_SAFE_LUT_MACROTILE_QSEED,
+	PERF_NUM_MNOC_PORTS,
+	PERF_AXI_BUS_WIDTH,
 	PERF_PROP_MAX,
 };
 
@@ -525,6 +529,10 @@ static struct sde_prop_type sde_perf_prop[] = {
 			false, PROP_TYPE_U32_ARRAY},
 	{PERF_SAFE_LUT_MACROTILE_QSEED, "qcom,sde-safe-lut-macrotile-qseed",
 			false, PROP_TYPE_U32_ARRAY},
+	{PERF_NUM_MNOC_PORTS, "qcom,sde-num-mnoc-ports",
+			false, PROP_TYPE_U32},
+	{PERF_AXI_BUS_WIDTH, "qcom,sde-axi-bus-width",
+			false, PROP_TYPE_U32},
 };
 
 static struct sde_prop_type sspp_prop[] = {
@@ -743,6 +751,12 @@ static struct sde_prop_type merge_3d_prop[] = {
 	{HW_OFF, "qcom,sde-merge-3d-off", false, PROP_TYPE_U32_ARRAY},
 	{HW_LEN, "qcom,sde-merge-3d-size", false, PROP_TYPE_U32},
 };
+
+static struct sde_prop_type qdss_prop[] = {
+	{HW_OFF, "qcom,sde-qdss-off", false, PROP_TYPE_U32_ARRAY},
+	{HW_LEN, "qcom,sde-qdss-size", false, PROP_TYPE_U32},
+};
+
 /*************************************************************
 * static API list
 *************************************************************/
@@ -3470,6 +3484,16 @@ static void _sde_perf_parse_dt_cfg_populate(struct sde_mdss_cfg *cfg,
 			PROP_VALUE_ACCESS(prop_value,
 					PERF_AMORTIZABLE_THRESHOLD, 0) :
 			DEFAULT_AMORTIZABLE_THRESHOLD;
+	cfg->perf.num_mnoc_ports =
+			prop_exists[PERF_NUM_MNOC_PORTS] ?
+			PROP_VALUE_ACCESS(prop_value,
+				PERF_NUM_MNOC_PORTS, 0) :
+			DEFAULT_MNOC_PORTS;
+	cfg->perf.axi_bus_width =
+			prop_exists[PERF_AXI_BUS_WIDTH] ?
+			PROP_VALUE_ACCESS(prop_value,
+				PERF_AXI_BUS_WIDTH, 0) :
+			DEFAULT_AXI_BUS_WIDTH;
 }
 
 static int _sde_perf_parse_dt_cfg(struct device_node *np,
@@ -3585,23 +3609,23 @@ static int sde_parse_merge_3d_dt(struct device_node *np,
 
 	prop_value = kcalloc(HW_PROP_MAX, sizeof(struct sde_prop_value),
 			GFP_KERNEL);
-	if (!prop_value) {
-		rc = -ENOMEM;
-		goto fail;
-	}
+	if (!prop_value)
+		return -ENOMEM;
 
 	rc = _validate_dt_entry(np, merge_3d_prop, ARRAY_SIZE(merge_3d_prop),
 		prop_count, &off_count);
 	if (rc)
-		goto error;
+		goto end;
 
 	sde_cfg->merge_3d_count = off_count;
 
 	rc = _read_dt_entry(np, merge_3d_prop, ARRAY_SIZE(merge_3d_prop),
 			prop_count,
 			prop_exists, prop_value);
-	if (rc)
-		goto error;
+	if (rc) {
+		sde_cfg->merge_3d_count = 0;
+		goto end;
+	}
 
 	for (i = 0; i < off_count; i++) {
 		merge_3d = sde_cfg->merge_3d + i;
@@ -3612,11 +3636,54 @@ static int sde_parse_merge_3d_dt(struct device_node *np,
 		merge_3d->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
 	}
 
-	return 0;
-error:
-	sde_cfg->merge_3d_count = 0;
+end:
+	kfree(prop_value);
+	return rc;
+}
+
+int sde_qdss_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[HW_PROP_MAX], i;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[HW_PROP_MAX];
+	u32 off_count;
+	struct sde_qdss_cfg *qdss;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	prop_value = kzalloc(HW_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value)
+		return -ENOMEM;
+
+	rc = _validate_dt_entry(np, qdss_prop, ARRAY_SIZE(qdss_prop),
+			prop_count, &off_count);
+	if (rc) {
+		sde_cfg->qdss_count = 0;
+		goto end;
+	}
+
+	sde_cfg->qdss_count = off_count;
+
+	rc = _read_dt_entry(np, qdss_prop, ARRAY_SIZE(qdss_prop), prop_count,
+			prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		qdss = sde_cfg->qdss + i;
+		qdss->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
+		qdss->id = QDSS_0 + i;
+		snprintf(qdss->name, SDE_HW_BLK_NAME_LEN, "qdss_%u",
+				qdss->id - QDSS_0);
+		qdss->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
+	}
+
+end:
 	kfree(prop_value);
-fail:
 	return rc;
 }
 
@@ -3916,6 +3983,19 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->true_inline_prefill_fudge_lines = 2;
 		sde_cfg->true_inline_prefill_lines_nv12 = 32;
 		sde_cfg->true_inline_prefill_lines = 48;
+	} else if (IS_SDMTRINKET_TARGET(hw_rev)) {
+		sde_cfg->has_cwb_support = true;
+		sde_cfg->has_qsync = true;
+		sde_cfg->perf.min_prefill_lines = 24;
+		sde_cfg->vbif_qos_nlvl = 8;
+		sde_cfg->ts_prefill_rev = 2;
+		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
+		sde_cfg->delay_prg_fetch_start = true;
+		sde_cfg->sui_ns_allowed = true;
+		sde_cfg->sui_misr_supported = true;
+		sde_cfg->sui_block_xin_mask = 0xC61;
+		sde_cfg->has_hdr = false;
+		sde_cfg->has_sui_blendstage = true;
 	} else {
 		SDE_ERROR("unsupported chipset id:%X\n", hw_rev);
 		sde_cfg->perf.min_prefill_lines = 0xffff;
@@ -4135,6 +4215,10 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
 	if (rc)
 		goto end;
 
+	rc = sde_qdss_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
 	rc = _sde_hardware_post_caps(sde_cfg, hw_rev);
 	if (rc)
 		goto end;
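
Aside: once sde_qdss_parse_dt() has run, consumers can walk the new catalog array like any other block list (the loop below is an illustrative sketch, not from the commit):

	for (i = 0; i < sde_cfg->qdss_count; i++) {
		struct sde_qdss_cfg *qdss = &sde_cfg->qdss[i];

		pr_debug("%s: base 0x%x len 0x%x\n",
				qdss->name, qdss->base, qdss->len);
	}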

+ 22 - 1
msm/sde/sde_hw_catalog.h

@@ -49,6 +49,7 @@
 #define SDE_HW_VER_510	SDE_HW_VER(5, 1, 0) /* sdmshrike v1.0 */
 #define SDE_HW_VER_520	SDE_HW_VER(5, 2, 0) /* sdmmagpie v1.0 */
 #define SDE_HW_VER_530	SDE_HW_VER(5, 3, 0) /* sm6150 v1.0 */
+#define SDE_HW_VER_540	SDE_HW_VER(5, 4, 0) /* sdmtrinket v1.0 */
 #define SDE_HW_VER_600	SDE_HW_VER(6, 0, 0) /* kona */
 #define SDE_HW_VER_610	SDE_HW_VER(6, 1, 0) /* sm7250 */
 
@@ -60,6 +61,7 @@
 #define IS_SDMSHRIKE_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_510)
 #define IS_SDMMAGPIE_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_520)
 #define IS_SM6150_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_530)
+#define IS_SDMTRINKET_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_540)
 #define IS_KONA_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_600)
 #define IS_SAIPAN_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_610)
 
@@ -114,7 +116,8 @@ enum {
 	SDE_HW_UBWC_VER_30 = SDE_HW_UBWC_VER(0x300),
 	SDE_HW_UBWC_VER_40 = SDE_HW_UBWC_VER(0x400),
 };
-
+#define IS_UBWC_10_SUPPORTED(rev) \
+		IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_10)
 #define IS_UBWC_20_SUPPORTED(rev) \
 		IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_20)
 #define IS_UBWC_30_SUPPORTED(rev) \
@@ -977,6 +980,17 @@ struct sde_merge_3d_cfg {
 };
 
 /**
+ * struct sde_qdss_cfg - information of qdss blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @len:               length of hardware block
+ * @features           bit mask identifying sub-blocks/features
+ */
+struct sde_qdss_cfg {
+	SDE_HW_BLK_INFO;
+};
+
+/*
 * struct sde_vbif_dynamic_ot_cfg - dynamic OT setting
 * @pps                pixel per seconds
 * @ot_limit           OT limit to use up to specified pixel per second
@@ -1130,6 +1144,8 @@ struct sde_sc_cfg {
 * @cdp_cfg            cdp use case configurations
 * @cpu_mask:          pm_qos cpu mask value
 * @cpu_dma_latency:   pm_qos cpu dma latency value
+ * @axi_bus_width:     axi bus width value in bytes
+ * @num_mnoc_ports:    number of mnoc ports
 */
 struct sde_perf_cfg {
 	u32 max_bw_low;
@@ -1156,6 +1172,8 @@ struct sde_perf_cfg {
 	struct sde_perf_cdp_cfg cdp_cfg[SDE_PERF_CDP_USAGE_MAX];
 	u32 cpu_mask;
 	u32 cpu_dma_latency;
+	u32 axi_bus_width;
+	u32 num_mnoc_ports;
 };
 
 /**
@@ -1338,6 +1356,9 @@ struct sde_mdss_cfg {
 	u32 merge_3d_count;
 	struct sde_merge_3d_cfg merge_3d[MAX_BLOCKS];
 
+	u32 qdss_count;
+	struct sde_qdss_cfg qdss[MAX_BLOCKS];
+
 	/* Add additional block data structures here */
 
 	struct sde_perf_cfg perf;

+ 2 - 2
msm/sde/sde_hw_ctl.c

@@ -138,7 +138,7 @@ static const u32 cdm_flush_tbl[CDM_MAX] = {SDE_NONE, 0};
 /**
  * list of CWB bits in CTL_CWB_FLUSH
  */
-static const u32 cwb_flush_tbl[CWB_MAX] = {SDE_NONE, SDE_NONE, SDE_NONE, 2, 3,
+static const u32 cwb_flush_tbl[CWB_MAX] = {SDE_NONE, SDE_NONE, 1, 2, 3,
 	4, 5};
 
 /**
@@ -555,7 +555,7 @@ static inline int sde_hw_ctl_update_bitmask_cwb_v1(struct sde_hw_ctl *ctx,
 	if (!ctx)
 		return -EINVAL;
 
-	if ((cwb < CWB_2) || (cwb >= CWB_MAX)) {
+	if ((cwb < CWB_1) || (cwb >= CWB_MAX)) {
 		SDE_ERROR("Unsupported cwb %d\n", cwb);
 		return -EINVAL;
 	}
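
With CWB_1 mapped to bit 1 in cwb_flush_tbl and the lower bound relaxed from CWB_2 to CWB_1, the first concurrent writeback mux becomes flushable. A hedged sketch of a call site, assuming the helper is exposed through the ctl ops as update_bitmask_cwb and sets bit cwb_flush_tbl[cwb] in the pending CTL_CWB_FLUSH mask (only its bounds check is shown in this hunk):

	/* Illustrative: mark CWB_1 (bit 1) for flush on the next CTL flush. */
	if (ctl->ops.update_bitmask_cwb)
		ctl->ops.update_bitmask_cwb(ctl, CWB_1, true);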

+ 1 - 0
msm/sde/sde_hw_interrupts.c

@@ -126,6 +126,7 @@
 /**
  * Concurrent WB overflow interrupt status bit definitions
  */
+#define SDE_INTR_CWB_1_OVERFLOW BIT(8)
 #define SDE_INTR_CWB_2_OVERFLOW BIT(14)
 #define SDE_INTR_CWB_3_OVERFLOW BIT(15)
 #define SDE_INTR_CWB_4_OVERFLOW BIT(20)

+ 1 - 0
msm/sde/sde_hw_intf.c

@@ -485,6 +485,7 @@ static int sde_hw_intf_setup_te_config(struct sde_hw_intf *intf,
 	SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
 	SDE_REG_WRITE(c, INTF_TEAR_VSYNC_INIT_VAL, te->vsync_init_val);
 	SDE_REG_WRITE(c, INTF_TEAR_RD_PTR_IRQ, te->rd_ptr_irq);
+	SDE_REG_WRITE(c, INTF_TEAR_WR_PTR_IRQ, te->wr_ptr_irq);
 	SDE_REG_WRITE(c, INTF_TEAR_START_POS, te->start_pos);
 	SDE_REG_WRITE(c, INTF_TEAR_SYNC_THRESH,
 			((te->sync_threshold_continue << 16) |

+ 2 - 1
msm/sde/sde_hw_lm.c

@@ -286,7 +286,8 @@ static void _setup_mixer_ops(struct sde_mdss_cfg *m,
 			IS_SM6150_TARGET(m->hwversion) ||
 			IS_SDMMAGPIE_TARGET(m->hwversion) ||
 			IS_KONA_TARGET(m->hwversion) ||
-			IS_SAIPAN_TARGET(m->hwversion))
+			IS_SAIPAN_TARGET(m->hwversion) ||
+			IS_SDMTRINKET_TARGET(m->hwversion))
 		ops->setup_blend_config = sde_hw_lm_setup_blend_config_sdm845;
 	else
 		ops->setup_blend_config = sde_hw_lm_setup_blend_config;

+ 9 - 0
msm/sde/sde_hw_mdss.h

@@ -104,6 +104,7 @@ enum sde_hw_blk_type {
 	SDE_HW_BLK_WB,
 	SDE_HW_BLK_DSC,
 	SDE_HW_BLK_MERGE_3D,
+	SDE_HW_BLK_QDSS,
 	SDE_HW_BLK_MAX,
 };
 
@@ -322,6 +323,11 @@ enum sde_merge_3d {
 	MERGE_3D_MAX
 };
 
+enum sde_qdss {
+	QDSS_0,
+	QDSS_MAX
+};
+
 /**
  * SDE HW,Component order color map
  */
@@ -512,6 +518,7 @@ struct sde_mdss_color {
 #define SDE_DBG_MASK_REGDMA   (1 << 14)
 #define SDE_DBG_MASK_UIDLE    (1 << 15)
 #define SDE_DBG_MASK_SID      (1 << 15)
+#define SDE_DBG_MASK_QDSS     (1 << 16)
 
 /**
  * struct sde_hw_cp_cfg: hardware dspp/lm feature payload.
@@ -640,6 +647,7 @@ struct sde_splash_data {
  *                           needs to be above the read pointer
  * @start_pos:	The position from which the start_threshold value is added
  * @rd_ptr_irq:	The read pointer line at which interrupt has to be generated
+ * @wr_ptr_irq:	The write pointer line at which interrupt has to be generated
  * @hw_vsync_mode:	Sync with external frame sync input
  */
 struct sde_hw_tear_check {
@@ -650,6 +658,7 @@ struct sde_hw_tear_check {
 	u32 sync_threshold_continue;
 	u32 start_pos;
 	u32 rd_ptr_irq;
+	u32 wr_ptr_irq;
 	u8 hw_vsync_mode;
 };
 

+ 1 - 0
msm/sde/sde_hw_pingpong.c

@@ -176,6 +176,7 @@ static int sde_hw_pp_setup_te_config(struct sde_hw_pingpong *pp,
 	SDE_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
 	SDE_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
 	SDE_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+	SDE_REG_WRITE(c, PP_WR_PTR_IRQ, te->wr_ptr_irq);
 	SDE_REG_WRITE(c, PP_START_POS, te->start_pos);
 	SDE_REG_WRITE(c, PP_SYNC_THRESH,
 			((te->sync_threshold_continue << 16) |
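
Both the INTF and PINGPONG tear-check programmers now also write the new wr_ptr_irq line, which is what lets the encoder switch from the ctl_start interrupt to the write-pointer interrupt. A minimal sketch of filling the extended config, using only fields visible in this patch; the values and the setup_tearcheck op name are assumptions, not recommended settings:

	struct sde_hw_tear_check tc = {
		.sync_cfg_height = 0xfff0,	/* placeholder */
		.vsync_init_val = vdisplay,	/* panel height, assumed */
		.rd_ptr_irq = vdisplay + 1,
		.wr_ptr_irq = 1,		/* new: fire at write pointer line 1 */
		.start_pos = vdisplay,
		.sync_threshold_continue = 4,	/* placeholder */
		.hw_vsync_mode = 1,
	};

	if (intf->ops.setup_tearcheck)
		intf->ops.setup_tearcheck(intf, &tc);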

+ 100 - 0
msm/sde/sde_hw_qdss.c

@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)     "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include "sde_kms.h"
+#include "sde_dbg.h"
+#include "sde_hw_qdss.h"
+
+#define QDSS_CONFIG	0x0
+
+static struct sde_qdss_cfg *_qdss_offset(enum sde_qdss qdss,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->qdss_count; i++) {
+		if (qdss == m->qdss[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->qdss[i].base;
+			b->length = m->qdss[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_QDSS;
+			return &m->qdss[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static void sde_hw_qdss_enable_qdss_events(struct sde_hw_qdss *hw_qdss,
+							bool enable)
+{
+	struct sde_hw_blk_reg_map *c = &hw_qdss->hw;
+	u32 val;
+
+	val = enable ? 0x100 : 0;
+
+	if (c)
+		SDE_REG_WRITE(c, QDSS_CONFIG, val);
+}
+
+static void _setup_qdss_ops(struct sde_hw_qdss_ops *ops)
+{
+	ops->enable_qdss_events = sde_hw_qdss_enable_qdss_events;
+}
+
+static struct sde_hw_blk_ops sde_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct sde_hw_qdss *sde_hw_qdss_init(enum sde_qdss idx,
+			void __iomem *addr,
+			struct sde_mdss_cfg *m)
+{
+	struct sde_hw_qdss *c;
+	struct sde_qdss_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _qdss_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->idx = idx;
+	c->caps = cfg;
+	_setup_qdss_ops(&c->ops);
+
+	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_QDSS, idx, &sde_hw_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		kzfree(c);
+		return ERR_PTR(rc);
+	}
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+	return c;
+}
+
+void sde_hw_qdss_destroy(struct sde_hw_qdss *qdss)
+{
+	if (qdss)
+		sde_hw_blk_destroy(&qdss->base);
+	kfree(qdss);
+}

+ 68 - 0
msm/sde/sde_hw_qdss.h

@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _SDE_HW_QDSS_H
+#define _SDE_HW_QDSS_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_blk.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_qdss;
+
+/**
+ * struct sde_hw_qdss_ops - interface to the qdss hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_qdss_ops {
+	/**
+	 * enable_qdss_events - enable or disable qdss events
+	 * @hw_qdss: Pointer to qdss context
+	 */
+	void (*enable_qdss_events)(struct sde_hw_qdss *hw_qdss, bool enable);
+};
+
+struct sde_hw_qdss {
+	struct sde_hw_blk base;
+	struct sde_hw_blk_reg_map hw;
+
+	/* qdss */
+	enum sde_qdss idx;
+	const struct sde_qdss_cfg *caps;
+
+	/* ops */
+	struct sde_hw_qdss_ops ops;
+};
+
+/**
+ * to_sde_hw_qdss - convert base object sde_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct sde_hw_qdss *to_sde_hw_qdss(struct sde_hw_blk *hw)
+{
+	return container_of(hw, struct sde_hw_qdss, base);
+}
+
+/**
+ * sde_hw_qdss_init - initializes the qdss block for the passed qdss idx
+ * @idx:  QDSS index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m:    Pointer to mdss catalog data
+ * Returns: Error code or allocated sde_hw_qdss context
+ */
+struct sde_hw_qdss *sde_hw_qdss_init(enum sde_qdss idx,
+				void __iomem *addr,
+				struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_qdss_destroy - destroys qdss driver context
+ *			 should be called to free the context
+ * @qdss: Pointer to qdss driver context returned by sde_hw_qdss_init
+ */
+void sde_hw_qdss_destroy(struct sde_hw_qdss *qdss);
+
+#endif /* _SDE_HW_QDSS_H */
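
Taken together with sde_hw_qdss.c above, the header defines the whole QDSS lifecycle. A minimal usage sketch, assuming 'mmio' is the mapped MDP base and 'catalog' is a parsed sde_mdss_cfg containing one QDSS entry:

	struct sde_hw_qdss *qdss;

	qdss = sde_hw_qdss_init(QDSS_0, mmio, catalog);
	if (IS_ERR(qdss))
		return PTR_ERR(qdss);

	/* Writes 0x100 to QDSS_CONFIG, per sde_hw_qdss_enable_qdss_events() */
	if (qdss->ops.enable_qdss_events)
		qdss->ops.enable_qdss_events(qdss, true);

	/* ... trace session ... */

	qdss->ops.enable_qdss_events(qdss, false);
	sde_hw_qdss_destroy(qdss);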

+ 11 - 2
msm/sde/sde_hw_sspp.c

@@ -360,6 +360,11 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
 		if (IS_UBWC_40_SUPPORTED(ctx->catalog->ubwc_version)) {
 			SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
 				SDE_FORMAT_IS_YUV(fmt) ? 0 : BIT(30));
+		} else if (IS_UBWC_10_SUPPORTED(ctx->catalog->ubwc_version)) {
+			alpha_en_mask = const_alpha_en ? BIT(31) : 0;
+			SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+				alpha_en_mask | (ctx->mdp->ubwc_swizzle & 0x1) |
+				BIT(8) | (ctx->mdp->highest_bank_bit << 4));
 		} else if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version)) {
 			alpha_en_mask = const_alpha_en ? BIT(31) : 0;
 			SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
@@ -686,7 +691,8 @@ static void _sde_hw_sspp_setup_excl_rect(struct sde_hw_pipe *ctx,
 	u32 size, xy;
 	u32 idx;
 	u32 reg_xy, reg_size;
-	u32 excl_ctrl, enable_bit;
+	u32 excl_ctrl = BIT(0);
+	u32 enable_bit;
 
 	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !excl_rect)
 		return;
@@ -706,7 +712,10 @@ static void _sde_hw_sspp_setup_excl_rect(struct sde_hw_pipe *ctx,
 	xy = (excl_rect->y << 16) | (excl_rect->x);
 	size = (excl_rect->h << 16) | (excl_rect->w);
 
-	excl_ctrl = SDE_REG_READ(c, SSPP_EXCL_REC_CTL + idx);
+	/* Set if multi-rect disabled, read+modify only if multi-rect enabled */
+	if (rect_index != SDE_SSPP_RECT_SOLO)
+		excl_ctrl = SDE_REG_READ(c, SSPP_EXCL_REC_CTL + idx);
+
 	if (!size) {
 		SDE_REG_WRITE(c, SSPP_EXCL_REC_CTL + idx,
 				excl_ctrl & ~enable_bit);
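
For reference, the UBWC 1.0 branch above packs the static control word as: constant-alpha enable in bit 31, the swizzle LSB in bit 0, a fixed mode bit 8, and the highest bank bit starting at bit 4. A worked example with assumed catalog values (const_alpha_en = true, ubwc_swizzle = 0x1, highest_bank_bit = 0x2):

	u32 val = BIT(31)	/* constant alpha enable */
		| (0x1 & 0x1)	/* swizzle LSB */
		| BIT(8)	/* UBWC 1.0 mode bit, mirrored in sde_hw_reset_ubwc() */
		| (0x2 << 4);	/* highest bank bit */
	/* val == 0x80000121 */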

+ 2 - 0
msm/sde/sde_hw_top.c

@@ -409,6 +409,8 @@ void sde_hw_reset_ubwc(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m)
 
 		if (IS_UBWC_30_SUPPORTED(m->ubwc_version))
 			reg |= BIT(10);
+		if (IS_UBWC_10_SUPPORTED(m->ubwc_version))
+			reg |= BIT(8);
 
 		SDE_REG_WRITE(&c, UBWC_STATIC, reg);
 	} else {

+ 2 - 1
msm/sde/sde_hw_vbif.c

@@ -235,7 +235,8 @@ static void _setup_vbif_ops(const struct sde_mdss_cfg *m,
 	if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
 		ops->set_qos_remap = sde_hw_set_qos_remap;
 	if (IS_SM8150_TARGET(m->hwversion) || IS_SM6150_TARGET(m->hwversion) ||
-			IS_SDMMAGPIE_TARGET(m->hwversion))
+			IS_SDMMAGPIE_TARGET(m->hwversion) ||
+			IS_SDMTRINKET_TARGET(m->hwversion))
 		ops->set_mem_type = sde_hw_set_mem_type_v1;
 	else
 		ops->set_mem_type = sde_hw_set_mem_type;

+ 5 - 0
msm/sde/sde_hw_wb.c

@@ -162,6 +162,11 @@ static void sde_hw_wb_setup_format(struct sde_hw_wb *ctx,
 			SDE_REG_WRITE(c, WB_UBWC_STATIC_CTRL,
 					(ctx->mdp->ubwc_swizzle << 0) |
 					(ctx->mdp->highest_bank_bit << 4));
+		if (IS_UBWC_10_SUPPORTED(ctx->catalog->ubwc_version))
+			SDE_REG_WRITE(c, WB_UBWC_STATIC_CTRL,
+					(ctx->mdp->ubwc_swizzle << 0) |
+					BIT(8) |
+					(ctx->mdp->highest_bank_bit << 4));
 	}
 
 	if (data->is_secure)

+ 120 - 3
msm/sde/sde_rm.c

@@ -16,6 +16,7 @@
 #include "sde_encoder.h"
 #include "sde_encoder.h"
 #include "sde_connector.h"
 #include "sde_connector.h"
 #include "sde_hw_dsc.h"
 #include "sde_hw_dsc.h"
+#include "sde_crtc.h"
 
 
 #define RESERVED_BY_OTHER(h, r) \
 #define RESERVED_BY_OTHER(h, r) \
 	(((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id)) ||\
 	(((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id)) ||\
@@ -879,8 +880,8 @@ static int _sde_rm_reserve_lms(
 	int i, rc = 0;
 
 	if (!reqs->topology->num_lm) {
-		SDE_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
-		return -EINVAL;
+		SDE_DEBUG("invalid number of lm: %d\n", reqs->topology->num_lm);
+		return 0;
 	}
 
 	/* Find a primary mixer */
@@ -994,6 +995,11 @@ static int _sde_rm_reserve_ctls(
 	struct sde_rm_hw_iter iter;
 	int i = 0;
 
+	if (!top->num_ctl) {
+		SDE_DEBUG("invalid number of ctl: %d\n", top->num_ctl);
+		return 0;
+	}
+
 	memset(&ctls, 0, sizeof(ctls));
 
 	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
@@ -1551,9 +1557,23 @@ static int _sde_rm_populate_requirements(
 	 * Set the requirement for LM which has CWB support if CWB is
 	 * found enabled.
 	 */
-	if (!RM_RQ_CWB(reqs) && sde_encoder_in_clone_mode(enc))
+	if (!RM_RQ_CWB(reqs) && sde_encoder_in_clone_mode(enc)) {
 		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_CWB);
 
+		/*
+		 * Topology selection based on connector mode is not valid
+		 * for CWB: the WB connector populates modes from the
+		 * max_mixer_width check while the primary may use dual LMs.
+		 * So check the number of datapaths active on the primary
+		 * and reserve the same number of LM/PP blocks for CWB.
+		 */
+		reqs->topology =
+			&rm->topology_tbl[SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE];
+		if (sde_crtc_get_num_datapath(crtc_state->crtc) == 1)
+			reqs->topology =
+				&rm->topology_tbl[SDE_RM_TOPOLOGY_SINGLEPIPE];
+	}
+
 	SDE_DEBUG("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
 			reqs->hw_res.display_num_of_h_tiles);
 	SDE_DEBUG("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
@@ -1904,3 +1924,100 @@ end:
 
 	return ret;
 }
+
+int sde_rm_ext_blk_create_reserve(struct sde_rm *rm,
+		struct sde_hw_blk *hw, struct drm_encoder *enc)
+{
+	struct sde_rm_hw_blk *blk;
+	struct sde_rm_rsvp *rsvp;
+	int ret = 0;
+
+	if (!rm || !hw || !enc) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	if (hw->type >= SDE_HW_BLK_MAX) {
+		SDE_ERROR("invalid HW type\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&rm->rm_lock);
+
+	rsvp = _sde_rm_get_rsvp(rm, enc);
+	if (!rsvp) {
+		rsvp = kzalloc(sizeof(*rsvp), GFP_KERNEL);
+		if (!rsvp) {
+			ret = -ENOMEM;
+			goto end;
+		}
+
+		rsvp->seq = ++rm->rsvp_next_seq;
+		rsvp->enc_id = enc->base.id;
+		list_add_tail(&rsvp->list, &rm->rsvps);
+
+		SDE_DEBUG("create rsvp %d for enc %d\n",
+					rsvp->seq, rsvp->enc_id);
+	}
+
+	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+	if (!blk) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	blk->type = hw->type;
+	blk->id = hw->id;
+	blk->hw = hw;
+	blk->rsvp = rsvp;
+	list_add_tail(&blk->list, &rm->hw_blks[hw->type]);
+
+	SDE_DEBUG("create blk %d %d for rsvp %d enc %d\n", blk->type, blk->id,
+					rsvp->seq, rsvp->enc_id);
+
+end:
+	mutex_unlock(&rm->rm_lock);
+	return ret;
+}
+
+int sde_rm_ext_blk_destroy(struct sde_rm *rm,
+		struct drm_encoder *enc)
+{
+	struct sde_rm_hw_blk *blk = NULL, *p;
+	struct sde_rm_rsvp *rsvp;
+	enum sde_hw_blk_type type;
+	int ret = 0;
+
+	if (!rm || !enc) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&rm->rm_lock);
+
+	rsvp = _sde_rm_get_rsvp(rm, enc);
+	if (!rsvp) {
+		ret = -ENOENT;
+		SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+		goto end;
+	}
+
+	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+		list_for_each_entry_safe(blk, p, &rm->hw_blks[type], list) {
+			if (blk->rsvp == rsvp) {
+				list_del(&blk->list);
+				SDE_DEBUG("del blk %d %d from rsvp %d enc %d\n",
+						blk->type, blk->id,
+						rsvp->seq, rsvp->enc_id);
+				kfree(blk);
+			}
+		}
+	}
+
+	SDE_DEBUG("del rsvp %d\n", rsvp->seq);
+	list_del(&rsvp->list);
+	kfree(rsvp);
+end:
+	mutex_unlock(&rm->rm_lock);
+	return ret;
+}

+ 22 - 7
msm/sde/sde_rm.h

@@ -247,13 +247,6 @@ bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *iter);
  */
 bool sde_rm_request_hw_blk(struct sde_rm *rm, struct sde_rm_hw_request *hw);
 
-/**
- * sde_rm_check_property_topctl - validate property bitmask before it is set
- * @val: user's proposed topology control bitmask
- * @Return: 0 on success or error
- */
-int sde_rm_check_property_topctl(uint64_t val);
-
 /**
  * sde_rm_cont_splash_res_init - Read the current MDSS configuration
  *	to update the splash data structure with the topology
@@ -297,4 +290,26 @@ static inline bool sde_rm_topology_is_dual_ctl(struct sde_rm *rm,
 
 	return rm->topology_tbl[topology].num_ctl == DUAL_CTL;
 }
+
+/**
+ * sde_rm_ext_blk_create_reserve - Create external HW blocks
+ *	in resource manager and reserve for specific encoder.
+ * @rm: SDE Resource Manager handle
+ * @hw: external HW block
+ * @enc: DRM Encoder handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_ext_blk_create_reserve(struct sde_rm *rm,
+				struct sde_hw_blk *hw,
+				struct drm_encoder *enc);
+
+/**
+ * sde_rm_ext_blk_destroy - Given the encoder for the display chain, release
+ *	the external HW blocks created for it.
+ * @rm: SDE Resource Manager handle
+ * @enc: DRM Encoder handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_ext_blk_destroy(struct sde_rm *rm,
+				struct drm_encoder *enc);
 #endif /* __SDE_RM_H__ */
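
The two prototypes above are meant to be used as a pair around an encoder's lifetime. A hedged sketch, assuming the external block is the QDSS hw created earlier and the resource manager is reachable as sde_kms->rm:

	/* At encoder setup: hand the externally created block to the RM. */
	ret = sde_rm_ext_blk_create_reserve(&sde_kms->rm, &qdss->base, drm_enc);
	if (ret)
		SDE_ERROR("failed to reserve ext blk, ret %d\n", ret);

	/* At encoder teardown: drop every ext blk tied to this encoder. */
	sde_rm_ext_blk_destroy(&sde_kms->rm, drm_enc);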

+ 22 - 9
msm/sde_dbg.c

@@ -238,6 +238,7 @@ static struct sde_dbg_base {
 	u32 cur_evt_index;
 	u32 dbgbus_dump_idx;
 	u32 vbif_dbgbus_dump_idx;
+	enum sde_dbg_dump_context dump_mode;
 } sde_dbg_base;
 
 /* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */
@@ -2827,6 +2828,17 @@ static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
 	{0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
 };
 
+/**
+ * _sde_power_check - check if a power vote is needed before register access
+ * @dump_mode: context from which the dump was requested
+ * Return: true if pm_runtime votes must be taken; false otherwise
+ */
+static inline bool _sde_power_check(enum sde_dbg_dump_context dump_mode)
+{
+	return (dump_mode == SDE_DBG_DUMP_CLK_ENABLED_CTX ||
+		dump_mode == SDE_DBG_DUMP_IRQ_CTX) ? false : true;
+}
+
 /**
  * _sde_dump_reg - helper function for dumping rotator register set content
  * @dump_name: register set name
@@ -2838,8 +2850,7 @@ static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
  * @from_isr: whether being called from isr context
  */
 static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
-		char *base_addr, char *addr, size_t len_bytes, u32 **dump_mem,
-		bool from_isr)
+		char *base_addr, char *addr, size_t len_bytes, u32 **dump_mem)
 {
 	u32 in_log, in_mem, len_align, len_padded;
 	u32 *dump_addr = NULL;
@@ -2887,7 +2898,7 @@ static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
 		}
 	}
 
-	if (!from_isr) {
+	if (_sde_power_check(sde_dbg_base.dump_mode)) {
 		rc = pm_runtime_get_sync(sde_dbg_base.dev);
 		if (rc < 0) {
 			pr_err("failed to enable power %d\n", rc);
@@ -2919,7 +2930,7 @@ static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
 		addr += REG_DUMP_ALIGN;
 	}
 
-	if (!from_isr)
+	if (_sde_power_check(sde_dbg_base.dump_mode))
 		pm_runtime_put_sync(sde_dbg_base.dev);
 }
 
@@ -3022,7 +3033,7 @@ static void _sde_dump_reg_by_ranges(struct sde_dbg_reg_base *dbg,
 
 			_sde_dump_reg(range_node->range_name, reg_dump_flag,
 					dbg->base, addr, len,
-					&range_node->reg_dump, false);
+					&range_node->reg_dump);
 		}
 	} else {
 		/* If there is no list to dump ranges, dump all registers */
@@ -3033,7 +3044,7 @@ static void _sde_dump_reg_by_ranges(struct sde_dbg_reg_base *dbg,
 		addr = dbg->base;
 		len = dbg->max_offset;
 		_sde_dump_reg(dbg->name, reg_dump_flag, dbg->base, addr, len,
-				&dbg->reg_dump, false);
+				&dbg->reg_dump);
 	}
 }
 
@@ -3441,7 +3452,7 @@ static void _sde_dump_work(struct work_struct *work)
 		sde_dbg_base.dump_all, sde_dbg_base.dump_secure);
 }
 
-void sde_dbg_dump(bool queue_work, const char *name, ...)
+void sde_dbg_dump(enum sde_dbg_dump_context dump_mode, const char *name, ...)
 {
 	int i, index = 0;
 	bool do_panic = false;
@@ -3458,7 +3469,8 @@ void sde_dbg_dump(bool queue_work, const char *name, ...)
 	if (!sde_evtlog_is_enabled(sde_dbg_base.evtlog, SDE_EVTLOG_ALWAYS))
 		return;
 
-	if (queue_work && work_pending(&sde_dbg_base.dump_work))
+	if ((dump_mode == SDE_DBG_DUMP_IRQ_CTX) &&
+		work_pending(&sde_dbg_base.dump_work))
 		return;
 
 	blk_arr = &sde_dbg_base.req_dump_blks[0];
@@ -3467,6 +3479,7 @@ void sde_dbg_dump(bool queue_work, const char *name, ...)
 	memset(sde_dbg_base.req_dump_blks, 0,
 			sizeof(sde_dbg_base.req_dump_blks));
 	sde_dbg_base.dump_all = false;
+	sde_dbg_base.dump_mode = dump_mode;
 
 	va_start(args, name);
 	i = 0;
@@ -3509,7 +3522,7 @@ void sde_dbg_dump(bool queue_work, const char *name, ...)
 	}
 	va_end(args);
 
-	if (queue_work) {
+	if (dump_mode == SDE_DBG_DUMP_IRQ_CTX) {
 		/* schedule work to dump later */
 		sde_dbg_base.work_panic = do_panic;
 		sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work =

+ 24 - 6
msm/sde_dbg.h

@@ -42,6 +42,12 @@ enum sde_dbg_dump_flag {
 	SDE_DBG_DUMP_IN_MEM = BIT(1),
 };
 
+enum sde_dbg_dump_context {
+	SDE_DBG_DUMP_PROC_CTX,
+	SDE_DBG_DUMP_IRQ_CTX,
+	SDE_DBG_DUMP_CLK_ENABLED_CTX,
+};
+
 #define SDE_EVTLOG_DEFAULT_ENABLE (SDE_EVTLOG_CRITICAL | SDE_EVTLOG_IRQ)
 
 /*
@@ -126,8 +132,8 @@ extern struct sde_dbg_evtlog *sde_dbg_base_evtlog;
  *		Including the special name "panic" will trigger a panic after
  *		the dumping work has completed.
  */
-#define SDE_DBG_DUMP(...) sde_dbg_dump(false, __func__, ##__VA_ARGS__, \
-		SDE_DBG_DUMP_DATA_LIMITER)
+#define SDE_DBG_DUMP(...) sde_dbg_dump(SDE_DBG_DUMP_PROC_CTX, __func__, \
+		##__VA_ARGS__, SDE_DBG_DUMP_DATA_LIMITER)
 
 /**
  * SDE_DBG_DUMP_WQ - trigger dumping of all sde_dbg facilities, queuing the work
@@ -137,8 +143,19 @@ extern struct sde_dbg_evtlog *sde_dbg_base_evtlog;
  *		Including the special name "panic" will trigger a panic after
  *		the dumping work has completed.
  */
-#define SDE_DBG_DUMP_WQ(...) sde_dbg_dump(true, __func__, ##__VA_ARGS__, \
-		SDE_DBG_DUMP_DATA_LIMITER)
+#define SDE_DBG_DUMP_WQ(...) sde_dbg_dump(SDE_DBG_DUMP_IRQ_CTX, __func__, \
+		##__VA_ARGS__, SDE_DBG_DUMP_DATA_LIMITER)
+
+/**
+ * SDE_DBG_DUMP_CLK_EN - trigger dump without power votes; clocks already on
+ * @va_args:	list of named register dump ranges and regions to dump, as
+ *		registered previously through sde_dbg_reg_register_base and
+ *		sde_dbg_reg_register_dump_range.
+ *		Including the special name "panic" will trigger a panic after
+ *		the dumping work has completed.
+ */
+#define SDE_DBG_DUMP_CLK_EN(...) sde_dbg_dump(SDE_DBG_DUMP_CLK_ENABLED_CTX, \
+		__func__, ##__VA_ARGS__, SDE_DBG_DUMP_DATA_LIMITER)
 
 /**
  * SDE_DBG_EVT_CTRL - trigger a different driver events
@@ -241,7 +258,7 @@ void sde_dbg_destroy(void);
  *		the dumping work has completed.
  * Returns:	none
  */
-void sde_dbg_dump(bool queue_work, const char *name, ...);
+void sde_dbg_dump(enum sde_dbg_dump_context mode, const char *name, ...);
 
 /**
  * sde_dbg_ctrl - trigger specific actions for the driver with debugging
@@ -394,7 +411,8 @@ static inline void sde_dbg_destroy(void)
 {
 }
 
-static inline void sde_dbg_dump(bool queue_work, const char *name, ...)
+static inline void sde_dbg_dump(enum sde_dbg_dump_context mode,
+	const char *name, ...)
 {
 }
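
The three dump contexts replace the old queue_work flag and decide both whether the dump is deferred to the worker and whether _sde_dump_reg() takes pm_runtime votes. A usage sketch; the range names are placeholders for bases registered via sde_dbg_reg_register_base():

	/* Process context: dump inline, taking power votes as needed. */
	SDE_DBG_DUMP("sde", "dsi0_ctrl", "panic");

	/* IRQ context: defer to the dump worker, as SDE_DBG_DUMP_WQ did before. */
	SDE_DBG_DUMP_WQ("sde", "vbif_rt");

	/* Caller already holds clock votes: dump inline, skip pm_runtime. */
	SDE_DBG_DUMP_CLK_EN("sde");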