
disp: msm: sde: switch to WD vsync on unexpected panel jitter

Switch to the watchdog vsync source whenever panel jitter is
detected during frame transfer on command-mode displays. This
lets the HW finish processing the frame with the watchdog vsync
source. Switch back to the default vsync source once the frame
transfer is complete. This helps avoid MDP hangs on panels whose
TE signals deviate beyond the projected jitter bounds.

Change-Id: Ic3fa78d90e7f44cb0186857716ac27e72505fd32
Signed-off-by: Veera Sundaram Sankaran <[email protected]>
Veera Sundaram Sankaran committed 5 years ago
commit fb54f6e6e7
3 changed files with 186 additions and 30 deletions:
  1. msm/sde/sde_encoder.c (+31 -5)
  2. msm/sde/sde_encoder_phys.h (+32 -0)
  3. msm/sde/sde_encoder_phys_cmd.c (+123 -25)
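
For context before the per-file diffs: the patch decides whether to fall back to the watchdog vsync by checking whether the interval between consecutive TE timestamps falls outside the allowed jitter window derived from the panel's frame rate and jitter numerator/denominator. Below is a minimal, self-contained sketch of that bounds check; the types, names, and example values here are hypothetical stand-ins, not the driver code itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the fields read from msm_mode_info. */
struct mode_info {
	uint32_t frame_rate;    /* Hz */
	uint32_t jitter_numer;  /* allowed jitter, numerator */
	uint32_t jitter_denom;  /* allowed jitter, denominator */
};

/* Allowed TE interval window in nanoseconds, mirroring the helper added
 * in sde_encoder.c: jitter = frametime * numer / (denom * 100). */
static void get_jitter_bounds_ns(const struct mode_info *info,
				 uint64_t *l_bound, uint64_t *u_bound)
{
	uint64_t frametime_ns = 1000000000ULL / info->frame_rate;
	uint64_t jitter_ns = (info->jitter_numer * frametime_ns) /
			((uint64_t)info->jitter_denom * 100);

	*l_bound = frametime_ns - jitter_ns;
	*u_bound = frametime_ns + jitter_ns;
}

int main(void)
{
	/* Example: 60 Hz panel with a 2.0% jitter specification. */
	struct mode_info info = { 60, 20, 10 };
	uint64_t l, u, te_interval_ns = 18000000; /* measured TE gap: 18 ms */
	bool out_of_bounds;

	get_jitter_bounds_ns(&info, &l, &u);
	out_of_bounds = (te_interval_ns < l) || (te_interval_ns > u);

	/* An out-of-bounds interval is what makes the wr_ptr timeout path
	 * temporarily switch to the watchdog vsync source. */
	printf("bounds [%llu, %llu] ns, interval %llu ns -> %s\n",
	       (unsigned long long)l, (unsigned long long)u,
	       (unsigned long long)te_interval_ns,
	       out_of_bounds ? "switch to WD vsync" : "within bounds");
	return 0;
}

With the example 60 Hz panel and 2.0% jitter spec, the window is roughly 16.33-17.0 ms, so the 18 ms TE gap would trigger the temporary switch to the watchdog vsync source.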

+ 31 - 5
msm/sde/sde_encoder.c

@@ -1769,6 +1769,9 @@ static void _sde_encoder_update_vsync_source(struct sde_encoder_virt *sde_enc,
 		else
 			vsync_source = sde_enc->te_source;

+		SDE_EVT32(DRMID(&sde_enc->base), vsync_source, is_dummy,
+				disp_info->is_te_using_watchdog_timer);
+
 		for (i = 0; i < sde_enc->num_phys_encs; i++) {
 			phys = sde_enc->phys_encs[i];

@@ -1825,7 +1828,8 @@ static void _sde_encoder_dsc_disable(struct sde_encoder_virt *sde_enc)
 	 */
 }

-static int _sde_encoder_switch_to_watchdog_vsync(struct drm_encoder *drm_enc)
+int sde_encoder_helper_switch_vsync(struct drm_encoder *drm_enc,
+	 bool watchdog_te)
 {
 	struct sde_encoder_virt *sde_enc;
 	struct msm_display_info disp_info;
@@ -1840,7 +1844,7 @@ static int _sde_encoder_switch_to_watchdog_vsync(struct drm_encoder *drm_enc)
 	sde_encoder_control_te(drm_enc, false);

 	memcpy(&disp_info, &sde_enc->disp_info, sizeof(disp_info));
-	disp_info.is_te_using_watchdog_timer = true;
+	disp_info.is_te_using_watchdog_timer = watchdog_te;
 	_sde_encoder_update_vsync_source(sde_enc, &disp_info, false);

 	sde_encoder_control_te(drm_enc, true);
@@ -1901,7 +1905,7 @@ static int _sde_encoder_rsc_client_update_vsync_wait(
 			 * by generating the vsync from watchdog timer.
 			 */
 			if (crtc->base.id == wait_vblank_crtc_id)
-				_sde_encoder_switch_to_watchdog_vsync(drm_enc);
+				sde_encoder_helper_switch_vsync(drm_enc, true);
 		}
 	}

@@ -5696,6 +5700,28 @@ int sde_encoder_wait_for_event(struct drm_encoder *drm_enc,
 	return ret;
 }

+void sde_encoder_helper_get_jitter_bounds_ns(struct drm_encoder *drm_enc,
+		u64 *l_bound, u64 *u_bound)
+{
+	struct sde_encoder_virt *sde_enc;
+	u64 jitter_ns, frametime_ns;
+	struct msm_mode_info *info;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	info = &sde_enc->mode_info;
+
+	frametime_ns = (1 * 1000000000) / info->frame_rate;
+	jitter_ns =  (info->jitter_numer * frametime_ns) /
+				(info->jitter_denom * 100);
+	*l_bound = frametime_ns - jitter_ns;
+	*u_bound = frametime_ns + jitter_ns;
+}
+
 u32 sde_encoder_get_fps(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc;
@@ -5993,12 +6019,12 @@ int sde_encoder_display_failure_notification(struct drm_encoder *enc,
 		kthread_flush_work(&sde_enc->esd_trigger_work);
 	}

-	/**
+	/*
 	 * panel may stop generating te signal (vsync) during esd failure. rsc
 	 * hardware may hang without vsync. Avoid rsc hang by generating the
 	 * vsync from watchdog timer instead of panel.
 	 */
-	_sde_encoder_switch_to_watchdog_vsync(enc);
+	sde_encoder_helper_switch_vsync(enc, true);

 	if (!skip_pre_kickoff)
 		sde_encoder_wait_for_event(enc, MSM_ENC_TX_COMPLETE);

+ 32 - 0
msm/sde/sde_encoder_phys.h

@@ -25,6 +25,7 @@
 #define KICKOFF_TIMEOUT_MS		84
 #define KICKOFF_TIMEOUT_JIFFIES		msecs_to_jiffies(KICKOFF_TIMEOUT_MS)

+#define MAX_TE_PROFILE_COUNT		5
 /**
  * enum sde_enc_split_role - Role this physical encoder will play in a
  *	split-panel configuration, where one panel is master, and others slaves.
@@ -361,6 +362,17 @@ struct sde_encoder_phys_cmd_autorefresh {
 	wait_queue_head_t kickoff_wq;
 };

+/**
+ * struct sde_encoder_phys_cmd_te_timestamp - list node to keep track of
+ *     rd_ptr/TE timestamp
+ * @list: list node
+ * @timestamp: TE timestamp
+ */
+struct sde_encoder_phys_cmd_te_timestamp {
+	struct list_head list;
+	ktime_t timestamp;
+};
+
 /**
  * struct sde_encoder_phys_cmd - sub-class of sde_encoder_phys to handle command
  *	mode specific operations
@@ -371,6 +383,8 @@ struct sde_encoder_phys_cmd_autorefresh {
  * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
  * @pending_vblank_wq: Wait queue for blocking until VBLANK received
  * @wr_ptr_wait_success: log wr_ptr_wait success for release fence trigger
+ * @te_timestamp_list: List head for the TE timestamp list
+ * @te_timestamp: Array of size MAX_TE_PROFILE_COUNT te_timestamp_list elements
  */
 struct sde_encoder_phys_cmd {
 	struct sde_encoder_phys base;
@@ -380,6 +394,9 @@ struct sde_encoder_phys_cmd {
 	atomic_t pending_vblank_cnt;
 	wait_queue_head_t pending_vblank_wq;
 	bool wr_ptr_wait_success;
+	struct list_head te_timestamp_list;
+	struct sde_encoder_phys_cmd_te_timestamp
+			te_timestamp[MAX_TE_PROFILE_COUNT];
 };

 /**
@@ -558,6 +575,21 @@ int sde_encoder_helper_wait_event_timeout(
 		int32_t hw_id,
 		struct sde_encoder_wait_info *info);

+/*
+ * sde_encoder_helper_get_jitter_bounds_ns - get the allowed panel jitter bounds in nanoseconds
+ * @encoder: Pointer to drm encoder object
+ */
+void sde_encoder_helper_get_jitter_bounds_ns(struct drm_encoder *encoder,
+			u64 *l_bound, u64 *u_bound);
+
+/**
+ * sde_encoder_helper_switch_vsync - switch vsync source to WD or default
+ * @drm_enc:     Pointer to drm encoder structure
+ * @watchdog_te: switch vsync source to watchdog TE
+ */
+int sde_encoder_helper_switch_vsync(struct drm_encoder *drm_enc,
+		bool watchdog_te);
+
 /**
  * sde_encoder_helper_hw_reset - issue ctl hw reset
  *	This helper function may be optionally specified by physical
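
The te_timestamp[] array declared above is never allocated per TE; the entries are threaded onto te_timestamp_list once at init, and the rd_ptr interrupt (in sde_encoder_phys_cmd.c below) recycles the oldest node to the tail with a fresh timestamp, keeping the last MAX_TE_PROFILE_COUNT samples. Here is a hedged userspace sketch of that rotation pattern, with minimal list helpers standing in for the kernel's and no locking shown (the driver holds enc_spinlock around the rotation):

#include <stddef.h>
#include <stdio.h>

/* Minimal userspace stand-ins for the kernel list primitives used here. */
struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define MAX_TE_PROFILE_COUNT	5

/* Stand-in for sde_encoder_phys_cmd_te_timestamp, with a plain integer stamp. */
struct te_stamp {
	struct list_head list;
	long long timestamp_ns;
};

int main(void)
{
	struct te_stamp pool[MAX_TE_PROFILE_COUNT];
	struct list_head te_list, *pos;
	int i;

	/* Init-time setup: thread the fixed-size array onto the list,
	 * as sde_encoder_phys_cmd_init() does for te_timestamp[]. */
	INIT_LIST_HEAD(&te_list);
	for (i = 0; i < MAX_TE_PROFILE_COUNT; i++)
		list_add(&pool[i].list, &te_list);

	/* Per-TE rotation, as in the rd_ptr interrupt handler: take the
	 * oldest node from the head, stamp it, move it to the tail. */
	for (i = 0; i < 8; i++) {
		struct te_stamp *oldest =
			container_of(te_list.next, struct te_stamp, list);

		list_del_init(&oldest->list);
		oldest->timestamp_ns = 16666667LL * i; /* fake 60 Hz TEs */
		list_add_tail(&oldest->list, &te_list);
	}

	/* The list now holds the newest MAX_TE_PROFILE_COUNT timestamps,
	 * oldest at the head, newest at the tail. */
	for (pos = te_list.next; pos != &te_list; pos = pos->next)
		printf("%lld\n",
		       container_of(pos, struct te_stamp, list)->timestamp_ns);

	return 0;
}

The jitter-bounds walk in _sde_encoder_phys_cmd_needs_vsync_change() below then iterates this list newest-to-oldest and compares adjacent timestamps against the allowed window.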

+ 123 - 25
msm/sde/sde_encoder_phys_cmd.c

@@ -230,6 +230,8 @@ static void sde_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
 	u32 scheduler_status = INVALID_CTL_STATUS;
 	struct sde_hw_ctl *ctl;
 	struct sde_hw_pp_vsync_info info[MAX_CHANNELS_PER_ENC] = {{0}};
+	struct sde_encoder_phys_cmd_te_timestamp *te_timestamp;
+	unsigned long lock_flags;

 	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_intf)
 		return;
@@ -241,6 +243,16 @@ static void sde_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
 	if (ctl && ctl->ops.get_scheduler_status)
 		scheduler_status = ctl->ops.get_scheduler_status(ctl);

+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	te_timestamp = list_first_entry_or_null(&cmd_enc->te_timestamp_list,
+				struct sde_encoder_phys_cmd_te_timestamp, list);
+	if (te_timestamp) {
+		list_del_init(&te_timestamp->list);
+		te_timestamp->timestamp = ktime_get();
+		list_add_tail(&te_timestamp->list, &cmd_enc->te_timestamp_list);
+	}
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
 	sde_encoder_helper_get_pp_line_count(phys_enc->parent, info);
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
 		info[0].pp_idx, info[0].intf_idx,
@@ -1337,11 +1349,54 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff(
 	return ret;
 }

+static bool _sde_encoder_phys_cmd_needs_vsync_change(
+		struct sde_encoder_phys *phys_enc, ktime_t profile_timestamp)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+	struct sde_encoder_phys_cmd_te_timestamp *cur;
+	struct sde_encoder_phys_cmd_te_timestamp *prev = NULL;
+	ktime_t time_diff;
+	u64 l_bound = 0, u_bound = 0;
+	bool ret = false;
+	unsigned long lock_flags;
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	sde_encoder_helper_get_jitter_bounds_ns(phys_enc->parent,
+							&l_bound, &u_bound);
+	if (!l_bound || !u_bound) {
+		SDE_ERROR_CMDENC(cmd_enc, "invalid vsync jitter bounds\n");
+		return false;
+	}
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	list_for_each_entry_reverse(cur, &cmd_enc->te_timestamp_list, list) {
+		if (prev && ktime_after(cur->timestamp, profile_timestamp)) {
+			time_diff = ktime_sub(prev->timestamp, cur->timestamp);
+			if ((time_diff < l_bound) || (time_diff > u_bound)) {
+				ret = true;
+				break;
+			}
+		}
+		prev = cur;
+	}
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	if (ret) {
+		SDE_DEBUG_CMDENC(cmd_enc,
+		    "time_diff:%llu, prev:%llu, cur:%llu, jitter:%llu/%llu\n",
+			time_diff, prev->timestamp, cur->timestamp,
+			l_bound, u_bound);
+		SDE_EVT32(DRMID(phys_enc->parent),
+			(u32) (l_bound / 1000), (u32) (u_bound / 1000),
+			(u32) (time_diff / 1000), SDE_EVTLOG_ERROR);
+	}
+
+	return ret;
+}
+
 static int _sde_encoder_phys_cmd_wait_for_wr_ptr(
 		struct sde_encoder_phys *phys_enc)
 {
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
 	struct sde_encoder_wait_info wait_info = {0};
 	int ret;
 	bool frame_pending = true;
@@ -1369,29 +1424,9 @@ static int _sde_encoder_phys_cmd_wait_for_wr_ptr(
 		if (ctl && ctl->ops.get_start_state)
 			frame_pending = ctl->ops.get_start_state(ctl);

-		if (frame_pending)
-			SDE_ERROR_CMDENC(cmd_enc,
-				"wr_ptrt start interrupt wait failed\n");
-		else
-			ret = 0;
-
-		/*
-		 * Signaling the retire fence at wr_ptr timeout
-		 * to allow the next commit and avoid device freeze.
-		 * As wr_ptr timeout can occurs due to no read ptr,
-		 * updating pending_rd_ptr_cnt here may not cover all
-		 * cases. Hence signaling the retire fence.
-		 */
-		if (sde_encoder_phys_cmd_is_master(phys_enc) &&
-			atomic_add_unless(&phys_enc->pending_retire_fence_cnt,
-				-1, 0))
-			phys_enc->parent_ops.handle_frame_done(
-				phys_enc->parent, phys_enc,
-				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
+		ret = frame_pending ? ret : 0;
 	}

-	cmd_enc->wr_ptr_wait_success = (ret == 0) ? true : false;
-
 	return ret;
 }

@@ -1416,11 +1451,60 @@ static int sde_encoder_phys_cmd_wait_for_tx_complete(
 	return rc;
 }

+static int _sde_encoder_phys_cmd_handle_wr_ptr_timeout(
+		struct sde_encoder_phys *phys_enc,
+		ktime_t profile_timestamp)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	bool switch_te;
+	int ret = -ETIMEDOUT;
+
+	switch_te = _sde_encoder_phys_cmd_needs_vsync_change(
+				phys_enc, profile_timestamp);
+
+	SDE_EVT32(DRMID(phys_enc->parent), switch_te, SDE_EVTLOG_FUNC_ENTRY);
+
+	if (switch_te) {
+		SDE_DEBUG_CMDENC(cmd_enc,
+				"wr_ptr_irq wait failed, retry with WD TE\n");
+
+		/* switch to watchdog TE and wait again */
+		sde_encoder_helper_switch_vsync(phys_enc->parent, true);
+
+		ret = _sde_encoder_phys_cmd_wait_for_wr_ptr(phys_enc);
+
+		/* switch back to default TE */
+		sde_encoder_helper_switch_vsync(phys_enc->parent, false);
+	}
+
+	/*
+	 * Signaling the retire fence at wr_ptr timeout
+	 * to allow the next commit and avoid device freeze.
+	 */
+	if (ret == -ETIMEDOUT) {
+		SDE_ERROR_CMDENC(cmd_enc,
+			"wr_ptr_irq wait failed, switch_te:%d\n", switch_te);
+		SDE_EVT32(DRMID(phys_enc->parent), switch_te, SDE_EVTLOG_ERROR);
+
+		if (sde_encoder_phys_cmd_is_master(phys_enc) &&
+		  atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0))
+			phys_enc->parent_ops.handle_frame_done(
+				phys_enc->parent, phys_enc,
+				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
+	}
+
+	cmd_enc->wr_ptr_wait_success = (ret == 0) ? true : false;
+
+	return ret;
+}
+
 static int sde_encoder_phys_cmd_wait_for_commit_done(
 		struct sde_encoder_phys *phys_enc)
 {
 	int rc = 0, i, pending_cnt;
 	struct sde_encoder_phys_cmd *cmd_enc;
+	ktime_t profile_timestamp = ktime_get();

 	if (!phys_enc)
 		return -EINVAL;
@@ -1430,8 +1514,18 @@ static int sde_encoder_phys_cmd_wait_for_commit_done(
 	/* only required for master controller */
 	if (sde_encoder_phys_cmd_is_master(phys_enc)) {
 		rc = _sde_encoder_phys_cmd_wait_for_wr_ptr(phys_enc);
-		if (rc == -ETIMEDOUT)
-			goto wait_for_idle;
+		if (rc == -ETIMEDOUT) {
+			/*
+			 * Profile all the TE received after profile_timestamp
+			 * and if the jitter is more, switch to watchdog TE
+			 * and wait for wr_ptr again. Finally move back to
+			 * default TE.
+			 */
+			rc = _sde_encoder_phys_cmd_handle_wr_ptr_timeout(
+					phys_enc, profile_timestamp);
+			if (rc == -ETIMEDOUT)
+				goto wait_for_idle;
+		}

 		if (cmd_enc->autorefresh.cfg.enable)
 			rc = _sde_encoder_phys_cmd_wait_for_autorefresh_done(
@@ -1753,6 +1847,10 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
 	init_waitqueue_head(&cmd_enc->pending_vblank_wq);
 	atomic_set(&cmd_enc->autorefresh.kickoff_cnt, 0);
 	init_waitqueue_head(&cmd_enc->autorefresh.kickoff_wq);
+	INIT_LIST_HEAD(&cmd_enc->te_timestamp_list);
+	for (i = 0; i < MAX_TE_PROFILE_COUNT; i++)
+		list_add(&cmd_enc->te_timestamp[i].list,
+				&cmd_enc->te_timestamp_list);

 	SDE_DEBUG_CMDENC(cmd_enc, "created\n");