
disp: msm: sde: trigger pm_qos vote with encoder idle pc

Commit d46cae019ea6 ("disp: msm: sde: trigger pm_qos vote
with irq enable") tied the pm_qos vote to the irq
enable/disable state. Those irq enable/disable calls may be
made from atomic context, where the mutex_lock usage inside
the pm_qos APIs leads to scheduling issues. Move the vote
to the encoder idle power collapse (idle PC) path instead,
which runs in process context and can safely take sleeping
locks.

Change-Id: I2d22566fbfb5399c5d2d2a4efe882a1928cfbbf8
Signed-off-by: Dhaval Patel <[email protected]>
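
For context, a minimal sketch of the hazard and of the fix (not part
of the patch; helper names mirror this driver). Before this change,
the vote was driven from the irq enable path, which a caller may
reach from atomic context, while the dev_pm_qos APIs take a mutex
internally and can sleep:

	/* Before: unsafe if the caller of irq enable is atomic. */
	static int irq_enable_before(struct sde_kms *sde_kms, int irq_idx)
	{
		unsigned long flags;

		spin_lock_irqsave(&sde_kms->hw_intr->irq_lock, flags);
		sde_kms->hw_intr->ops.enable_irq_nolock(sde_kms->hw_intr,
				irq_idx);
		spin_unlock_irqrestore(&sde_kms->hw_intr->irq_lock, flags);

		/* Takes a mutex via dev_pm_qos; may sleep. */
		sde_kms_irq_enable_notify(sde_kms, true);
		return 0;
	}

	/*
	 * After: the vote is driven from the encoder idle power-collapse
	 * path, which is process context, so mutex_lock is legal.
	 */
	static void encoder_irq_control_after(struct drm_encoder *drm_enc,
			bool enable)
	{
		/* ... per-phys irq programming ... */
		sde_kms_cpu_vote_for_irq(sde_encoder_get_kms(drm_enc),
				enable);
	}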
commit 5a6facc39e
 msm/sde/sde_core_irq.c | 14 --------------
 msm/sde/sde_encoder.c  |  1 +
 msm/sde/sde_kms.c      | 13 ++++++++++---
 msm/sde/sde_kms.h      | 14 ++++++--------
 4 files changed, 17 insertions(+), 25 deletions(-)

diff --git a/msm/sde/sde_core_irq.c b/msm/sde/sde_core_irq.c
--- a/msm/sde/sde_core_irq.c
+++ b/msm/sde/sde_core_irq.c

@@ -97,7 +97,6 @@ static int _sde_core_irq_enable(struct sde_kms *sde_kms, int irq_idx)
 {
 	unsigned long irq_flags;
 	int ret = 0;
-	bool update_vote = false;
 
 	if (!sde_kms || !sde_kms->hw_intr ||
 			!sde_kms->irq_obj.enable_counts ||
@@ -128,16 +127,11 @@ static int _sde_core_irq_enable(struct sde_kms *sde_kms, int irq_idx)
 		spin_lock_irqsave(&sde_kms->hw_intr->irq_lock, irq_flags);
 		ret = sde_kms->hw_intr->ops.enable_irq_nolock(
 				sde_kms->hw_intr, irq_idx);
-		if (atomic_inc_return(&sde_kms->irq_obj.curr_irq_enable_count)
-				== 1)
-			update_vote = true;
 		spin_unlock_irqrestore(&sde_kms->hw_intr->irq_lock, irq_flags);
 	}
 
 	if (ret)
 		SDE_ERROR("Fail to enable IRQ for irq_idx:%d\n", irq_idx);
-	else if (update_vote)
-		sde_kms_irq_enable_notify(sde_kms, true);
 
 	SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
 
@@ -168,7 +162,6 @@ static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
 {
 	int ret = 0;
 	unsigned long irq_flags;
-	bool update_vote = false;
 
 	if (!sde_kms || !sde_kms->hw_intr || !sde_kms->irq_obj.enable_counts) {
 		SDE_ERROR("invalid params\n");
@@ -191,16 +184,10 @@ static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
 		&& atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0)
 		ret = sde_kms->hw_intr->ops.disable_irq_nolock(
 				sde_kms->hw_intr, irq_idx);
-
-	if (atomic_add_unless(&sde_kms->irq_obj.curr_irq_enable_count, -1, 0)
-		&& atomic_read(&sde_kms->irq_obj.curr_irq_enable_count) == 0)
-		update_vote = true;
 	spin_unlock_irqrestore(&sde_kms->hw_intr->irq_lock, irq_flags);
 
 	if (ret)
 		SDE_ERROR("Fail to disable IRQ for irq_idx:%d\n", irq_idx);
-	else if (update_vote)
-		sde_kms_irq_enable_notify(sde_kms, false);
 	SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
 
 	return ret;
@@ -500,7 +487,6 @@ void sde_core_irq_preinstall(struct sde_kms *sde_kms)
 		if (sde_kms->irq_obj.irq_counts)
 			atomic_set(&sde_kms->irq_obj.irq_counts[i], 0);
 	}
-	atomic_set(&sde_kms->irq_obj.curr_irq_enable_count, 0);
 }
 
 int sde_core_irq_postinstall(struct sde_kms *sde_kms)

diff --git a/msm/sde/sde_encoder.c b/msm/sde/sde_encoder.c
--- a/msm/sde/sde_encoder.c
+++ b/msm/sde/sde_encoder.c

@@ -1361,6 +1361,7 @@ void sde_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
 		if (phys && phys->ops.irq_control)
 			phys->ops.irq_control(phys, enable);
 	}
+	sde_kms_cpu_vote_for_irq(sde_encoder_get_kms(drm_enc), enable);
 
 }
 

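The encoder now owns the vote: one call per irq_control transition. A
hedged sketch of the expected call path (the exact caller chain is not
shown in this patch):

	/*
	 * sde_encoder_resource_control()             process context, may sleep
	 *   -> sde_encoder_irq_control(enc, enable)
	 *        -> phys->ops.irq_control(phys, enable)    per-phys irq on/off
	 *        -> sde_kms_cpu_vote_for_irq(kms, enable)  mutex + pm_qos vote
	 */
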
diff --git a/msm/sde/sde_kms.c b/msm/sde/sde_kms.c
--- a/msm/sde/sde_kms.c
+++ b/msm/sde/sde_kms.c

@@ -3130,12 +3130,18 @@ static void _sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms)
 	}
 }
 
-void sde_kms_irq_enable_notify(struct sde_kms *sde_kms, bool enable)
+void sde_kms_cpu_vote_for_irq(struct sde_kms *sde_kms, bool enable)
 {
-	if (enable)
+	struct msm_drm_private *priv = sde_kms->dev->dev_private;
+
+	mutex_lock(&priv->phandle.phandle_lock);
+
+	if (enable && atomic_inc_return(&sde_kms->irq_vote_count) == 1)
 		_sde_kms_update_pm_qos_irq_request(sde_kms);
-	else
+	else if (!enable && atomic_dec_return(&sde_kms->irq_vote_count) == 0)
 		_sde_kms_remove_pm_qos_irq_request(sde_kms);
+
+	mutex_unlock(&priv->phandle.phandle_lock);
 }
 
 static void sde_kms_irq_affinity_notify(
@@ -3670,6 +3676,7 @@ static int sde_kms_hw_init(struct msm_kms *kms)
 
 	atomic_set(&sde_kms->detach_sec_cb, 0);
 	atomic_set(&sde_kms->detach_all_cb, 0);
+	atomic_set(&sde_kms->irq_vote_count, 0);
 
 	/*
 	 * Support format modifiers for compression etc.

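The vote is reference counted: only the 0 -> 1 transition applies the
pm_qos request and only the 1 -> 0 transition removes it. A standalone
sketch of that pattern (hypothetical userspace demo; C11 atomics stand
in for the kernel's atomic_t):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int vote_count;	/* zero-initialized, like irq_vote_count */

	static void apply_pm_qos(bool on)	/* stand-in for the real request */
	{
		printf("pm_qos vote %s\n", on ? "applied" : "removed");
	}

	static void cpu_vote_for_irq(bool enable)
	{
		if (enable && atomic_fetch_add(&vote_count, 1) + 1 == 1)
			apply_pm_qos(true);	/* first voter: 0 -> 1 */
		else if (!enable && atomic_fetch_sub(&vote_count, 1) - 1 == 0)
			apply_pm_qos(false);	/* last voter: 1 -> 0 */
	}

	int main(void)
	{
		cpu_vote_for_irq(true);		/* applies the vote */
		cpu_vote_for_irq(true);		/* no-op: already voted */
		cpu_vote_for_irq(false);	/* no-op: one voter remains */
		cpu_vote_for_irq(false);	/* removes the vote */
		return 0;
	}

In the kernel version, holding phandle_lock across the counter
transition and the pm_qos update keeps the side effect from racing
with other voters.
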
diff --git a/msm/sde/sde_kms.h b/msm/sde/sde_kms.h
--- a/msm/sde/sde_kms.h
+++ b/msm/sde/sde_kms.h

@@ -222,8 +222,6 @@ struct sde_irq_callback {
  * @enable_counts array of IRQ enable counts
  * @cb_lock:      callback lock
  * @debugfs_file: debugfs file for irq statistics
- * @curr_irq_enable_count: Atomic counter keep track of total current irq enable
- *                         It is used to keep pm_qos vote on CPU.
  */
 struct sde_irq {
 	u32 total_irqs;
@@ -232,7 +230,6 @@ struct sde_irq {
 	atomic_t *irq_counts;
 	spinlock_t cb_lock;
 	struct dentry *debugfs_file;
-	atomic_t curr_irq_enable_count;
 };
 
 /**
@@ -308,6 +305,7 @@ struct sde_kms {
 	bool pm_suspend_clk_dump;
 
 	cpumask_t irq_cpu_mask;
+	atomic_t irq_vote_count;
 	struct dev_pm_qos_request pm_qos_irq_req[NR_CPUS];
 	struct irq_affinity_notify affinity_notify;
 };
@@ -684,12 +682,12 @@ void sde_kms_timeline_status(struct drm_device *dev);
 int sde_kms_handle_recovery(struct drm_encoder *encoder);
 
 /**
- * Notifies the irq enable on first interrupt enable and irq disable
- * on last interrupt disable.
- * @sde_kms: poiner to sde_kms structure
- * @enable: true if irq enabled, false for disabled state.
+ * sde_kms_cpu_vote_for_irq() - API to keep pm_qos latency vote on cpu
+ * where mdss_irq is scheduled
+ * @sde_kms: pointer to sde_kms structure
+ * @enable: true if enable request, false otherwise.
  */
-void sde_kms_irq_enable_notify(struct sde_kms *sde_kms, bool enable);
+void sde_kms_cpu_vote_for_irq(struct sde_kms *sde_kms, bool enable);
 
 /**
  * sde_kms_get_io_resources() - reads associated register range