disp: msm: sde: trigger pm_qos vote with irq enable

Video mode display keeps the MDP clocks in ON state but
disables irq during static screen to avoid cpu wakeup.
In such case, CPU pm_qos vote should also be removed
to allow LPM transition. This change triggers the
pm_qos vote based on the MDP interrupt enable count
instead of the runtime_pm callback. It also works
for multi-display concurrency.

Change-Id: I7a60f3f593e409269e00abd7499c4a5756035615
Signed-off-by: Dhaval Patel <pdhaval@codeaurora.org>
This commit is contained in:
Dhaval Patel
2020-05-18 17:57:59 -07:00
parent 281e18b317
commit d46cae019e
3 changed files with 36 additions and 5 deletions

View File

@@ -97,6 +97,7 @@ static int _sde_core_irq_enable(struct sde_kms *sde_kms, int irq_idx)
{
unsigned long irq_flags;
int ret = 0;
bool update_vote = false;
if (!sde_kms || !sde_kms->hw_intr ||
!sde_kms->irq_obj.enable_counts ||
@@ -127,11 +128,16 @@ static int _sde_core_irq_enable(struct sde_kms *sde_kms, int irq_idx)
spin_lock_irqsave(&sde_kms->hw_intr->irq_lock, irq_flags);
ret = sde_kms->hw_intr->ops.enable_irq_nolock(
sde_kms->hw_intr, irq_idx);
if (atomic_inc_return(&sde_kms->irq_obj.curr_irq_enable_count)
== 1)
update_vote = true;
spin_unlock_irqrestore(&sde_kms->hw_intr->irq_lock, irq_flags);
}
if (ret)
SDE_ERROR("Fail to enable IRQ for irq_idx:%d\n", irq_idx);
else if (update_vote)
sde_kms_irq_enable_notify(sde_kms, true);
SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
@@ -162,6 +168,7 @@ static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
{
int ret = 0;
unsigned long irq_flags;
bool update_vote = false;
if (!sde_kms || !sde_kms->hw_intr || !sde_kms->irq_obj.enable_counts) {
SDE_ERROR("invalid params\n");
@@ -184,10 +191,16 @@ static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
&& atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0)
ret = sde_kms->hw_intr->ops.disable_irq_nolock(
sde_kms->hw_intr, irq_idx);
if (atomic_add_unless(&sde_kms->irq_obj.curr_irq_enable_count, -1, 0)
&& atomic_read(&sde_kms->irq_obj.curr_irq_enable_count) == 0)
update_vote = true;
spin_unlock_irqrestore(&sde_kms->hw_intr->irq_lock, irq_flags);
if (ret)
SDE_ERROR("Fail to disable IRQ for irq_idx:%d\n", irq_idx);
else if (update_vote)
sde_kms_irq_enable_notify(sde_kms, false);
SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
return ret;
@@ -484,6 +497,7 @@ void sde_core_irq_preinstall(struct sde_kms *sde_kms)
if (sde_kms->irq_obj.irq_counts)
atomic_set(&sde_kms->irq_obj.irq_counts[i], 0);
}
atomic_set(&sde_kms->irq_obj.curr_irq_enable_count, 0);
}
int sde_core_irq_postinstall(struct sde_kms *sde_kms)

View File

@@ -3004,7 +3004,7 @@ static int _sde_kms_active_override(struct sde_kms *sde_kms, bool enable)
return 0;
}
static void sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms)
static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms)
{
struct device *cpu_dev;
int cpu = 0;
@@ -3033,7 +3033,7 @@ static void sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms)
}
}
static void sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms)
static void _sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms)
{
struct device *cpu_dev;
int cpu = 0;
@@ -3057,6 +3057,14 @@ static void sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms)
}
}
/*
 * sde_kms_irq_enable_notify - update the CPU pm_qos vote on irq state change.
 *
 * Called by the core irq code when the first MDP interrupt is enabled
 * (enable = true) or the last one is disabled (enable = false), so the
 * pm_qos vote tracks actual interrupt activity rather than runtime_pm
 * state. Requests the pm_qos irq vote on enable and removes it on
 * disable, allowing the CPU to enter LPM during static screen.
 */
void sde_kms_irq_enable_notify(struct sde_kms *sde_kms, bool enable)
{
if (enable)
_sde_kms_update_pm_qos_irq_request(sde_kms);
else
_sde_kms_remove_pm_qos_irq_request(sde_kms);
}
static void sde_kms_irq_affinity_notify(
struct irq_affinity_notify *affinity_notify,
const cpumask_t *mask)
@@ -3077,7 +3085,7 @@ static void sde_kms_irq_affinity_notify(
// request vote with updated irq cpu mask
if (sde_kms->irq_enabled)
sde_kms_update_pm_qos_irq_request(sde_kms);
_sde_kms_update_pm_qos_irq_request(sde_kms);
mutex_unlock(&priv->phandle.phandle_lock);
}
@@ -3104,9 +3112,7 @@ static void sde_kms_handle_power_event(u32 event_type, void *usr)
sde_kms_init_shared_hw(sde_kms);
_sde_kms_set_lutdma_vbif_remap(sde_kms);
sde_kms->first_kickoff = true;
sde_kms_update_pm_qos_irq_request(sde_kms);
} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
sde_kms_remove_pm_qos_irq_request(sde_kms);
sde_irq_update(msm_kms, false);
sde_kms->first_kickoff = false;
_sde_kms_active_override(sde_kms, true);

View File

@@ -222,6 +222,8 @@ struct sde_irq_callback {
* @enable_counts array of IRQ enable counts
* @cb_lock: callback lock
* @debugfs_file: debugfs file for irq statistics
* @curr_irq_enable_count: Atomic counter tracking the total number of
* currently enabled irqs; used to hold the pm_qos vote on the CPU.
*/
struct sde_irq {
u32 total_irqs;
@@ -230,6 +232,7 @@ struct sde_irq {
atomic_t *irq_counts;
spinlock_t cb_lock;
struct dentry *debugfs_file;
atomic_t curr_irq_enable_count;
};
/**
@@ -667,4 +670,12 @@ void sde_kms_timeline_status(struct drm_device *dev);
*/
int sde_kms_handle_recovery(struct drm_encoder *encoder);
/**
* Notifies of the irq state change: called on the first interrupt enable
* and on the last interrupt disable.
* @sde_kms: pointer to sde_kms structure
* @enable: true if an irq was enabled, false if disabled.
*/
void sde_kms_irq_enable_notify(struct sde_kms *sde_kms, bool enable);
#endif /* __sde_kms_H__ */