disp: msm: sde: qos vote for all cpus during vm transition

For a proxy-scheduled VCPU such as the TUI VM's, the assignment to a
physical core is a runtime decision made by the HLOS scheduler and may
change frequently. A pm_qos vote placed by the PVM on specific CPUs is
therefore not sufficient to bound IRQ latency. Update the vote to cover
all possible CPUs during TVM entry and remove it again during TVM exit.

Change-Id: Iab5cb5f57e2389ee57689ba2ab69394376f59788
Signed-off-by: Mahadevan <quic_mahap@quicinc.com>
Author:  Mahadevan <quic_mahap@quicinc.com>
Date:    2023-02-08 20:33:58 +05:30
Commit:  7c8a28d45f
Parent:  1ca5ff7768
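
Note: the change relies on the kernel's per-device PM QoS interface,
where each CPU device holds its own resume-latency request. Below is a
minimal, self-contained sketch of that pattern; it is not the driver
source, and the helper names vote_irq_latency_all_cpus,
drop_irq_latency_all_cpus and the static request array are illustrative
assumptions only.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request irq_qos_req[NR_CPUS];

/* Place (or refresh) a resume-latency vote on every possible CPU. */
static void vote_irq_latency_all_cpus(s32 latency_us)
{
	struct device *cpu_dev;
	int cpu;

	for_each_possible_cpu(cpu) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev)
			continue;

		if (dev_pm_qos_request_active(&irq_qos_req[cpu]))
			dev_pm_qos_update_request(&irq_qos_req[cpu],
						  latency_us);
		else
			dev_pm_qos_add_request(cpu_dev, &irq_qos_req[cpu],
					       DEV_PM_QOS_RESUME_LATENCY,
					       latency_us);
	}
}

/* Drop the vote on every CPU that still holds an active request. */
static void drop_irq_latency_all_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (dev_pm_qos_request_active(&irq_qos_req[cpu]))
			dev_pm_qos_remove_request(&irq_qos_req[cpu]);
	}
}

In the patch below, the equivalent add/update/remove calls operate on
sde_kms->pm_qos_irq_req[] and are driven with CPU_MASK_ALL at TVM entry
and exit, rather than only the CPUs in the current IRQ affinity mask.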


@@ -1084,6 +1084,62 @@ static struct drm_crtc *sde_kms_vm_get_vm_crtc(
 	return vm_crtc;
 }
 
+static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms, const cpumask_t *mask)
+{
+	struct device *cpu_dev;
+	int cpu = 0;
+	u32 cpu_irq_latency = sde_kms->catalog->perf.cpu_irq_latency;
+
+	// save irq cpu mask
+	sde_kms->irq_cpu_mask = *mask;
+
+	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
+		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
+		return;
+	}
+
+	for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
+				cpu);
+			continue;
+		}
+
+		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
+			dev_pm_qos_update_request(&sde_kms->pm_qos_irq_req[cpu],
+				cpu_irq_latency);
+		else
+			dev_pm_qos_add_request(cpu_dev,
+				&sde_kms->pm_qos_irq_req[cpu],
+				DEV_PM_QOS_RESUME_LATENCY,
+				cpu_irq_latency);
+	}
+}
+
+static void _sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms, const cpumask_t *mask)
+{
+	struct device *cpu_dev;
+	int cpu = 0;
+
+	if (cpumask_empty(mask)) {
+		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
+		return;
+	}
+
+	for_each_cpu(cpu, mask) {
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
+				cpu);
+			continue;
+		}
+
+		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
+			dev_pm_qos_remove_request(
+					&sde_kms->pm_qos_irq_req[cpu]);
+	}
+}
+
 int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
 		struct drm_atomic_state *state)
 {
@@ -1121,6 +1177,8 @@ int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
 	if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
 		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);
 
+	_sde_kms_remove_pm_qos_irq_request(sde_kms, &CPU_MASK_ALL);
+
 	/* enable the display path IRQ's */
 	drm_for_each_encoder_mask(encoder, crtc->dev,
 			crtc->state->encoder_mask) {
@@ -1411,6 +1469,7 @@ int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
 	if (is_primary) {
+		_sde_kms_update_pm_qos_irq_request(sde_kms, &CPU_MASK_ALL);
 		/* disable vblank events */
 		drm_crtc_vblank_off(crtc);
@@ -4491,60 +4550,6 @@ static int _sde_kms_active_override(struct sde_kms *sde_kms, bool enable)
 	return 0;
 }
 
-static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms)
-{
-	struct device *cpu_dev;
-	int cpu = 0;
-	u32 cpu_irq_latency = sde_kms->catalog->perf.cpu_irq_latency;
-
-	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
-		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
-		return;
-	}
-
-	for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
-		cpu_dev = get_cpu_device(cpu);
-		if (!cpu_dev) {
-			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
-				cpu);
-			continue;
-		}
-
-		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
-			dev_pm_qos_update_request(&sde_kms->pm_qos_irq_req[cpu],
-				cpu_irq_latency);
-		else
-			dev_pm_qos_add_request(cpu_dev,
-				&sde_kms->pm_qos_irq_req[cpu],
-				DEV_PM_QOS_RESUME_LATENCY,
-				cpu_irq_latency);
-	}
-}
-
-static void _sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms)
-{
-	struct device *cpu_dev;
-	int cpu = 0;
-
-	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
-		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
-		return;
-	}
-
-	for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
-		cpu_dev = get_cpu_device(cpu);
-		if (!cpu_dev) {
-			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
-				cpu);
-			continue;
-		}
-
-		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
-			dev_pm_qos_remove_request(
-					&sde_kms->pm_qos_irq_req[cpu]);
-	}
-}
-
 void sde_kms_cpu_vote_for_irq(struct sde_kms *sde_kms, bool enable)
 {
 	struct msm_drm_private *priv = sde_kms->dev->dev_private;
@@ -4552,9 +4557,9 @@ void sde_kms_cpu_vote_for_irq(struct sde_kms *sde_kms, bool enable)
 	mutex_lock(&priv->phandle.phandle_lock);
 	if (enable && atomic_inc_return(&sde_kms->irq_vote_count) == 1)
-		_sde_kms_update_pm_qos_irq_request(sde_kms);
+		_sde_kms_update_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask);
 	else if (!enable && atomic_dec_return(&sde_kms->irq_vote_count) == 0)
-		_sde_kms_remove_pm_qos_irq_request(sde_kms);
+		_sde_kms_remove_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask);
 	mutex_unlock(&priv->phandle.phandle_lock);
 }
@@ -4574,13 +4579,11 @@ static void sde_kms_irq_affinity_notify(
 	mutex_lock(&priv->phandle.phandle_lock);
-	_sde_kms_remove_pm_qos_irq_request(sde_kms);
-
-	// save irq cpu mask
-	sde_kms->irq_cpu_mask = *mask;
+	_sde_kms_remove_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask);
 
 	// request vote with updated irq cpu mask
 	if (atomic_read(&sde_kms->irq_vote_count))
-		_sde_kms_update_pm_qos_irq_request(sde_kms);
+		_sde_kms_update_pm_qos_irq_request(sde_kms, mask);
 
 	mutex_unlock(&priv->phandle.phandle_lock);
 }