@@ -2945,6 +2945,86 @@ static int _sde_kms_active_override(struct sde_kms *sde_kms, bool enable)
 	return 0;
 }
 
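+/*
+ * Vote for low cpu resume latency on each cpu that services the display
+ * interrupt, so irq handling is not stalled behind deep idle-state exit.
+ */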
+static void sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms)
+{
+	struct device *cpu_dev;
+	int cpu = 0;
+
+	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
+		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
+		return;
+	}
+
+	for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
+				cpu);
+			continue;
+		}
+
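+		/* update the existing request, or add one on first use */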
+		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
+			dev_pm_qos_update_request(&sde_kms->pm_qos_irq_req[cpu],
+				sde_kms->catalog->perf.cpu_dma_latency);
+		else
+			dev_pm_qos_add_request(cpu_dev,
+				&sde_kms->pm_qos_irq_req[cpu],
+				DEV_PM_QOS_RESUME_LATENCY,
+				sde_kms->catalog->perf.cpu_dma_latency);
+	}
+}
+
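+/*
+ * Remove the per-cpu qos votes added above, letting the cpus enter deep
+ * idle states again while the display is powered down.
+ */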
+static void sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms)
+{
+	struct device *cpu_dev;
+	int cpu = 0;
+
+	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
+		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
+		return;
+	}
+
+	for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
+				cpu);
+			continue;
+		}
+
+		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
+			dev_pm_qos_remove_request(
+				&sde_kms->pm_qos_irq_req[cpu]);
+	}
+}
+
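+/*
+ * Called when the display irq migrates to a new set of cpus; moves the
+ * pm_qos votes to follow the irq.
+ */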
+static void sde_kms_irq_affinity_notify(
+		struct irq_affinity_notify *affinity_notify,
+		const cpumask_t *mask)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms = container_of(affinity_notify,
+			struct sde_kms, affinity_notify);
+
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
+		return;
+
+	priv = sde_kms->dev->dev_private;
+
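+	/* hold phandle_lock so the mask update and qos vote stay atomic
+	 * with respect to the display power-event path below
+	 */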
+	mutex_lock(&priv->phandle.phandle_lock);
+
+	/* save irq cpu mask */
+	sde_kms->irq_cpu_mask = *mask;
+
+	/* request vote with updated irq cpu mask */
+	if (sde_kms->irq_enabled)
+		sde_kms_update_pm_qos_irq_request(sde_kms);
+
+	mutex_unlock(&priv->phandle.phandle_lock);
+}
+
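+/* nothing to release; the notifier is embedded in struct sde_kms */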
+static void sde_kms_irq_affinity_release(struct kref *ref) {}
+
 static void sde_kms_handle_power_event(u32 event_type, void *usr)
 {
 	struct sde_kms *sde_kms = usr;
@@ -2963,7 +3043,9 @@ static void sde_kms_handle_power_event(u32 event_type, void *usr)
 		sde_kms_init_shared_hw(sde_kms);
 		_sde_kms_set_lutdma_vbif_remap(sde_kms);
 		sde_kms->first_kickoff = true;
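+		/* re-apply the irq qos vote now that the display is powered on */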
+		sde_kms_update_pm_qos_irq_request(sde_kms);
 	} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
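+		/* drop the irq qos vote before display power is removed */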
+		sde_kms_remove_pm_qos_irq_request(sde_kms);
 		sde_irq_update(msm_kms, false);
 		sde_kms->first_kickoff = false;
 		_sde_kms_active_override(sde_kms, true);
@@ -3378,7 +3460,7 @@ static int sde_kms_hw_init(struct msm_kms *kms)
 	struct drm_device *dev;
 	struct msm_drm_private *priv;
 	struct platform_device *platformdev;
-	int i, rc = -EINVAL;
+	int i, irq_num, rc = -EINVAL;
 
 	if (!kms) {
 		SDE_ERROR("invalid kms\n");
@@ -3452,6 +3534,14 @@ static int sde_kms_hw_init(struct msm_kms *kms)
 
 		pm_runtime_put_sync(sde_kms->dev->dev);
 	}
+
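+	/*
+	 * Register for affinity-change notifications on the display irq so
+	 * the per-cpu qos votes can follow the irq to its new cpus.
+	 */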
+	sde_kms->affinity_notify.notify = sde_kms_irq_affinity_notify;
+	sde_kms->affinity_notify.release = sde_kms_irq_affinity_release;
+
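+	/* irq index 0 of the sde platform device; assumed to be the
+	 * primary display interrupt
+	 */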
+	irq_num = platform_get_irq(to_platform_device(sde_kms->dev->dev), 0);
+	SDE_DEBUG("Registering for notification of irq_num: %d\n", irq_num);
+	irq_set_affinity_notifier(irq_num, &sde_kms->affinity_notify);
+
 	return 0;
 
 hw_init_err: