disp: msm: sde: cancel delayed work items during TUI transition

Delayed work items may touch HW registers. If these work items
run while the HW is not owned by this VM, they will lead to
invalid accesses. This happens in video mode, since the HAL does
not disable idle power-collapse in this mode. It can also happen
with the ESD status work if lastclose or a TUI transition
failure occurs.

Although there is a contract with user mode to turn off certain
features, the kernel cannot rely on it to always do the right
thing. Prevent potential crashes in these corner cases by
cancelling all delayed work items whenever HW ownership is
transferred.

Change-Id: I08da17f2ce72bf2fddf71924c3e8edd2e2715be8
Signed-off-by: Steve Cohen <cohens@codeaurora.org>
Steve Cohen
2021-07-09 19:31:03 -04:00
Parent a5da9d0045
Commit a42fd877c7
5 files changed, 113 insertions(+), 63 deletions(-)
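
For background, the work items being cancelled are ordinary kernel
delayed_work instances (the connector ESD status check, the encoder
idle power-collapse timer). A minimal sketch of the cancellation
pattern at the ownership-transfer boundary, with hypothetical names
(the real helpers are the SDE ones in the diff below):

#include <linux/workqueue.h>

/* Hypothetical per-display context; the driver actually keeps its
 * delayed work on the connector, encoder and CRTC objects. */
struct disp_ctx {
	struct delayed_work esd_work;     /* periodic ESD status check */
	struct delayed_work idle_pc_work; /* idle power-collapse timer */
};

static void disp_hand_off_hw(struct disp_ctx *ctx)
{
	/*
	 * Cancel synchronously before the HW is handed to the other VM:
	 * once ownership is gone, a register access from one of these
	 * handlers is exactly the invalid access the commit describes.
	 */
	cancel_delayed_work_sync(&ctx->esd_work);
	cancel_delayed_work_sync(&ctx->idle_pc_work);
}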


@@ -1049,6 +1049,7 @@ int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
 	struct drm_connector *connector;
 	struct sde_vm_ops *vm_ops;
 	struct sde_crtc_state *cstate;
+	struct drm_connector_list_iter iter;
 	enum sde_crtc_vm_req vm_req;
 	int rc = 0;
@@ -1085,9 +1086,11 @@ int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
 	}
 
 	/* Schedule ESD work */
-	list_for_each_entry(connector, &ddev->mode_config.connector_list, head)
+	drm_connector_list_iter_begin(ddev, &iter);
+	drm_for_each_connector_iter(connector, &iter)
 		if (drm_connector_mask(connector) & crtc->state->connector_mask)
 			sde_connector_schedule_status_work(connector, true);
+	drm_connector_list_iter_end(&iter);
 
 	/* enable vblank events */
 	drm_crtc_vblank_on(crtc);
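
The hunk above also switches the connector walk from a bare
list_for_each_entry() over mode_config.connector_list to the
drm_connector_list_iter API, which takes references and the connector
list lock internally so the walk is safe against connector hotplug.
This is stock DRM core API; the general pattern is:

struct drm_connector *connector;
struct drm_connector_list_iter iter;

drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
	/* per-connector work; connectors can come and go at runtime,
	 * so the iterator, not the raw list head, is the safe walk */
}
drm_connector_list_iter_end(&iter);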
@@ -1296,6 +1299,72 @@ static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms,
 	}
 }
 
+static void sde_kms_cancel_delayed_work(struct drm_crtc *crtc)
+{
+	struct drm_connector *connector;
+	struct drm_connector_list_iter iter;
+	struct drm_encoder *encoder;
+
+	/* Cancel CRTC work */
+	sde_crtc_cancel_delayed_work(crtc);
+
+	/* Cancel ESD work */
+	drm_connector_list_iter_begin(crtc->dev, &iter);
+	drm_for_each_connector_iter(connector, &iter)
+		if (drm_connector_mask(connector) & crtc->state->connector_mask)
+			sde_connector_schedule_status_work(connector, false);
+	drm_connector_list_iter_end(&iter);
+
+	/* Cancel Idle-PC work */
+	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
+		if (sde_encoder_in_clone_mode(encoder))
+			continue;
+
+		sde_encoder_cancel_delayed_work(encoder);
+	}
+}
+
+int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
+	struct drm_atomic_state *state, bool is_primary)
+{
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	int rc = 0;
+
+	crtc = sde_kms_vm_get_vm_crtc(state);
+	if (!crtc)
+		return 0;
+
+	/* if vm_req is enabled, once CRTC on the commit is guaranteed */
+	sde_kms_wait_for_frame_transfer_complete(&sde_kms->base, crtc);
+
+	sde_kms_cancel_delayed_work(crtc);
+
+	/* disable SDE irq's */
+	drm_for_each_encoder_mask(encoder, crtc->dev,
+			crtc->state->encoder_mask) {
+		if (sde_encoder_in_clone_mode(encoder))
+			continue;
+
+		sde_encoder_irq_control(encoder, false);
+	}
+
+	if (is_primary) {
+		/* disable IRQ line */
+		sde_irq_update(&sde_kms->base, false);
+
+		/* disable vblank events */
+		drm_crtc_vblank_off(crtc);
+
+		/* reset sw state */
+		sde_crtc_reset_sw_state(crtc);
+	}
+
+	sde_dbg_set_hw_ownership_status(false);
+
+	return rc;
+}
+
 int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
 	struct drm_atomic_state *state)
 {
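
The is_primary flag is what lets both VM release paths share this
helper: the trusted-VM path passes false, leaving the IRQ line,
vblank, and sw-state teardown to the primary VM, while the primary
path passes true for the full teardown. The two call sites, as the
remaining hunks show:

	/* trusted VM releasing the HW (sde_kms_vm_trusted_post_commit) */
	sde_kms_vm_pre_release(sde_kms, state, false);

	/* primary VM handing the HW over for TUI (sde_kms_vm_primary_post_commit) */
	rc = sde_kms_vm_pre_release(sde_kms, state, true);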
@@ -1303,7 +1372,6 @@ int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
 	struct drm_device *ddev;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
-	struct drm_encoder *encoder;
 	struct sde_crtc_state *cstate;
 	struct drm_crtc_state *new_cstate;
 	enum sde_crtc_vm_req vm_req;
@@ -1325,24 +1393,13 @@ int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
 	if (vm_req != VM_REQ_RELEASE)
 		return 0;
 
-	/* if vm_req is enabled, once CRTC on the commit is guaranteed */
-	sde_kms_wait_for_frame_transfer_complete(&sde_kms->base, crtc);
-
-	drm_for_each_encoder_mask(encoder, crtc->dev,
-			crtc->state->encoder_mask) {
-		if (sde_encoder_in_clone_mode(encoder))
-			continue;
-
-		sde_encoder_irq_control(encoder, false);
-	}
+	sde_kms_vm_pre_release(sde_kms, state, false);
 
 	list_for_each_entry(plane, &ddev->mode_config.plane_list, head)
 		sde_plane_set_sid(plane, 0);
 
 	sde_hw_set_lutdma_sid(sde_kms->hw_sid, 0);
 
-	sde_dbg_set_hw_ownership_status(false);
-
 	sde_vm_lock(sde_kms);
 	if (vm_ops->vm_release)
@@ -1353,54 +1410,6 @@ int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
 	return rc;
 }
 
-int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
-	struct drm_atomic_state *state)
-{
-	struct drm_device *ddev;
-	struct drm_crtc *crtc;
-	struct drm_encoder *encoder;
-	struct drm_connector *connector;
-	int rc = 0;
-
-	ddev = sde_kms->dev;
-
-	crtc = sde_kms_vm_get_vm_crtc(state);
-	if (!crtc)
-		return 0;
-
-	/* if vm_req is enabled, once CRTC on the commit is guaranteed */
-	sde_kms_wait_for_frame_transfer_complete(&sde_kms->base, crtc);
-
-	/* disable ESD work */
-	list_for_each_entry(connector,
-			&ddev->mode_config.connector_list, head) {
-		if (drm_connector_mask(connector) & crtc->state->connector_mask)
-			sde_connector_schedule_status_work(connector, false);
-	}
-
-	/* disable SDE irq's */
-	drm_for_each_encoder_mask(encoder, crtc->dev,
-			crtc->state->encoder_mask) {
-		if (sde_encoder_in_clone_mode(encoder))
-			continue;
-
-		sde_encoder_irq_control(encoder, false);
-	}
-
-	/* disable IRQ line */
-	sde_irq_update(&sde_kms->base, false);
-
-	/* disable vblank events */
-	drm_crtc_vblank_off(crtc);
-
-	/* reset sw state */
-	sde_crtc_reset_sw_state(crtc);
-
-	sde_dbg_set_hw_ownership_status(false);
-
-	return rc;
-}
-
 int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
 	struct drm_atomic_state *state)
 {
@@ -1427,7 +1436,7 @@ int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
 		return 0;
 
 	/* handle SDE pre-release */
-	rc = sde_kms_vm_pre_release(sde_kms, state);
+	rc = sde_kms_vm_pre_release(sde_kms, state, true);
 	if (rc) {
 		SDE_ERROR("sde vm pre_release failed, rc=%d\n", rc);
 		goto exit;