disp: msm: sde: expose helper api at VM layer

Add helper APIs at the VM layer to avoid repeated
VM structure dereferencing in the rest of the SDE files.

Change-Id: I31ee506adbd12c5452bef15da7c468c87b2c9f96
Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
This commit is contained in:
Jeykumar Sankaran
2020-07-23 13:08:11 -07:00
parent 7b50ca2b1e
commit a962ce3f02
3 changed files with 110 additions and 59 deletions

View File

@@ -82,6 +82,7 @@ static int sde_backlight_device_update_status(struct backlight_device *bd)
int rc = 0; int rc = 0;
struct msm_drm_private *priv; struct msm_drm_private *priv;
struct sde_kms *sde_kms; struct sde_kms *sde_kms;
struct sde_vm_ops *vm_ops;
brightness = bd->props.brightness; brightness = bd->props.brightness;
@@ -113,17 +114,12 @@ static int sde_backlight_device_update_status(struct backlight_device *bd)
return 0; return 0;
} }
if (sde_kms->vm) { sde_vm_lock(sde_kms);
struct sde_vm_ops *vm_ops = &sde_kms->vm->vm_ops;
mutex_lock(&sde_kms->vm->vm_res_lock); vm_ops = sde_vm_get_ops(sde_kms);
if (vm_ops && vm_ops->vm_owns_hw && !vm_ops->vm_owns_hw(sde_kms)) {
if (vm_ops->vm_owns_hw && !vm_ops->vm_owns_hw(sde_kms)) { SDE_DEBUG("skipping bl update due to HW unavailablity\n");
SDE_DEBUG( goto done;
"skipping bl update due to HW unavailablity\n");
mutex_unlock(&sde_kms->vm->vm_res_lock);
return 0;
}
} }
if (c_conn->ops.set_backlight) { if (c_conn->ops.set_backlight) {
@@ -139,8 +135,8 @@ static int sde_backlight_device_update_status(struct backlight_device *bd)
c_conn->unset_bl_level = 0; c_conn->unset_bl_level = 0;
} }
if (sde_kms->vm) done:
mutex_unlock(&sde_kms->vm->vm_res_lock); sde_vm_unlock(sde_kms);
return rc; return rc;
} }

View File

@@ -205,13 +205,10 @@ static int _sde_kms_dump_clks_state(struct sde_kms *sde_kms)
static bool _sde_kms_skip_vblank_op(struct sde_kms *sde_kms) static bool _sde_kms_skip_vblank_op(struct sde_kms *sde_kms)
{ {
struct sde_vm_ops *vm_ops = NULL; struct sde_vm_ops *vm_ops = sde_vm_get_ops(sde_kms);
if (!sde_kms->vm) if (vm_ops && vm_ops->vm_owns_hw
return false; && !vm_ops->vm_owns_hw(sde_kms))
vm_ops = &sde_kms->vm->vm_ops;
if (!vm_ops->vm_owns_hw(sde_kms))
return true; return true;
return false; return false;
@@ -227,21 +224,18 @@ static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
sde_kms = to_sde_kms(kms); sde_kms = to_sde_kms(kms);
if (sde_kms->vm) sde_vm_lock(sde_kms);
mutex_lock(&sde_kms->vm->vm_res_lock);
if (_sde_kms_skip_vblank_op(sde_kms)) { if (_sde_kms_skip_vblank_op(sde_kms)) {
SDE_DEBUG("skipping vblank enable due to HW unavailablity\n"); SDE_DEBUG("skipping vblank enable due to HW unavailablity\n");
mutex_unlock(&sde_kms->vm->vm_res_lock); goto done;
return 0;
} }
SDE_ATRACE_BEGIN("sde_kms_enable_vblank"); SDE_ATRACE_BEGIN("sde_kms_enable_vblank");
ret = sde_crtc_vblank(crtc, true); ret = sde_crtc_vblank(crtc, true);
SDE_ATRACE_END("sde_kms_enable_vblank"); SDE_ATRACE_END("sde_kms_enable_vblank");
done:
if (sde_kms->vm) sde_vm_unlock(sde_kms);
mutex_unlock(&sde_kms->vm->vm_res_lock);
return ret; return ret;
} }
@@ -255,21 +249,18 @@ static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
sde_kms = to_sde_kms(kms); sde_kms = to_sde_kms(kms);
if (sde_kms->vm) sde_vm_lock(sde_kms);
mutex_lock(&sde_kms->vm->vm_res_lock);
if (_sde_kms_skip_vblank_op(sde_kms)) { if (_sde_kms_skip_vblank_op(sde_kms)) {
SDE_DEBUG("skipping vblank disable due to HW unavailablity\n"); SDE_DEBUG("skipping vblank disable due to HW unavailablity\n");
mutex_unlock(&sde_kms->vm->vm_res_lock); goto done;
return;
} }
SDE_ATRACE_BEGIN("sde_kms_disable_vblank"); SDE_ATRACE_BEGIN("sde_kms_disable_vblank");
sde_crtc_vblank(crtc, false); sde_crtc_vblank(crtc, false);
SDE_ATRACE_END("sde_kms_disable_vblank"); SDE_ATRACE_END("sde_kms_disable_vblank");
done:
if (sde_kms->vm) sde_vm_unlock(sde_kms);
mutex_unlock(&sde_kms->vm->vm_res_lock);
} }
static void sde_kms_wait_for_frame_transfer_complete(struct msm_kms *kms, static void sde_kms_wait_for_frame_transfer_complete(struct msm_kms *kms,
@@ -962,11 +953,10 @@ int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
ddev = sde_kms->dev; ddev = sde_kms->dev;
if (!sde_kms->vm) vm_ops = sde_vm_get_ops(sde_kms);
if (!vm_ops)
return -EINVAL; return -EINVAL;
vm_ops = &sde_kms->vm->vm_ops;
crtc = state->crtcs[0].ptr; crtc = state->crtcs[0].ptr;
cstate = to_sde_crtc_state(state->crtcs[0].new_state); cstate = to_sde_crtc_state(state->crtcs[0].new_state);
@@ -1085,11 +1075,10 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
*/ */
sde_kms_prepare_secure_transition(kms, state); sde_kms_prepare_secure_transition(kms, state);
if (!sde_kms->vm) vm_ops = sde_vm_get_ops(sde_kms);
if (!vm_ops)
goto end; goto end;
vm_ops = &sde_kms->vm->vm_ops;
if (vm_ops->vm_prepare_commit) if (vm_ops->vm_prepare_commit)
vm_ops->vm_prepare_commit(sde_kms, state); vm_ops->vm_prepare_commit(sde_kms, state);
end: end:
@@ -1250,10 +1239,10 @@ int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
enum sde_crtc_vm_req vm_req; enum sde_crtc_vm_req vm_req;
int rc = 0; int rc = 0;
if (!sde_kms || !sde_kms->vm) if (!sde_kms || !sde_vm_is_enabled(sde_kms))
return -EINVAL; return -EINVAL;
vm_ops = &sde_kms->vm->vm_ops; vm_ops = sde_vm_get_ops(sde_kms);
ddev = sde_kms->dev; ddev = sde_kms->dev;
crtc = state->crtcs[0].ptr; crtc = state->crtcs[0].ptr;
@@ -1332,10 +1321,10 @@ int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
enum sde_crtc_vm_req vm_req; enum sde_crtc_vm_req vm_req;
int rc = 0; int rc = 0;
if (!sde_kms || !sde_kms->vm) if (!sde_kms || !sde_vm_is_enabled(sde_kms))
return -EINVAL; return -EINVAL;
vm_ops = &sde_kms->vm->vm_ops; vm_ops = sde_vm_get_ops(sde_kms);
crtc = state->crtcs[0].ptr; crtc = state->crtcs[0].ptr;
cstate = to_sde_crtc_state(state->crtcs[0].new_state); cstate = to_sde_crtc_state(state->crtcs[0].new_state);
@@ -1427,16 +1416,13 @@ static void sde_kms_complete_commit(struct msm_kms *kms,
} }
} }
if (sde_kms->vm) { vm_ops = sde_vm_get_ops(sde_kms);
vm_ops = &sde_kms->vm->vm_ops; if (vm_ops && vm_ops->vm_post_commit) {
if (vm_ops->vm_post_commit) {
rc = vm_ops->vm_post_commit(sde_kms, old_state); rc = vm_ops->vm_post_commit(sde_kms, old_state);
if (rc) if (rc)
SDE_ERROR("vm post commit failed, rc = %d\n", SDE_ERROR("vm post commit failed, rc = %d\n",
rc); rc);
} }
}
pm_runtime_put_sync(sde_kms->dev->dev); pm_runtime_put_sync(sde_kms->dev->dev);
@@ -2120,6 +2106,7 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
{ {
struct drm_device *dev; struct drm_device *dev;
struct msm_drm_private *priv; struct msm_drm_private *priv;
struct sde_vm_ops *vm_ops;
int i; int i;
if (!sde_kms || !pdev) if (!sde_kms || !pdev)
@@ -2139,8 +2126,9 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
of_genpd_del_provider(pdev->dev.of_node); of_genpd_del_provider(pdev->dev.of_node);
} }
if (sde_kms->vm && sde_kms->vm->vm_ops.vm_deinit) vm_ops = sde_vm_get_ops(sde_kms);
sde_kms->vm->vm_ops.vm_deinit(sde_kms, &sde_kms->vm->vm_ops); if (vm_ops && vm_ops->vm_deinit)
vm_ops->vm_deinit(sde_kms, vm_ops);
if (sde_kms->hw_intr) if (sde_kms->hw_intr)
sde_hw_intr_destroy(sde_kms->hw_intr); sde_hw_intr_destroy(sde_kms->hw_intr);
@@ -2427,11 +2415,10 @@ static int sde_kms_check_vm_request(struct msm_kms *kms,
sde_kms = to_sde_kms(kms); sde_kms = to_sde_kms(kms);
dev = sde_kms->dev; dev = sde_kms->dev;
if (!sde_kms->vm) vm_ops = sde_vm_get_ops(sde_kms);
if (!vm_ops)
return 0; return 0;
vm_ops = &sde_kms->vm->vm_ops;
for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) { for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
struct sde_crtc_state *old_state = NULL, *new_state = NULL; struct sde_crtc_state *old_state = NULL, *new_state = NULL;
@@ -2495,14 +2482,15 @@ static int sde_kms_check_vm_request(struct msm_kms *kms,
return -EINVAL; return -EINVAL;
} }
mutex_lock(&sde_kms->vm->vm_res_lock); sde_vm_lock(sde_kms);
if (vm_ops->vm_request_valid) if (vm_ops->vm_request_valid)
rc = vm_ops->vm_request_valid(sde_kms, old_vm_req, new_vm_req); rc = vm_ops->vm_request_valid(sde_kms, old_vm_req, new_vm_req);
if (rc) if (rc)
SDE_ERROR( SDE_ERROR(
"failed to complete vm transition request. old_state = %d, new_state = %d, hw_ownership: %d\n", "failed to complete vm transition request. old_state = %d, new_state = %d, hw_ownership: %d\n",
old_vm_req, new_vm_req, vm_ops->vm_owns_hw(sde_kms)); old_vm_req, new_vm_req, vm_ops->vm_owns_hw(sde_kms));
mutex_unlock(&sde_kms->vm->vm_res_lock); sde_vm_unlock(sde_kms);
return rc; return rc;
} }
@@ -4287,6 +4275,7 @@ int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms)
struct msm_drm_private *priv; struct msm_drm_private *priv;
struct sde_splash_display *handoff_display; struct sde_splash_display *handoff_display;
struct dsi_display *display; struct dsi_display *display;
struct sde_vm_ops *vm_ops;
int ret, i; int ret, i;
if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) { if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
@@ -4294,7 +4283,8 @@ int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms)
return -EINVAL; return -EINVAL;
} }
if (!sde_kms->vm->vm_ops.vm_owns_hw(sde_kms)) { vm_ops = sde_vm_get_ops(sde_kms);
if (vm_ops && vm_ops->vm_owns_hw(sde_kms)) {
SDE_DEBUG( SDE_DEBUG(
"skipping sde res init as device assign is not completed\n"); "skipping sde res init as device assign is not completed\n");
return 0; return 0;

View File

@@ -164,6 +164,53 @@ int sde_vm_primary_init(struct sde_kms *kms);
* @return - 0 on success * @return - 0 on success
*/ */
int sde_vm_trusted_init(struct sde_kms *kms); int sde_vm_trusted_init(struct sde_kms *kms);
/**
 * sde_vm_is_enabled - check whether the TUI (VM) feature is enabled
 * @sde_kms - pointer to sde_kms; may be NULL
 * @return - true if a VM context is attached, false otherwise
 */
static inline bool sde_vm_is_enabled(struct sde_kms *sde_kms)
{
	/* NULL-safe so callers need not pre-validate sde_kms */
	return sde_kms && sde_kms->vm;
}
/**
 * sde_vm_lock - lock vm variables
 * @sde_kms - pointer to sde_kms; no-op when NULL or when the VM
 *            layer is not enabled
 */
static inline void sde_vm_lock(struct sde_kms *sde_kms)
{
	/* guard sde_kms as well as sde_kms->vm to avoid a NULL deref */
	if (!sde_kms || !sde_kms->vm)
		return;

	mutex_lock(&sde_kms->vm->vm_res_lock);
}
/**
 * sde_vm_unlock - unlock vm variables
 * @sde_kms - pointer to sde_kms; no-op when NULL or when the VM
 *            layer is not enabled (mirrors sde_vm_lock)
 */
static inline void sde_vm_unlock(struct sde_kms *sde_kms)
{
	/* guard sde_kms as well as sde_kms->vm to avoid a NULL deref */
	if (!sde_kms || !sde_kms->vm)
		return;

	mutex_unlock(&sde_kms->vm->vm_res_lock);
}
/**
 * sde_vm_get_ops - helper API to retrieve sde_vm_ops
 * @sde_kms - pointer to sde_kms; may be NULL
 * @return - pointer to sde_vm_ops, or NULL when the VM layer is
 *           not enabled (callers must NULL-check the result)
 */
static inline struct sde_vm_ops *sde_vm_get_ops(struct sde_kms *sde_kms)
{
	/* guard sde_kms as well as sde_kms->vm to avoid a NULL deref */
	if (!sde_kms || !sde_kms->vm)
		return NULL;

	return &sde_kms->vm->vm_ops;
}
#else #else
static inline int sde_vm_primary_init(struct sde_kms *kms) static inline int sde_vm_primary_init(struct sde_kms *kms)
{ {
@@ -175,5 +222,23 @@ static inline int sde_vm_trusted_init(struct sde_kms *kms)
return 0; return 0;
} }
/* Stub for builds without CONFIG_DRM_SDE_VM: the TUI/VM feature is
 * compiled out, so it is never enabled.
 */
static inline bool sde_vm_is_enabled(struct sde_kms *sde_kms)
{
	return false;
}
/* Stub for builds without CONFIG_DRM_SDE_VM: no VM state exists, so
 * there is nothing to lock.
 */
static inline void sde_vm_lock(struct sde_kms *sde_kms)
{
}
/* Stub for builds without CONFIG_DRM_SDE_VM: no VM state exists, so
 * there is nothing to unlock.
 */
static inline void sde_vm_unlock(struct sde_kms *sde_kms)
{
}
/* Stub for builds without CONFIG_DRM_SDE_VM: returns NULL so callers
 * that NULL-check the ops table skip all VM handling.
 */
static inline struct sde_vm_ops *sde_vm_get_ops(struct sde_kms *sde_kms)
{
	return NULL;
}
#endif /* IS_ENABLED(CONFIG_DRM_SDE_VM) */ #endif /* IS_ENABLED(CONFIG_DRM_SDE_VM) */
#endif /* __SDE_VM_H__ */ #endif /* __SDE_VM_H__ */