Merge "disp: msm: sde: add system cache support for writeback"

qctecmdr
2021-11-19 08:59:50 -08:00
committed by Gerrit - the friendly Code Review server
18 changed files with 691 additions and 377 deletions

View File

@@ -218,6 +218,7 @@ enum msm_mdp_conn_property {
 	CONNECTOR_PROP_DYN_BIT_CLK,
 	CONNECTOR_PROP_DIMMING_CTRL,
 	CONNECTOR_PROP_DIMMING_MIN_BL,
+	CONNECTOR_PROP_EARLY_FENCE_LINE,

 	/* enum/bitmask properties */
 	CONNECTOR_PROP_TOPOLOGY_NAME,

View File

@@ -3099,13 +3099,6 @@ static int _sde_connector_install_properties(struct drm_device *dev,
 	msm_property_install_enum(&c_conn->property_info, "dsc_mode", 0,
 			0, e_dsc_mode, ARRAY_SIZE(e_dsc_mode), 0, CONNECTOR_PROP_DSC_MODE);

-	if (display_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
-		msm_property_install_enum(&c_conn->property_info,
-				"frame_trigger_mode", 0, 0,
-				e_frame_trigger_mode,
-				ARRAY_SIZE(e_frame_trigger_mode), 0,
-				CONNECTOR_PROP_CMD_FRAME_TRIGGER_MODE);
-
 	if (display_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE &&
 			display_info->capabilities & MSM_DISPLAY_CAP_VID_MODE)
 		msm_property_install_enum(&c_conn->property_info,
@@ -3126,6 +3119,12 @@ static int _sde_connector_install_properties(struct drm_device *dev,
 		}
 	}

+	if ((display_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+			|| (connector_type == DRM_MODE_CONNECTOR_VIRTUAL))
+		msm_property_install_enum(&c_conn->property_info, "frame_trigger_mode",
+				0, 0, e_frame_trigger_mode, ARRAY_SIZE(e_frame_trigger_mode), 0,
+				CONNECTOR_PROP_CMD_FRAME_TRIGGER_MODE);
+
 	msm_property_install_range(&c_conn->property_info, "bl_scale",
 			0x0, 0, MAX_BL_SCALE_LEVEL, MAX_BL_SCALE_LEVEL,
 			CONNECTOR_PROP_BL_SCALE);

View File

@@ -830,10 +830,10 @@ static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc,
 	struct sde_crtc_state *crtc_state;
 	struct sde_rect *crtc_roi;
 	struct msm_mode_info mode_info;
-	int i = 0;
-	int rc;
-	bool is_crtc_roi_dirty;
-	bool is_conn_roi_dirty;
+	int i = 0, rc;
+	bool is_crtc_roi_dirty, is_conn_roi_dirty;
+	u32 crtc_width, crtc_height;
+	struct drm_display_mode *adj_mode;

 	if (!crtc || !state)
 		return -EINVAL;
@@ -904,15 +904,16 @@ static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc,
 	sde_kms_rect_merge_rectangles(&crtc_state->user_roi_list, crtc_roi);

 	/* clear the ROI to null if it matches full screen anyways */
+	adj_mode = &state->adjusted_mode;
+	crtc_width = sde_crtc_get_width(sde_crtc, crtc_state, adj_mode);
+	crtc_height = sde_crtc_get_mixer_height(sde_crtc, crtc_state, adj_mode);
 	if (crtc_roi->x == 0 && crtc_roi->y == 0 &&
-			crtc_roi->w == state->adjusted_mode.hdisplay &&
-			crtc_roi->h == state->adjusted_mode.vdisplay)
+			crtc_roi->w == crtc_width && crtc_roi->h == crtc_height)
 		memset(crtc_roi, 0, sizeof(*crtc_roi));

 	SDE_DEBUG("%s: crtc roi (%d,%d,%d,%d)\n", sde_crtc->name,
 			crtc_roi->x, crtc_roi->y, crtc_roi->w, crtc_roi->h);
-	SDE_EVT32_VERBOSE(DRMID(crtc), crtc_roi->x, crtc_roi->y, crtc_roi->w,
-			crtc_roi->h);
+	SDE_EVT32_VERBOSE(DRMID(crtc), crtc_roi->x, crtc_roi->y, crtc_roi->w, crtc_roi->h);

 	return 0;
 }
@@ -3299,7 +3300,7 @@ static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
 	}

 	/* Display resolution */
-	hdisplay = mode->hdisplay/sde_crtc->num_mixers;
+	hdisplay = mode->hdisplay / sde_crtc->num_mixers;

 	/* Validate the DS data */
 	ret = _sde_crtc_check_dest_scaler_validate_ds(crtc, sde_crtc, cstate,
@@ -3472,13 +3473,12 @@ static void _sde_crtc_setup_is_ppsplit(struct drm_crtc_state *state)
 	}
 }

-static void _sde_crtc_setup_lm_bounds(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
+static void _sde_crtc_setup_lm_bounds(struct drm_crtc *crtc, struct drm_crtc_state *state)
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	struct drm_display_mode *adj_mode;
-	u32 crtc_split_width;
+	u32 mixer_width, mixer_height;
 	int i;

 	if (!crtc || !state) {
@@ -3490,16 +3490,15 @@ static void _sde_crtc_setup_lm_bounds(struct drm_crtc *crtc,
 	cstate = to_sde_crtc_state(state);
 	adj_mode = &state->adjusted_mode;
-	crtc_split_width = sde_crtc_get_mixer_width(sde_crtc, cstate, adj_mode);
+	mixer_width = sde_crtc_get_mixer_width(sde_crtc, cstate, adj_mode);
+	mixer_height = sde_crtc_get_mixer_height(sde_crtc, cstate, adj_mode);

 	for (i = 0; i < sde_crtc->num_mixers; i++) {
-		cstate->lm_bounds[i].x = crtc_split_width * i;
+		cstate->lm_bounds[i].x = mixer_width * i;
 		cstate->lm_bounds[i].y = 0;
-		cstate->lm_bounds[i].w = crtc_split_width;
-		cstate->lm_bounds[i].h =
-				sde_crtc_get_mixer_height(sde_crtc, cstate, adj_mode);
-		memcpy(&cstate->lm_roi[i], &cstate->lm_bounds[i],
-				sizeof(cstate->lm_roi[i]));
+		cstate->lm_bounds[i].w = mixer_width;
+		cstate->lm_bounds[i].h = mixer_height;
+		memcpy(&cstate->lm_roi[i], &cstate->lm_bounds[i], sizeof(cstate->lm_roi[i]));
 		SDE_EVT32_VERBOSE(DRMID(crtc), i,
 				cstate->lm_bounds[i].x, cstate->lm_bounds[i].y,
 				cstate->lm_bounds[i].w, cstate->lm_bounds[i].h);
@@ -4133,25 +4132,23 @@ static int _sde_crtc_vblank_enable(
 			return ret;

 		mutex_lock(&sde_crtc->crtc_lock);
-		drm_for_each_encoder_mask(enc, crtc->dev,
-				sde_crtc->cached_encoder_mask) {
-			SDE_EVT32(DRMID(crtc), DRMID(enc));
+		drm_for_each_encoder_mask(enc, crtc->dev, sde_crtc->cached_encoder_mask) {
+			if (sde_encoder_in_clone_mode(enc))
+				continue;

-			sde_encoder_register_vblank_callback(enc,
-					sde_crtc_vblank_cb, (void *)crtc);
+			sde_encoder_register_vblank_callback(enc, sde_crtc_vblank_cb, (void *)crtc);
 		}
 		mutex_unlock(&sde_crtc->crtc_lock);
 	} else {
 		mutex_lock(&sde_crtc->crtc_lock);
-		drm_for_each_encoder_mask(enc, crtc->dev,
-				sde_crtc->cached_encoder_mask) {
-			SDE_EVT32(DRMID(crtc), DRMID(enc));
+		drm_for_each_encoder_mask(enc, crtc->dev, sde_crtc->cached_encoder_mask) {
+			if (sde_encoder_in_clone_mode(enc))
+				continue;

 			sde_encoder_register_vblank_callback(enc, NULL, NULL);
 		}
 		mutex_unlock(&sde_crtc->crtc_lock);

 		pm_runtime_put_sync(crtc->dev->dev);
 	}
@@ -4478,6 +4475,7 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
 	u32 power_on;
 	bool in_cont_splash = false;
 	int ret, i;
+	enum sde_intf_mode intf_mode;

 	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
 		SDE_ERROR("invalid crtc\n");
@@ -4501,6 +4499,9 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
 	SDE_DEBUG("crtc%d\n", crtc->base.id);

+	/* avoid vblank on/off for virtual display */
+	intf_mode = sde_crtc_get_intf_mode(crtc, crtc->state);
+	if ((intf_mode != INTF_MODE_WB_BLOCK) && (intf_mode != INTF_MODE_WB_LINE))
 		drm_crtc_vblank_off(crtc);

 	mutex_lock(&sde_crtc->crtc_lock);
@@ -4609,6 +4610,7 @@ static void sde_crtc_enable(struct drm_crtc *crtc,
 	int ret, i;
 	struct sde_crtc_state *cstate;
 	struct msm_display_mode *msm_mode;
+	enum sde_intf_mode intf_mode;

 	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid crtc\n");
@@ -4633,10 +4635,15 @@ static void sde_crtc_enable(struct drm_crtc *crtc,
 	if (!sde_crtc->enabled) {
 		/* cache the encoder mask now for vblank work */
 		sde_crtc->cached_encoder_mask = crtc->state->encoder_mask;

+		/* avoid vblank on/off for virtual display */
+		intf_mode = sde_crtc_get_intf_mode(crtc, crtc->state);
+		if ((intf_mode != INTF_MODE_WB_BLOCK) && (intf_mode != INTF_MODE_WB_LINE)) {
 		/* max possible vsync_cnt(atomic_t) soft counter */
 		drm_crtc_set_max_vblank_count(crtc, INT_MAX);
 		drm_crtc_vblank_on(crtc);
 		}
+	}

 	mutex_lock(&sde_crtc->crtc_lock);
 	SDE_EVT32(DRMID(crtc), sde_crtc->enabled);
@@ -4712,28 +4719,27 @@ static int _sde_crtc_excl_dim_layer_check(struct drm_crtc_state *state,
 		struct plane_state pstates[], int cnt)
 {
 	struct sde_crtc_state *cstate = to_sde_crtc_state(state);
+	struct sde_crtc *sde_crtc = to_sde_crtc(state->crtc);
 	struct drm_display_mode *mode = &state->adjusted_mode;
 	const struct drm_plane_state *pstate;
 	struct sde_plane_state *sde_pstate;
 	int rc = 0, i;
+	struct sde_rect *rect;
+	u32 crtc_width, crtc_height;
+
+	crtc_width = sde_crtc_get_width(sde_crtc, cstate, mode);
+	crtc_height = sde_crtc_get_mixer_height(sde_crtc, cstate, mode);

 	/* Check dim layer rect bounds and stage */
 	for (i = 0; i < cstate->num_dim_layers; i++) {
-		if ((CHECK_LAYER_BOUNDS(cstate->dim_layer[i].rect.y,
-			cstate->dim_layer[i].rect.h, mode->vdisplay)) ||
-		    (CHECK_LAYER_BOUNDS(cstate->dim_layer[i].rect.x,
-			cstate->dim_layer[i].rect.w, mode->hdisplay)) ||
-		    (cstate->dim_layer[i].stage >= SDE_STAGE_MAX) ||
-		    (!cstate->dim_layer[i].rect.w) ||
-		    (!cstate->dim_layer[i].rect.h)) {
-			SDE_ERROR("invalid dim_layer:{%d,%d,%d,%d}, stage:%d\n",
-					cstate->dim_layer[i].rect.x,
-					cstate->dim_layer[i].rect.y,
-					cstate->dim_layer[i].rect.w,
-					cstate->dim_layer[i].rect.h,
+		rect = &cstate->dim_layer[i].rect;
+		if ((CHECK_LAYER_BOUNDS(rect->y, rect->h, crtc_height)) ||
+				(CHECK_LAYER_BOUNDS(rect->x, rect->w, crtc_width)) ||
+				(cstate->dim_layer[i].stage >= SDE_STAGE_MAX) || (!rect->w) || (!rect->h)) {
+			SDE_ERROR("crtc:%d wxh:%dx%d, invalid dim_layer:{%d,%d,%d,%d}, stage:%d\n",
+					DRMID(state->crtc), crtc_width, crtc_height,
+					rect->x, rect->y, rect->w, rect->h,
 					cstate->dim_layer[i].stage);
-			SDE_ERROR("display: %dx%d\n", mode->hdisplay,
-					mode->vdisplay);
 			rc = -E2BIG;
 			goto end;
 		}
@@ -4744,9 +4750,8 @@ static int _sde_crtc_excl_dim_layer_check(struct drm_crtc_state *state,
 		pstate = pstates[i].drm_pstate;
 		sde_pstate = to_sde_plane_state(pstate);
 		SDE_DEBUG("p %d z %d src{%d,%d,%d,%d} excl_rect{%d,%d,%d,%d}\n",
-				pstate->plane->base.id, pstates[i].stage,
-				pstate->crtc_x, pstate->crtc_y,
-				pstate->crtc_w, pstate->crtc_h,
+				DRMID(pstate->plane), pstates[i].stage,
+				pstate->crtc_x, pstate->crtc_y, pstate->crtc_w, pstate->crtc_h,
 				sde_pstate->excl_rect.x, sde_pstate->excl_rect.y,
 				sde_pstate->excl_rect.w, sde_pstate->excl_rect.h);
 	}
@@ -5026,7 +5031,7 @@ static int _sde_crtc_check_get_pstates(struct drm_crtc *crtc,
 	struct sde_crtc_state *cstate;
 	const struct drm_plane_state *pstate;
 	const struct drm_plane_state *pipe_staged[SSPP_MAX];
-	int rc = 0, multirect_count = 0, i, mixer_width, mixer_height;
+	int rc = 0, multirect_count = 0, i, crtc_width, crtc_height;
 	int inc_sde_stage = 0;
 	struct sde_kms *kms;
 	u32 blend_type;
@@ -5042,11 +5047,8 @@ static int _sde_crtc_check_get_pstates(struct drm_crtc *crtc,
 	memset(pipe_staged, 0, sizeof(pipe_staged));

-	mixer_width = sde_crtc_get_mixer_width(sde_crtc, cstate, mode);
-	mixer_height = sde_crtc_get_mixer_height(sde_crtc, cstate, mode);
-
-	if (cstate->num_ds_enabled)
-		mixer_width = mixer_width * cstate->num_ds_enabled;
+	crtc_width = sde_crtc_get_width(sde_crtc, cstate, mode);
+	crtc_height = sde_crtc_get_mixer_height(sde_crtc, cstate, mode);

 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
 		if (IS_ERR_OR_NULL(pstate)) {
@@ -5095,23 +5097,18 @@ static int _sde_crtc_check_get_pstates(struct drm_crtc *crtc,
 		(*cnt)++;

-		if (CHECK_LAYER_BOUNDS(pstate->crtc_y, pstate->crtc_h,
-				mode->vdisplay) ||
-				CHECK_LAYER_BOUNDS(pstate->crtc_x, pstate->crtc_w,
-				mode->hdisplay)) {
-			SDE_ERROR("invalid vertical/horizontal destination\n");
-			SDE_ERROR("y:%d h:%d vdisp:%d x:%d w:%d hdisp:%d\n",
-					pstate->crtc_y, pstate->crtc_h, mode->vdisplay,
-					pstate->crtc_x, pstate->crtc_w, mode->hdisplay);
+		if (CHECK_LAYER_BOUNDS(pstate->crtc_y, pstate->crtc_h, crtc_height) ||
+				CHECK_LAYER_BOUNDS(pstate->crtc_x, pstate->crtc_w, crtc_width)) {
+			SDE_ERROR("invalid dest - y:%d h:%d crtc_h:%d x:%d w:%d crtc_w:%d\n",
+					pstate->crtc_y, pstate->crtc_h, crtc_height,
+					pstate->crtc_x, pstate->crtc_w, crtc_width);
 			return -E2BIG;
 		}

 		if (blend_type != SDE_DRM_BLEND_OP_SKIP && cstate->num_ds_enabled &&
-				((pstate->crtc_h > mixer_height) ||
-				 (pstate->crtc_w > mixer_width))) {
+				((pstate->crtc_h > crtc_height) || (pstate->crtc_w > crtc_width))) {
 			SDE_ERROR("plane w/h:%x*%x > mixer w/h:%x*%x\n",
-					pstate->crtc_w, pstate->crtc_h,
-					mixer_width, mixer_height);
+					pstate->crtc_w, pstate->crtc_h, crtc_width, crtc_height);
 			return -E2BIG;
 		}
 	}
@@ -5295,6 +5292,9 @@ static int _sde_crtc_check_plane_layout(struct drm_crtc *crtc,
 	struct drm_plane *plane;
 	struct drm_plane_state *plane_state;
 	struct sde_plane_state *pstate;
+	struct drm_display_mode *mode;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
 	int layout_split;

 	kms = _sde_crtc_get_kms(crtc);
@@ -5308,6 +5308,9 @@ static int _sde_crtc_check_plane_layout(struct drm_crtc *crtc,
 				SDE_RM_TOPOLOGY_GROUP_QUADPIPE))
 		return 0;

+	mode = &crtc->state->adjusted_mode;
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
 	drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
 		plane_state = drm_atomic_get_existing_plane_state(
 				crtc_state->state, plane);
@@ -5315,7 +5318,7 @@ static int _sde_crtc_check_plane_layout(struct drm_crtc *crtc,
 			continue;

 		pstate = to_sde_plane_state(plane_state);
-		layout_split = crtc_state->mode.hdisplay >> 1;
+		layout_split = sde_crtc_get_width(sde_crtc, cstate, mode) >> 1;

 		if (plane_state->crtc_x >= layout_split) {
 			plane_state->crtc_x -= layout_split;
@@ -5879,7 +5882,7 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
 			ARRAY_SIZE(e_secure_level), 0,
 			CRTC_PROP_SECURITY_LEVEL);

-	if (test_bit(SDE_FEATURE_SYSCACHE, catalog->features))
+	if (catalog->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache)
 		msm_property_install_enum(&sde_crtc->property_info, "cache_state",
 			0x0, 0, e_cache_state,
 			ARRAY_SIZE(e_cache_state), 0,
@@ -6317,15 +6320,13 @@ static int _sde_debugfs_status_show(struct seq_file *s, void *data)
 	struct sde_crtc *sde_crtc;
 	struct sde_plane_state *pstate = NULL;
 	struct sde_crtc_mixer *m;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
 	struct drm_display_mode *mode;
 	struct drm_framebuffer *fb;
 	struct drm_plane_state *state;
 	struct sde_crtc_state *cstate;
-	int i, out_width, out_height;
+	int i, mixer_width, mixer_height;

 	if (!s || !s->private)
 		return -EINVAL;
@@ -6336,11 +6337,11 @@ static int _sde_debugfs_status_show(struct seq_file *s, void *data)
 	mutex_lock(&sde_crtc->crtc_lock);
 	mode = &crtc->state->adjusted_mode;
-	out_width = sde_crtc_get_mixer_width(sde_crtc, cstate, mode);
-	out_height = sde_crtc_get_mixer_height(sde_crtc, cstate, mode);
+	mixer_width = sde_crtc_get_mixer_width(sde_crtc, cstate, mode);
+	mixer_height = sde_crtc_get_mixer_height(sde_crtc, cstate, mode);

-	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
-			mode->hdisplay, mode->vdisplay);
+	seq_printf(s, "crtc:%d width:%d height:%d\n", DRMID(crtc),
+			mixer_width * sde_crtc->num_mixers, mixer_height);
 	seq_puts(s, "\n");
@@ -6353,7 +6354,7 @@ static int _sde_debugfs_status_show(struct seq_file *s, void *data)
 		else
 			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
 				m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
-				out_width, out_height);
+				mixer_width, mixer_height);
 	}
 	seq_puts(s, "\n");
@@ -7019,8 +7020,8 @@ void sde_crtc_static_img_control(struct drm_crtc *crtc,
 		return;
 	}

-	if (!test_bit(SDE_FEATURE_SYSCACHE, sde_kms->catalog->features)) {
-		SDE_DEBUG("syscache not supported\n");
+	if (!sde_kms->catalog->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache) {
+		SDE_DEBUG("DISP syscache not supported\n");
 		return;
 	}
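Several of the hunks above replace mode->hdisplay/vdisplay bounds checks with the crtc_width/crtc_height pair. Below is a minimal, self-contained sketch of that style of bounds test; the CHECK_LAYER_BOUNDS body here is an assumed stand-in for illustration, not the driver's actual macro definition.

/* standalone illustration; compile with any C compiler */
#include <stdio.h>

/* assumed form of the bounds test: rejects a rect whose size exceeds the
 * limit or whose offset pushes it past the limit */
#define CHECK_LAYER_BOUNDS(offset, size, max) \
	((size) > (max) || (offset) > ((max) - (size)))

int main(void)
{
	unsigned int crtc_width = 1920, crtc_height = 1080;

	/* 256x256 layer at (1800, 900): x + w exceeds crtc_width -> rejected */
	printf("out of bounds: %d\n",
	       CHECK_LAYER_BOUNDS(900, 256, crtc_height) ||
	       CHECK_LAYER_BOUNDS(1800, 256, crtc_width));

	/* same layer at (100, 100): fits within crtc_width x crtc_height -> accepted */
	printf("out of bounds: %d\n",
	       CHECK_LAYER_BOUNDS(100, 256, crtc_height) ||
	       CHECK_LAYER_BOUNDS(100, 256, crtc_width));
	return 0;
}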

View File

@@ -599,6 +599,25 @@ static inline int sde_crtc_get_mixer_height(struct sde_crtc *sde_crtc,
 			cstate->ds_cfg[0].lm_height : mode->vdisplay);
 }

+/**
+ * sde_crtc_get_width - get the correct crtc width based on the features enabled
+ */
+static inline int sde_crtc_get_width(struct sde_crtc *sde_crtc,
+		struct sde_crtc_state *cstate, struct drm_display_mode *mode)
+{
+	u32 width;
+
+	if (!sde_crtc || !cstate || !mode)
+		return 0;
+
+	if (cstate->num_ds_enabled)
+		width = cstate->ds_cfg[0].lm_width * cstate->num_ds_enabled;
+	else
+		width = mode->hdisplay;
+
+	return width;
+}
+
 /**
  * sde_crtc_frame_pending - retun the number of pending frames
  * @crtc: Pointer to drm crtc object
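The callers in sde_crtc.c switch from mode->hdisplay to this helper so that, with the destination scaler enabled, validation runs against the scaler's cumulative layer-mixer output width rather than the raw panel timing. A small standalone sketch of the same selection logic follows; the struct layouts are simplified stand-ins, not the real sde_crtc_state/drm_display_mode definitions.

#include <stdio.h>

struct ds_cfg { unsigned int lm_width; };
struct crtc_state { unsigned int num_ds_enabled; struct ds_cfg ds_cfg[2]; };
struct display_mode { unsigned int hdisplay; };

static unsigned int crtc_width(const struct crtc_state *cstate,
			       const struct display_mode *mode)
{
	/* dest-scaler enabled: width is per-LM width times active DS blocks */
	if (cstate->num_ds_enabled)
		return cstate->ds_cfg[0].lm_width * cstate->num_ds_enabled;
	/* otherwise fall back to the adjusted mode's horizontal resolution */
	return mode->hdisplay;
}

int main(void)
{
	struct crtc_state ds_on = { .num_ds_enabled = 2, .ds_cfg = { { 960 }, { 960 } } };
	struct crtc_state ds_off = { .num_ds_enabled = 0 };
	struct display_mode mode = { .hdisplay = 1440 };

	printf("ds enabled:  %u\n", crtc_width(&ds_on, &mode));  /* 1920 */
	printf("ds disabled: %u\n", crtc_width(&ds_off, &mode)); /* 1440 */
	return 0;
}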

View File

@@ -923,11 +923,11 @@ static int _sde_encoder_atomic_check_phys_enc(struct sde_encoder_virt *sde_enc,
 }

 static int _sde_encoder_atomic_check_pu_roi(struct sde_encoder_virt *sde_enc,
-		struct drm_crtc_state *crtc_state,
-		struct drm_connector_state *conn_state,
-		struct sde_connector_state *sde_conn_state,
-		struct sde_crtc_state *sde_crtc_state)
+		struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state,
+		struct sde_connector_state *sde_conn_state, struct sde_crtc_state *sde_crtc_state)
 {
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc_state->crtc);
+	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
 	int ret = 0;

 	if (crtc_state->mode_changed || crtc_state->active_changed) {
@@ -935,12 +935,11 @@ static int _sde_encoder_atomic_check_pu_roi(struct sde_encoder_virt *sde_enc,
 		mode_roi.x = 0;
 		mode_roi.y = 0;
-		mode_roi.w = crtc_state->adjusted_mode.hdisplay;
-		mode_roi.h = crtc_state->adjusted_mode.vdisplay;
+		mode_roi.w = sde_crtc_get_width(sde_crtc, sde_crtc_state, mode);
+		mode_roi.h = sde_crtc_get_mixer_height(sde_crtc, sde_crtc_state, mode);

 		if (sde_conn_state->rois.num_rects) {
-			sde_kms_rect_merge_rectangles(
-					&sde_conn_state->rois, &roi);
+			sde_kms_rect_merge_rectangles(&sde_conn_state->rois, &roi);

 			if (!sde_kms_rect_is_equal(&mode_roi, &roi)) {
 				SDE_ERROR_ENC(sde_enc,
 					"roi (%d,%d,%d,%d) on connector invalid during modeset\n",
@@ -950,8 +949,7 @@ static int _sde_encoder_atomic_check_pu_roi(struct sde_encoder_virt *sde_enc,
 		}

 		if (sde_crtc_state->user_roi_list.num_rects) {
-			sde_kms_rect_merge_rectangles(
-					&sde_crtc_state->user_roi_list, &roi);
+			sde_kms_rect_merge_rectangles(&sde_crtc_state->user_roi_list, &roi);

 			if (!sde_kms_rect_is_equal(&mode_roi, &roi)) {
 				SDE_ERROR_ENC(sde_enc,
 					"roi (%d,%d,%d,%d) on crtc invalid during modeset\n",
@@ -3417,13 +3415,9 @@ void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
-	SDE_DEBUG_ENC(sde_enc, "\n");
-	SDE_EVT32(DRMID(drm_enc), enable);

-	if (sde_encoder_in_clone_mode(drm_enc)) {
-		SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_ERROR);
-		return;
-	}
+	SDE_DEBUG_ENC(sde_enc, "enable:%d\n", enable);
+	SDE_EVT32(DRMID(drm_enc), enable);

 	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
 	sde_enc->crtc_vblank_cb = vbl_cb;
@@ -3607,8 +3601,10 @@ static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
 	/* update pending counts and trigger kickoff ctl flush atomically */
 	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);

-	if (phys->ops.is_master && phys->ops.is_master(phys) && config_changed)
+	if (phys->ops.is_master && phys->ops.is_master(phys) && config_changed) {
 		atomic_inc(&phys->pending_retire_fence_cnt);
+		atomic_inc(&phys->pending_ctl_start_cnt);
+	}

 	pend_ret_fence_cnt = atomic_read(&phys->pending_retire_fence_cnt);
@@ -4284,7 +4280,7 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 		struct sde_encoder_kickoff_params *params)
 {
 	struct sde_encoder_virt *sde_enc;
-	struct sde_encoder_phys *phys;
+	struct sde_encoder_phys *phys, *cur_master;
 	struct sde_kms *sde_kms = NULL;
 	struct sde_crtc *sde_crtc;
 	bool needs_hw_reset = false, is_cmd_mode;
@@ -4307,12 +4303,11 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 	SDE_DEBUG_ENC(sde_enc, "\n");
 	SDE_EVT32(DRMID(drm_enc));

-	is_cmd_mode = sde_encoder_check_curr_mode(drm_enc,
-			MSM_DISPLAY_CMD_MODE);
-	if (sde_enc->cur_master && sde_enc->cur_master->connector
-			&& is_cmd_mode)
-		sde_enc->frame_trigger_mode = sde_connector_get_property(
-				sde_enc->cur_master->connector->state,
+	cur_master = sde_enc->cur_master;
+	is_cmd_mode = sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE);
+	if (cur_master && cur_master->connector)
+		sde_enc->frame_trigger_mode =
+			sde_connector_get_property(cur_master->connector->state,
 				CONNECTOR_PROP_CMD_FRAME_TRIGGER_MODE);

 	_sde_encoder_helper_hdr_plus_mempool_update(sde_enc);

View File

@@ -205,6 +205,7 @@ struct sde_encoder_phys_ops {
  * @INTR_IDX_AUTOREFRESH_DONE: Autorefresh done for cmd mode panel meaning
  *			autorefresh has triggered a double buffer flip
  * @INTR_IDX_WRPTR: Writepointer start interrupt for cmd mode panel
+ * @INTR_IDX_WB_LINEPTR: Programmable lineptr interrupt for WB
  */
 enum sde_intr_idx {
 	INTR_IDX_VSYNC,
@@ -221,6 +222,7 @@ enum sde_intr_idx {
 	INTR_IDX_PP5_OVFL,
 	INTR_IDX_PP_CWB_OVFL,
 	INTR_IDX_WRPTR,
+	INTR_IDX_WB_LINEPTR,
 	INTR_IDX_MAX,
 };
@@ -286,6 +288,10 @@ struct sde_encoder_irq {
  *			scheduled. Decremented in irq handler
  * @pending_retire_fence_cnt: Atomic counter tracking the pending retire
  *			fences that have to be signalled.
+ * @pending_ctl_start_cnt: Atomic counter tracking the pending ctl-start-irq,
+ *			used to release commit thread. Currently managed
+ *			only for writeback encoder and the counter keeps
+ *			increasing for other type of encoders.
  * @pending_kickoff_wq: Wait queue for blocking until kickoff completes
  * @kickoff_timeout_ms: kickoff timeout in mill seconds
  * @irq: IRQ tracking structures
@@ -333,6 +339,7 @@ struct sde_encoder_phys {
 	atomic_t underrun_cnt;
 	atomic_t pending_kickoff_cnt;
 	atomic_t pending_retire_fence_cnt;
+	atomic_t pending_ctl_start_cnt;
 	wait_queue_head_t pending_kickoff_wq;
 	u32 kickoff_timeout_ms;
 	struct sde_encoder_irq irq[INTR_IDX_MAX];
@@ -423,13 +430,14 @@ struct sde_encoder_phys_cmd {
 * @wb_fmt: Writeback pixel format
 * @wb_fb: Pointer to current writeback framebuffer
 * @wb_aspace: Pointer to current writeback address space
-* @cwb_old_fb: Pointer to old writeback framebuffer
-* @cwb_old_aspace: Pointer to old writeback address space
+* @old_fb: Pointer to old writeback framebuffer
+* @old_aspace: Pointer to old writeback address space
 * @aspace: address space identifier for non-secure/secure domain
 * @wb_dev: Pointer to writeback device
 * @bo_disable: Buffer object(s) to use during the disabling state
 * @fb_disable: Frame buffer to use during the disabling state
 * @crtc Pointer to drm_crtc
+* @prog_line: Cached programmable line value used to trigger early wb-fence
 */
 struct sde_encoder_phys_wb {
 	struct sde_encoder_phys base;
@@ -441,13 +449,14 @@ struct sde_encoder_phys_wb {
 	const struct sde_format *wb_fmt;
 	struct drm_framebuffer *wb_fb;
 	struct msm_gem_address_space *wb_aspace;
-	struct drm_framebuffer *cwb_old_fb;
-	struct msm_gem_address_space *cwb_old_aspace;
+	struct drm_framebuffer *old_fb;
+	struct msm_gem_address_space *old_aspace;
 	struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
 	struct sde_wb_device *wb_dev;
 	struct drm_gem_object *bo_disable[SDE_MAX_PLANES];
 	struct drm_framebuffer *fb_disable;
 	struct drm_crtc *crtc;
+	u32 prog_line;
 };

 /**
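The @pending_ctl_start_cnt documentation above describes a plain counter handshake: the trigger-flush path increments it, the writeback ctl-start interrupt decrements it and wakes pending_kickoff_wq, and the commit thread blocks until it drains. A user-space model of that handshake is sketched below with C11 atomics and a condition variable; all names, types, and the sleep-based timing are stand-ins, not the driver's API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int pending_ctl_start_cnt;
static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;

static void trigger_flush(void)
{
	/* commit path: one ctl-start is now outstanding */
	atomic_fetch_add(&pending_ctl_start_cnt, 1);
}

static void *ctl_start_irq(void *arg)
{
	(void)arg;
	usleep(1000);			/* pretend the hardware latched the frame */
	if (atomic_load(&pending_ctl_start_cnt) > 0)
		atomic_fetch_sub(&pending_ctl_start_cnt, 1);
	pthread_mutex_lock(&wq_lock);
	pthread_cond_broadcast(&wq);	/* analogous to wake_up_all(&pending_kickoff_wq) */
	pthread_mutex_unlock(&wq_lock);
	return NULL;
}

static void wait_for_ctl_start(void)
{
	pthread_mutex_lock(&wq_lock);
	while (atomic_load(&pending_ctl_start_cnt))
		pthread_cond_wait(&wq, &wq_lock);	/* commit thread blocks here */
	pthread_mutex_unlock(&wq_lock);
}

int main(void)
{
	pthread_t irq;

	trigger_flush();
	pthread_create(&irq, NULL, ctl_start_irq, NULL);
	wait_for_ctl_start();
	pthread_join(irq, NULL);
	printf("ctl-start received, commit thread released\n");
	return 0;
}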

View File

@@ -1708,13 +1708,10 @@ static void _sde_encoder_autorefresh_disable_seq1(
 	/*
 	 * If autorefresh is enabled, disable it and make sure it is safe to
 	 * proceed with current frame commit/push. Sequence fallowed is,
-	 * 1. Disable TE - caller will take care of it
-	 * 2. Disable autorefresh config
-	 * 4. Poll for frame transfer ongoing to be false
-	 * 5. Enable TE back - caller will take care of it
+	 * 1. Disable TE & autorefresh - caller will take care of it
+	 * 2. Poll for frame transfer ongoing to be false
+	 * 3. Enable TE back - caller will take care of it
 	 */
-	_sde_encoder_phys_cmd_config_autorefresh(phys_enc, 0);

 	do {
 		udelay(AUTOREFRESH_SEQ1_POLL_TIME);
 		if ((trial * AUTOREFRESH_SEQ1_POLL_TIME)
@@ -1806,13 +1803,10 @@ static void _sde_encoder_autorefresh_disable_seq2(
 static void sde_encoder_phys_cmd_prepare_commit(
 		struct sde_encoder_phys *phys_enc)
 {
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_encoder_phys_cmd *cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_kms *sde_kms = phys_enc->sde_kms;

-	if (!phys_enc)
-		return;
-
-	if (!sde_encoder_phys_cmd_is_master(phys_enc))
+	if (!phys_enc || !sde_encoder_phys_cmd_is_master(phys_enc))
 		return;

 	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
@@ -1822,8 +1816,12 @@
 		return;

 	sde_encoder_phys_cmd_connect_te(phys_enc, false);
+	_sde_encoder_phys_cmd_config_autorefresh(phys_enc, 0);
+
+	if (sde_kms && sde_kms->catalog &&
+			(sde_kms->catalog->autorefresh_disable_seq == AUTOREFRESH_DISABLE_SEQ1)) {
 	_sde_encoder_autorefresh_disable_seq1(phys_enc);
 	_sde_encoder_autorefresh_disable_seq2(phys_enc);
+	}
 	sde_encoder_phys_cmd_connect_te(phys_enc, true);

 	SDE_DEBUG_CMDENC(cmd_enc, "autorefresh disabled successfully\n");

View File

@@ -682,7 +682,7 @@ static int _sde_enc_phys_wb_validate_cwb(struct sde_encoder_phys *phys_enc,
int out_width = 0, out_height = 0; int out_width = 0, out_height = 0;
int ds_srcw = 0, ds_srch = 0, ds_outw = 0, ds_outh = 0; int ds_srcw = 0, ds_srch = 0, ds_outw = 0, ds_outh = 0;
const struct sde_format *fmt; const struct sde_format *fmt;
int data_pt; int data_pt, prog_line;
int ds_in_use = false; int ds_in_use = false;
int i = 0; int i = 0;
int ret = 0; int ret = 0;
@@ -710,6 +710,12 @@ static int _sde_enc_phys_wb_validate_cwb(struct sde_encoder_phys *phys_enc,
return -EINVAL; return -EINVAL;
} }
prog_line = sde_connector_get_property(conn_state, CONNECTOR_PROP_EARLY_FENCE_LINE);
if (prog_line) {
SDE_ERROR("early fence not supported with CWB, prog_line:%d\n", prog_line);
return -EINVAL;
}
data_pt = sde_crtc_get_property(cstate, CRTC_PROP_CAPTURE_OUTPUT); data_pt = sde_crtc_get_property(cstate, CRTC_PROP_CAPTURE_OUTPUT);
/* compute cumulative ds output dimensions if in use */ /* compute cumulative ds output dimensions if in use */
@@ -880,14 +886,7 @@ static int sde_encoder_phys_wb_atomic_check(
return -EINVAL; return -EINVAL;
} }
SDE_DEBUG("[fb_fmt:%x,%llx]\n", fb->format->format, SDE_DEBUG("[fb_fmt:%x,%llx]\n", fb->format->format, fb->modifier);
fb->modifier);
if (SDE_FORMAT_IS_YUV(fmt) &&
!(wb_cfg->features & BIT(SDE_WB_YUV_CONFIG))) {
SDE_ERROR("invalid output format %x\n", fmt->base.pixel_format);
return -EINVAL;
}
if (fmt->chroma_sample == SDE_CHROMA_H2V1 || if (fmt->chroma_sample == SDE_CHROMA_H2V1 ||
fmt->chroma_sample == SDE_CHROMA_H1V2) { fmt->chroma_sample == SDE_CHROMA_H1V2) {
@@ -1146,6 +1145,24 @@ static void _sde_encoder_phys_wb_update_flush(struct sde_encoder_phys *phys_enc)
hw_wb->idx - WB_0); hw_wb->idx - WB_0);
} }
static void _sde_encoder_phys_wb_setup_prog_line(struct sde_encoder_phys *phys_enc)
{
struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
struct sde_wb_device *wb_dev = wb_enc->wb_dev;
struct drm_connector_state *state = wb_dev->connector->state;
struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
u32 prog_line;
if (phys_enc->in_clone_mode || !hw_wb->ops.set_prog_line_count)
return;
prog_line = sde_connector_get_property(state, CONNECTOR_PROP_EARLY_FENCE_LINE);
if (wb_enc->prog_line != prog_line) {
wb_enc->prog_line = prog_line;
hw_wb->ops.set_prog_line_count(hw_wb, prog_line);
}
}
/** /**
* sde_encoder_phys_wb_setup - setup writeback encoder * sde_encoder_phys_wb_setup - setup writeback encoder
* @phys_enc: Pointer to physical encoder * @phys_enc: Pointer to physical encoder
@@ -1220,42 +1237,81 @@ static void sde_encoder_phys_wb_setup(
sde_encoder_phys_wb_setup_cdp(phys_enc, wb_enc->wb_fmt); sde_encoder_phys_wb_setup_cdp(phys_enc, wb_enc->wb_fmt);
_sde_encoder_phys_wb_setup_cwb(phys_enc, true); _sde_encoder_phys_wb_setup_cwb(phys_enc, true);
_sde_encoder_phys_wb_setup_prog_line(phys_enc);
}
static void sde_encoder_phys_wb_ctl_start_irq(void *arg, int irq_idx)
{
struct sde_encoder_phys_wb *wb_enc = arg;
struct sde_encoder_phys *phys_enc;
struct sde_hw_wb *hw_wb;
u32 line_cnt = 0;
if (!wb_enc)
return;
phys_enc = &wb_enc->base;
if (atomic_add_unless(&phys_enc->pending_ctl_start_cnt, -1, 0))
wake_up_all(&phys_enc->pending_kickoff_wq);
hw_wb = wb_enc->hw_wb;
if (hw_wb->ops.get_line_count)
line_cnt = hw_wb->ops.get_line_count(hw_wb);
SDE_EVT32_IRQ(DRMID(phys_enc->parent), WBID(wb_enc), line_cnt);
} }
static void _sde_encoder_phys_wb_frame_done_helper(void *arg, bool frame_error) static void _sde_encoder_phys_wb_frame_done_helper(void *arg, bool frame_error)
{ {
struct sde_encoder_phys_wb *wb_enc = arg; struct sde_encoder_phys_wb *wb_enc = arg;
struct sde_encoder_phys *phys_enc = &wb_enc->base; struct sde_encoder_phys *phys_enc = &wb_enc->base;
struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
u32 event = frame_error ? SDE_ENCODER_FRAME_EVENT_ERROR : 0; u32 event = frame_error ? SDE_ENCODER_FRAME_EVENT_ERROR : 0;
u32 ubwc_error = 0;
/* don't notify upper layer for internal commit */ /* don't notify upper layer for internal commit */
if (phys_enc->enable_state == SDE_ENC_DISABLING && if (phys_enc->enable_state == SDE_ENC_DISABLING && !phys_enc->in_clone_mode)
!phys_enc->in_clone_mode) goto end;
goto complete;
if (phys_enc->parent_ops.handle_frame_done && if (phys_enc->parent_ops.handle_frame_done &&
atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0)) { atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0)) {
event |= SDE_ENCODER_FRAME_EVENT_DONE | event |= SDE_ENCODER_FRAME_EVENT_DONE;
SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
/*
* signal retire-fence during wb-done
* - when prog_line is not configured
* - when prog_line is configured and line-ptr-irq is missed
*/
if (!wb_enc->prog_line || (wb_enc->prog_line &&
(atomic_read(&phys_enc->pending_kickoff_cnt) <
atomic_read(&phys_enc->pending_retire_fence_cnt)))) {
atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0);
event |= SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
}
if (phys_enc->in_clone_mode) if (phys_enc->in_clone_mode)
event |= SDE_ENCODER_FRAME_EVENT_CWB_DONE; event |= SDE_ENCODER_FRAME_EVENT_CWB_DONE
| SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
else else
event |= SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE; event |= SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
phys_enc->parent_ops.handle_frame_done(phys_enc->parent, phys_enc->parent_ops.handle_frame_done(phys_enc->parent, phys_enc, event);
phys_enc, event);
} }
if (!phys_enc->in_clone_mode && phys_enc->parent_ops.handle_vblank_virt) if (!phys_enc->in_clone_mode && phys_enc->parent_ops.handle_vblank_virt)
phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent, phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent, phys_enc);
phys_enc);
SDE_EVT32_IRQ(DRMID(phys_enc->parent), hw_wb->idx - WB_0, event, end:
frame_error); if (frame_error && wb_enc->hw_wb->ops.get_ubwc_error
&& wb_enc->hw_wb->ops.clear_ubwc_error) {
wb_enc->hw_wb->ops.get_ubwc_error(wb_enc->hw_wb);
wb_enc->hw_wb->ops.clear_ubwc_error(wb_enc->hw_wb);
}
SDE_EVT32_IRQ(DRMID(phys_enc->parent), WBID(wb_enc), phys_enc->in_clone_mode,
phys_enc->enable_state, event, atomic_read(&phys_enc->pending_kickoff_cnt),
atomic_read(&phys_enc->pending_retire_fence_cnt),
ubwc_error, frame_error);
complete:
wake_up_all(&phys_enc->pending_kickoff_wq); wake_up_all(&phys_enc->pending_kickoff_wq);
} }
@@ -1279,6 +1335,30 @@ static void sde_encoder_phys_wb_done_irq(void *arg, int irq_idx)
_sde_encoder_phys_wb_frame_done_helper(arg, false); _sde_encoder_phys_wb_frame_done_helper(arg, false);
} }
static void sde_encoder_phys_wb_lineptr_irq(void *arg, int irq_idx)
{
struct sde_encoder_phys_wb *wb_enc = arg;
struct sde_encoder_phys *phys_enc;
struct sde_hw_wb *hw_wb;
u32 event = 0, line_cnt = 0;
if (!wb_enc || !wb_enc->prog_line)
return;
phys_enc = &wb_enc->base;
if (phys_enc->parent_ops.handle_frame_done &&
atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0)) {
event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
phys_enc->parent_ops.handle_frame_done(phys_enc->parent, phys_enc, event);
}
hw_wb = wb_enc->hw_wb;
if (hw_wb->ops.get_line_count)
line_cnt = hw_wb->ops.get_line_count(hw_wb);
SDE_EVT32_IRQ(DRMID(phys_enc->parent), WBID(wb_enc), event, wb_enc->prog_line, line_cnt);
}
/** /**
* sde_encoder_phys_wb_irq_ctrl - irq control of WB * sde_encoder_phys_wb_irq_ctrl - irq control of WB
* @phys: Pointer to physical encoder * @phys: Pointer to physical encoder
@@ -1319,12 +1399,20 @@ static void sde_encoder_phys_wb_irq_ctrl(
if (enable && atomic_inc_return(&phys->wbirq_refcount) == 1) { if (enable && atomic_inc_return(&phys->wbirq_refcount) == 1) {
sde_encoder_helper_register_irq(phys, INTR_IDX_WB_DONE); sde_encoder_helper_register_irq(phys, INTR_IDX_WB_DONE);
sde_encoder_helper_register_irq(phys, INTR_IDX_CTL_START);
if (test_bit(SDE_WB_PROG_LINE, &wb_cfg->features))
sde_encoder_helper_register_irq(phys, INTR_IDX_WB_LINEPTR);
for (index = 0; index < max_num_of_irqs; index++) for (index = 0; index < max_num_of_irqs; index++)
if (irq_table[index + pp] != SDE_NONE) if (irq_table[index + pp] != SDE_NONE)
sde_encoder_helper_register_irq(phys, irq_table[index + pp]); sde_encoder_helper_register_irq(phys, irq_table[index + pp]);
} else if (!enable && atomic_dec_return(&phys->wbirq_refcount) == 0) { } else if (!enable && atomic_dec_return(&phys->wbirq_refcount) == 0) {
sde_encoder_helper_unregister_irq(phys, INTR_IDX_WB_DONE); sde_encoder_helper_unregister_irq(phys, INTR_IDX_WB_DONE);
sde_encoder_helper_unregister_irq(phys, INTR_IDX_CTL_START);
if (test_bit(SDE_WB_PROG_LINE, &wb_cfg->features))
sde_encoder_helper_unregister_irq(phys, INTR_IDX_WB_LINEPTR);
for (index = 0; index < max_num_of_irqs; index++) for (index = 0; index < max_num_of_irqs; index++)
if (irq_table[index + pp] != SDE_NONE) if (irq_table[index + pp] != SDE_NONE)
@@ -1348,6 +1436,7 @@ static void sde_encoder_phys_wb_mode_set(
struct sde_hw_wb *hw_wb = wb_enc->hw_wb; struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
struct sde_rm_hw_iter iter; struct sde_rm_hw_iter iter;
int i, instance; int i, instance;
struct sde_encoder_irq *irq;
phys_enc->cached_mode = *adj_mode; phys_enc->cached_mode = *adj_mode;
instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0; instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
@@ -1391,37 +1480,14 @@ static void sde_encoder_phys_wb_mode_set(
phys_enc->kickoff_timeout_ms = phys_enc->kickoff_timeout_ms =
sde_encoder_helper_get_kickoff_timeout_ms(phys_enc->parent); sde_encoder_helper_get_kickoff_timeout_ms(phys_enc->parent);
/* set ctl idx for ctl-start-irq */
irq = &phys_enc->irq[INTR_IDX_CTL_START];
irq->hw_idx = phys_enc->hw_ctl->idx;
} }
static int sde_encoder_phys_wb_frame_timeout(struct sde_encoder_phys *phys_enc) static bool _sde_encoder_phys_wb_is_idle(struct sde_encoder_phys *phys_enc)
{ {
u32 event = 0;
while (atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0) &&
phys_enc->parent_ops.handle_frame_done) {
event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE
| SDE_ENCODER_FRAME_EVENT_ERROR;
if (phys_enc->in_clone_mode)
event |= SDE_ENCODER_FRAME_EVENT_CWB_DONE;
else
event |= SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
phys_enc->parent_ops.handle_frame_done(
phys_enc->parent, phys_enc, event);
SDE_EVT32(DRMID(phys_enc->parent), event,
atomic_read(&phys_enc->pending_retire_fence_cnt));
}
return event;
}
static bool _sde_encoder_phys_wb_is_idle(
struct sde_encoder_phys *phys_enc)
{
bool ret = false;
struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc); struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
struct sde_hw_wb *hw_wb = wb_enc->hw_wb; struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
struct sde_vbif_get_xin_status_params xin_status = {0}; struct sde_vbif_get_xin_status_params xin_status = {0};
@@ -1429,32 +1495,39 @@ static bool _sde_encoder_phys_wb_is_idle(
xin_status.vbif_idx = hw_wb->caps->vbif_idx; xin_status.vbif_idx = hw_wb->caps->vbif_idx;
xin_status.xin_id = hw_wb->caps->xin_id; xin_status.xin_id = hw_wb->caps->xin_id;
xin_status.clk_ctrl = hw_wb->caps->clk_ctrl; xin_status.clk_ctrl = hw_wb->caps->clk_ctrl;
if (sde_vbif_get_xin_status(phys_enc->sde_kms, &xin_status)) {
_sde_encoder_phys_wb_frame_done_helper(wb_enc, false);
ret = true;
}
return ret; return sde_vbif_get_xin_status(phys_enc->sde_kms, &xin_status);
} }
static void _sde_encoder_phys_wb_reset_state(
struct sde_encoder_phys *phys_enc) static void _sde_encoder_phys_wb_reset_state(struct sde_encoder_phys *phys_enc)
{ {
struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc); struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
phys_enc->enable_state = SDE_ENC_DISABLED; phys_enc->enable_state = SDE_ENC_DISABLED;
/* cleanup any pending buffer */
if (wb_enc->wb_fb && wb_enc->wb_aspace) {
msm_framebuffer_cleanup(wb_enc->wb_fb, wb_enc->wb_aspace);
drm_framebuffer_put(wb_enc->wb_fb);
wb_enc->wb_fb = NULL;
wb_enc->wb_aspace = NULL;
}
wb_enc->crtc = NULL; wb_enc->crtc = NULL;
phys_enc->hw_cdm = NULL; phys_enc->hw_cdm = NULL;
phys_enc->hw_ctl = NULL; phys_enc->hw_ctl = NULL;
phys_enc->in_clone_mode = false; phys_enc->in_clone_mode = false;
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
atomic_set(&phys_enc->pending_ctl_start_cnt, 0);
} }
static int _sde_encoder_phys_wb_wait_for_commit_done( static int _sde_encoder_phys_wb_wait_for_idle(struct sde_encoder_phys *phys_enc, bool force_wait)
struct sde_encoder_phys *phys_enc, bool is_disable)
{ {
struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc); struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
u32 event = 0;
int rc = 0;
struct sde_encoder_wait_info wait_info = {0}; struct sde_encoder_wait_info wait_info = {0};
int rc = 0;
bool is_idle;
/* Return EWOULDBLOCK since we know the wait isn't necessary */ /* Return EWOULDBLOCK since we know the wait isn't necessary */
if (phys_enc->enable_state == SDE_ENC_DISABLED) { if (phys_enc->enable_state == SDE_ENC_DISABLED) {
@@ -1463,56 +1536,73 @@ static int _sde_encoder_phys_wb_wait_for_commit_done(
} }
SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), phys_enc->in_clone_mode, SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), phys_enc->in_clone_mode,
atomic_read(&phys_enc->pending_retire_fence_cnt), atomic_read(&phys_enc->pending_kickoff_cnt), force_wait);
!!wb_enc->wb_fb, is_disable);
if (!is_disable && phys_enc->in_clone_mode && if (!force_wait && phys_enc->in_clone_mode
(atomic_read(&phys_enc->pending_retire_fence_cnt) <= 1)) && (atomic_read(&phys_enc->pending_kickoff_cnt) <= 1))
goto skip_wait; return 0;
/* signal completion if commit with no framebuffer */ /*
if (!wb_enc->wb_fb) { * signal completion if commit with no framebuffer
SDE_DEBUG("no output framebuffer\n"); * handle frame-done when WB HW is idle
_sde_encoder_phys_wb_frame_done_helper(wb_enc, false); */
is_idle = _sde_encoder_phys_wb_is_idle(phys_enc);
if (!wb_enc->wb_fb || is_idle) {
SDE_EVT32((phys_enc->parent), WBID(wb_enc), !wb_enc->wb_fb, is_idle,
SDE_EVTLOG_FUNC_CASE1);
goto frame_done;
} }
if (atomic_read(&phys_enc->pending_retire_fence_cnt) > 1) if (atomic_read(&phys_enc->pending_kickoff_cnt) > 1)
wait_info.count_check = 1; wait_info.count_check = 1;
wait_info.wq = &phys_enc->pending_kickoff_wq; wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_retire_fence_cnt; wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = max_t(u32, wb_enc->wbdone_timeout, wait_info.timeout_ms = max_t(u32, wb_enc->wbdone_timeout, phys_enc->kickoff_timeout_ms);
phys_enc->kickoff_timeout_ms);
rc = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_WB_DONE, rc = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_WB_DONE, &wait_info);
&wait_info); if (rc == -ETIMEDOUT) {
if (rc == -ETIMEDOUT && _sde_encoder_phys_wb_is_idle(phys_enc)) { /* handle frame-done when WB HW is idle */
if (_sde_encoder_phys_wb_is_idle(phys_enc))
rc = 0; rc = 0;
} else if (rc == -ETIMEDOUT) {
SDE_ERROR("caller:%pS - wb:%d, clone_mode:%d kickoff timed out\n",
__builtin_return_address(0), WBID(wb_enc), phys_enc->in_clone_mode);
SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc),
atomic_read(&phys_enc->pending_retire_fence_cnt), SDE_EVTLOG_ERROR); atomic_read(&phys_enc->pending_kickoff_cnt), SDE_EVTLOG_ERROR);
SDE_ERROR("wb:%d clone_mode:%d, kickoff timed out\n", goto frame_done;
WBID(wb_enc), phys_enc->in_clone_mode);
event = sde_encoder_phys_wb_frame_timeout(phys_enc);
} }
/* cleanup writeback framebuffer */ return 0;
if (wb_enc->wb_fb && wb_enc->wb_aspace) {
msm_framebuffer_cleanup(wb_enc->wb_fb, wb_enc->wb_aspace);
drm_framebuffer_put(wb_enc->wb_fb);
wb_enc->wb_fb = NULL;
wb_enc->wb_aspace = NULL;
}
skip_wait: frame_done:
SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), event, rc); _sde_encoder_phys_wb_frame_done_helper(wb_enc, rc ? true : false);
return rc;
}
/* cleanup previous buffer if pending */ static int _sde_encoder_phys_wb_wait_for_ctl_start(struct sde_encoder_phys *phys_enc)
if (wb_enc->cwb_old_fb && wb_enc->cwb_old_aspace) { {
msm_framebuffer_cleanup(wb_enc->cwb_old_fb, wb_enc->cwb_old_aspace); struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
drm_framebuffer_put(wb_enc->cwb_old_fb); struct sde_encoder_wait_info wait_info = {0};
wb_enc->cwb_old_fb = NULL; int rc = 0;
wb_enc->cwb_old_aspace = NULL;
if (!atomic_read(&phys_enc->pending_ctl_start_cnt))
return 0;
SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), phys_enc->in_clone_mode,
atomic_read(&phys_enc->pending_kickoff_cnt),
atomic_read(&phys_enc->pending_retire_fence_cnt),
atomic_read(&phys_enc->pending_ctl_start_cnt));
wait_info.wq = &phys_enc->pending_kickoff_wq;
wait_info.atomic_cnt = &phys_enc->pending_ctl_start_cnt;
wait_info.timeout_ms = max_t(u32, wb_enc->wbdone_timeout, phys_enc->kickoff_timeout_ms);
rc = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START, &wait_info);
if (rc == -ETIMEDOUT) {
atomic_add_unless(&phys_enc->pending_ctl_start_cnt, -1, 0);
SDE_ERROR("wb:%d ctl_start timed out\n", WBID(wb_enc));
SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), SDE_EVTLOG_ERROR);
} }
return rc; return rc;
@@ -1522,30 +1612,65 @@ skip_wait:
* sde_encoder_phys_wb_wait_for_commit_done - wait until request is committed * sde_encoder_phys_wb_wait_for_commit_done - wait until request is committed
* @phys_enc: Pointer to physical encoder * @phys_enc: Pointer to physical encoder
*/ */
static int sde_encoder_phys_wb_wait_for_commit_done( static int sde_encoder_phys_wb_wait_for_commit_done(struct sde_encoder_phys *phys_enc)
struct sde_encoder_phys *phys_enc)
{ {
int rc; struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
int rc, pending_cnt, i;
bool is_idle;
if (phys_enc->enable_state == SDE_ENC_DISABLING && /* CWB - wait for previous frame completion */
phys_enc->in_clone_mode) { if (phys_enc->in_clone_mode) {
rc = _sde_encoder_phys_wb_wait_for_commit_done(phys_enc, true); rc = _sde_encoder_phys_wb_wait_for_idle(phys_enc, false);
_sde_encoder_phys_wb_reset_state(phys_enc); goto end;
sde_encoder_phys_wb_irq_ctrl(phys_enc, false); }
} else {
rc = _sde_encoder_phys_wb_wait_for_commit_done(phys_enc, false); /*
* WB - wait for ctl-start-irq by default and additionally for
* wb-done-irq during timeout or serialize frame-trigger
*/
rc = _sde_encoder_phys_wb_wait_for_ctl_start(phys_enc);
pending_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
is_idle = _sde_encoder_phys_wb_is_idle(phys_enc);
if (rc || (pending_cnt > 1) || (pending_cnt && is_idle)
|| (!rc && (phys_enc->frame_trigger_mode == FRAME_DONE_WAIT_SERIALIZE))) {
for (i = 0; i < pending_cnt; i++)
rc |= _sde_encoder_phys_wb_wait_for_idle(phys_enc, true);
if (rc) {
SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc),
phys_enc->frame_trigger_mode,
atomic_read(&phys_enc->pending_kickoff_cnt), is_idle, rc);
SDE_ERROR("wb:%d failed wait_for_idle:%d\n", WBID(wb_enc), rc);
}
}
end:
/* cleanup any pending previous buffer */
if (wb_enc->old_fb && wb_enc->old_aspace) {
msm_framebuffer_cleanup(wb_enc->old_fb, wb_enc->old_aspace);
drm_framebuffer_put(wb_enc->old_fb);
wb_enc->old_fb = NULL;
wb_enc->old_aspace = NULL;
} }
return rc; return rc;
} }
static int sde_encoder_phys_wb_wait_for_tx_complete( static int sde_encoder_phys_wb_wait_for_tx_complete(struct sde_encoder_phys *phys_enc)
struct sde_encoder_phys *phys_enc)
{ {
if (!atomic_read(&phys_enc->pending_retire_fence_cnt)) int rc = 0;
return 0;
return _sde_encoder_phys_wb_wait_for_commit_done(phys_enc, true); if (atomic_read(&phys_enc->pending_kickoff_cnt))
rc = _sde_encoder_phys_wb_wait_for_idle(phys_enc, true);
if ((phys_enc->enable_state == SDE_ENC_DISABLING) && phys_enc->in_clone_mode) {
_sde_encoder_phys_wb_reset_state(phys_enc);
sde_encoder_phys_wb_irq_ctrl(phys_enc, false);
}
return rc;
} }
/** /**
@@ -1558,12 +1683,20 @@ static int sde_encoder_phys_wb_prepare_for_kickoff(struct sde_encoder_phys *phys
struct sde_encoder_kickoff_params *params) struct sde_encoder_kickoff_params *params)
{ {
struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc); struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
int ret = 0;
if (phys_enc->in_clone_mode) { phys_enc->frame_trigger_mode = params->frame_trigger_mode;
wb_enc->cwb_old_fb = wb_enc->wb_fb; if (!phys_enc->in_clone_mode && (phys_enc->frame_trigger_mode == FRAME_DONE_WAIT_DEFAULT)
wb_enc->cwb_old_aspace = wb_enc->wb_aspace; && (atomic_read(&phys_enc->pending_kickoff_cnt))) {
ret = _sde_encoder_phys_wb_wait_for_idle(phys_enc, true);
if (ret)
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
} }
/* cache the framebuffer/aspace for cleanup later */
wb_enc->old_fb = wb_enc->wb_fb;
wb_enc->old_aspace = wb_enc->wb_aspace;
/* set OT limit & enable traffic shaper */ /* set OT limit & enable traffic shaper */
sde_encoder_phys_wb_setup(phys_enc); sde_encoder_phys_wb_setup(phys_enc);
@@ -1572,8 +1705,8 @@ static int sde_encoder_phys_wb_prepare_for_kickoff(struct sde_encoder_phys *phys
_sde_encoder_phys_wb_update_cwb_flush(phys_enc, true); _sde_encoder_phys_wb_update_cwb_flush(phys_enc, true);
SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), phys_enc->in_clone_mode, SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), phys_enc->in_clone_mode,
atomic_read(&phys_enc->pending_retire_fence_cnt)); phys_enc->frame_trigger_mode, ret);
return 0; return ret;
} }
/** /**
@@ -1783,15 +1916,14 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc); struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
struct sde_hw_wb *hw_wb = wb_enc->hw_wb; struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
if (phys_enc->enable_state == SDE_ENC_DISABLED) { if (phys_enc->enable_state == SDE_ENC_DISABLED) {
SDE_ERROR("encoder is already disabled\n"); SDE_ERROR("encoder is already disabled\n");
return; return;
} }
if (!phys_enc->in_clone_mode || !wb_enc->crtc->state->active) SDE_DEBUG("enc:%d, wb:%d, clone_mode:%d, kickoff_cnt:%u\n",
_sde_encoder_phys_wb_wait_for_commit_done(phys_enc, true); DRMID(phys_enc->parent), WBID(wb_enc), phys_enc->in_clone_mode,
atomic_read(&phys_enc->pending_kickoff_cnt));
if (!phys_enc->hw_ctl || !phys_enc->parent || if (!phys_enc->hw_ctl || !phys_enc->parent ||
!phys_enc->sde_kms || !wb_enc->fb_disable) { !phys_enc->sde_kms || !wb_enc->fb_disable) {
@@ -1839,10 +1971,11 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
phys_enc->hw_ctl->ops.trigger_flush(phys_enc->hw_ctl); phys_enc->hw_ctl->ops.trigger_flush(phys_enc->hw_ctl);
sde_encoder_helper_trigger_start(phys_enc); sde_encoder_helper_trigger_start(phys_enc);
_sde_encoder_phys_wb_wait_for_commit_done(phys_enc, true); _sde_encoder_phys_wb_wait_for_idle(phys_enc, true);
sde_encoder_phys_wb_irq_ctrl(phys_enc, false); sde_encoder_phys_wb_irq_ctrl(phys_enc, false);
exit: exit:
SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), phys_enc->in_clone_mode);
_sde_encoder_phys_wb_reset_state(phys_enc); _sde_encoder_phys_wb_reset_state(phys_enc);
} }
@@ -1980,7 +2113,7 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init(
const struct sde_wb_cfg *wb_cfg; const struct sde_wb_cfg *wb_cfg;
struct sde_hw_mdp *hw_mdp; struct sde_hw_mdp *hw_mdp;
struct sde_encoder_irq *irq; struct sde_encoder_irq *irq;
int ret = 0; int ret = 0, i;
SDE_DEBUG("\n"); SDE_DEBUG("\n");
@@ -2056,81 +2189,82 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init(
phys_enc->intf_mode = INTF_MODE_WB_LINE; phys_enc->intf_mode = INTF_MODE_WB_LINE;
phys_enc->intf_idx = p->intf_idx; phys_enc->intf_idx = p->intf_idx;
phys_enc->enc_spinlock = p->enc_spinlock; phys_enc->enc_spinlock = p->enc_spinlock;
phys_enc->vblank_ctl_lock = p->vblank_ctl_lock;
atomic_set(&phys_enc->pending_retire_fence_cnt, 0); atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
atomic_set(&phys_enc->pending_ctl_start_cnt, 0);
init_waitqueue_head(&phys_enc->pending_kickoff_wq); init_waitqueue_head(&phys_enc->pending_kickoff_wq);
wb_cfg = wb_enc->hw_wb->caps; wb_cfg = wb_enc->hw_wb->caps;
irq = &phys_enc->irq[INTR_IDX_WB_DONE]; for (i = 0; i < INTR_IDX_MAX; i++) {
irq = &phys_enc->irq[i];
INIT_LIST_HEAD(&irq->cb.list); INIT_LIST_HEAD(&irq->cb.list);
irq->irq_idx = -EINVAL;
irq->hw_idx = -EINVAL;
irq->cb.arg = wb_enc;
}
irq = &phys_enc->irq[INTR_IDX_WB_DONE];
irq->name = "wb_done"; irq->name = "wb_done";
irq->hw_idx = wb_enc->hw_wb->idx; irq->hw_idx = wb_enc->hw_wb->idx;
irq->irq_idx = -1;
irq->intr_type = sde_encoder_phys_wb_get_intr_type(wb_enc->hw_wb); irq->intr_type = sde_encoder_phys_wb_get_intr_type(wb_enc->hw_wb);
irq->intr_idx = INTR_IDX_WB_DONE; irq->intr_idx = INTR_IDX_WB_DONE;
irq->cb.arg = wb_enc;
irq->cb.func = sde_encoder_phys_wb_done_irq; irq->cb.func = sde_encoder_phys_wb_done_irq;
irq = &phys_enc->irq[INTR_IDX_CTL_START];
irq->name = "ctl_start";
irq->intr_type = SDE_IRQ_TYPE_CTL_START;
irq->intr_idx = INTR_IDX_CTL_START;
irq->cb.func = sde_encoder_phys_wb_ctl_start_irq;
irq = &phys_enc->irq[INTR_IDX_WB_LINEPTR];
irq->name = "lineptr_irq";
irq->hw_idx = wb_enc->hw_wb->idx;
irq->intr_type = SDE_IRQ_TYPE_WB_PROG_LINE;
irq->intr_idx = INTR_IDX_WB_LINEPTR;
irq->cb.func = sde_encoder_phys_wb_lineptr_irq;
if (wb_cfg && (wb_cfg->features & BIT(SDE_WB_HAS_DCWB))) { if (wb_cfg && (wb_cfg->features & BIT(SDE_WB_HAS_DCWB))) {
irq = &phys_enc->irq[INTR_IDX_PP_CWB_OVFL]; irq = &phys_enc->irq[INTR_IDX_PP_CWB_OVFL];
INIT_LIST_HEAD(&irq->cb.list);
irq->name = "pp_cwb0_overflow"; irq->name = "pp_cwb0_overflow";
irq->hw_idx = PINGPONG_CWB_0; irq->hw_idx = PINGPONG_CWB_0;
irq->irq_idx = -1;
irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW; irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
irq->intr_idx = INTR_IDX_PP_CWB_OVFL; irq->intr_idx = INTR_IDX_PP_CWB_OVFL;
irq->cb.arg = wb_enc;
irq->cb.func = sde_encoder_phys_cwb_ovflow; irq->cb.func = sde_encoder_phys_cwb_ovflow;
} else { } else {
irq = &phys_enc->irq[INTR_IDX_PP1_OVFL]; irq = &phys_enc->irq[INTR_IDX_PP1_OVFL];
INIT_LIST_HEAD(&irq->cb.list);
irq->name = "pp1_overflow"; irq->name = "pp1_overflow";
irq->hw_idx = CWB_1; irq->hw_idx = CWB_1;
irq->irq_idx = -1;
irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW; irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
irq->intr_idx = INTR_IDX_PP1_OVFL; irq->intr_idx = INTR_IDX_PP1_OVFL;
irq->cb.arg = wb_enc;
irq->cb.func = sde_encoder_phys_cwb_ovflow; irq->cb.func = sde_encoder_phys_cwb_ovflow;
irq = &phys_enc->irq[INTR_IDX_PP2_OVFL]; irq = &phys_enc->irq[INTR_IDX_PP2_OVFL];
INIT_LIST_HEAD(&irq->cb.list);
irq->name = "pp2_overflow"; irq->name = "pp2_overflow";
irq->hw_idx = CWB_2; irq->hw_idx = CWB_2;
irq->irq_idx = -1;
irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW; irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
irq->intr_idx = INTR_IDX_PP2_OVFL; irq->intr_idx = INTR_IDX_PP2_OVFL;
irq->cb.arg = wb_enc;
irq->cb.func = sde_encoder_phys_cwb_ovflow; irq->cb.func = sde_encoder_phys_cwb_ovflow;
irq = &phys_enc->irq[INTR_IDX_PP3_OVFL]; irq = &phys_enc->irq[INTR_IDX_PP3_OVFL];
INIT_LIST_HEAD(&irq->cb.list);
irq->name = "pp3_overflow"; irq->name = "pp3_overflow";
irq->hw_idx = CWB_3; irq->hw_idx = CWB_3;
irq->irq_idx = -1;
irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW; irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
irq->intr_idx = INTR_IDX_PP3_OVFL; irq->intr_idx = INTR_IDX_PP3_OVFL;
irq->cb.arg = wb_enc;
irq->cb.func = sde_encoder_phys_cwb_ovflow; irq->cb.func = sde_encoder_phys_cwb_ovflow;
irq = &phys_enc->irq[INTR_IDX_PP4_OVFL]; irq = &phys_enc->irq[INTR_IDX_PP4_OVFL];
INIT_LIST_HEAD(&irq->cb.list);
irq->name = "pp4_overflow"; irq->name = "pp4_overflow";
irq->hw_idx = CWB_4; irq->hw_idx = CWB_4;
irq->irq_idx = -1;
irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW; irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
irq->intr_idx = INTR_IDX_PP4_OVFL; irq->intr_idx = INTR_IDX_PP4_OVFL;
irq->cb.arg = wb_enc;
irq->cb.func = sde_encoder_phys_cwb_ovflow; irq->cb.func = sde_encoder_phys_cwb_ovflow;
irq = &phys_enc->irq[INTR_IDX_PP5_OVFL]; irq = &phys_enc->irq[INTR_IDX_PP5_OVFL];
INIT_LIST_HEAD(&irq->cb.list);
irq->name = "pp5_overflow"; irq->name = "pp5_overflow";
irq->hw_idx = CWB_5; irq->hw_idx = CWB_5;
irq->irq_idx = -1;
irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW; irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
irq->intr_idx = INTR_IDX_PP5_OVFL; irq->intr_idx = INTR_IDX_PP5_OVFL;
irq->cb.arg = wb_enc;
irq->cb.func = sde_encoder_phys_cwb_ovflow; irq->cb.func = sde_encoder_phys_cwb_ovflow;
} }
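Note: the rework above first default-initializes every IRQ slot in a loop and only then fills in the per-interrupt fields. A compile-and-run toy model of that two-pass pattern; the types, indices and values here are invented for illustration and are not the driver's definitions.

    #include <stdio.h>

    enum { IRQ_WB_DONE, IRQ_CTL_START, IRQ_LINEPTR, IRQ_MAX };

    struct irq_slot {
        const char *name;
        int hw_idx;
        int irq_idx;
    };

    int main(void)
    {
        struct irq_slot irqs[IRQ_MAX];
        int i;

        /* pass 1: every slot gets safe defaults so unused entries stay inert */
        for (i = 0; i < IRQ_MAX; i++) {
            irqs[i].name = "unused";
            irqs[i].hw_idx = -1;
            irqs[i].irq_idx = -1;
        }

        /* pass 2: only the interrupts this encoder cares about get real values */
        irqs[IRQ_WB_DONE].name = "wb_done";
        irqs[IRQ_WB_DONE].hw_idx = 2;

        irqs[IRQ_LINEPTR].name = "lineptr_irq";
        irqs[IRQ_LINEPTR].hw_idx = 2;

        for (i = 0; i < IRQ_MAX; i++)
            printf("%d: %s hw:%d irq:%d\n", i, irqs[i].name,
                   irqs[i].hw_idx, irqs[i].irq_idx);
        return 0;
    }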

View File

@@ -1348,6 +1348,10 @@ static int _add_to_irq_offset_list(struct sde_mdss_cfg *sde_cfg,
if (instance >= LTM_MAX) if (instance >= LTM_MAX)
err = true; err = true;
break; break;
case SDE_INTR_HWBLK_WB:
if (instance >= WB_MAX)
err = true;
break;
default: default:
SDE_ERROR("invalid hwblk_type: %d", blk_type); SDE_ERROR("invalid hwblk_type: %d", blk_type);
return -EINVAL; return -EINVAL;
@@ -2539,13 +2543,10 @@ static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
set_bit(SDE_WB_LINE_MODE, &wb->features); set_bit(SDE_WB_LINE_MODE, &wb->features);
else else
set_bit(SDE_WB_BLOCK_MODE, &wb->features); set_bit(SDE_WB_BLOCK_MODE, &wb->features);
set_bit(SDE_WB_TRAFFIC_SHAPER, &wb->features);
set_bit(SDE_WB_YUV_CONFIG, &wb->features);
if (test_bit(SDE_FEATURE_CDP, sde_cfg->features)) if (test_bit(SDE_FEATURE_CDP, sde_cfg->features))
set_bit(SDE_WB_CDP, &wb->features); set_bit(SDE_WB_CDP, &wb->features);
set_bit(SDE_WB_QOS, &wb->features);
if (sde_cfg->vbif_qos_nlvl == 8) if (sde_cfg->vbif_qos_nlvl == 8)
set_bit(SDE_WB_QOS_8LVL, &wb->features); set_bit(SDE_WB_QOS_8LVL, &wb->features);
@@ -2555,11 +2556,16 @@ static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
if (test_bit(SDE_FEATURE_CWB_CROP, sde_cfg->features)) if (test_bit(SDE_FEATURE_CWB_CROP, sde_cfg->features))
set_bit(SDE_WB_CROP, &wb->features); set_bit(SDE_WB_CROP, &wb->features);
set_bit(SDE_WB_XY_ROI_OFFSET, &wb->features);
if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev)) if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev))
set_bit(SDE_WB_INPUT_CTRL, &wb->features); set_bit(SDE_WB_INPUT_CTRL, &wb->features);
if (SDE_HW_MAJOR(sde_cfg->hw_rev) >= SDE_HW_MAJOR(SDE_HW_VER_900)) {
set_bit(SDE_WB_PROG_LINE, &wb->features);
set_bit(SDE_WB_SYS_CACHE, &wb->features);
}
rc = _add_to_irq_offset_list(sde_cfg, SDE_INTR_HWBLK_WB, wb->id, wb->base);
if (test_bit(SDE_FEATURE_DEDICATED_CWB, sde_cfg->features)) { if (test_bit(SDE_FEATURE_DEDICATED_CWB, sde_cfg->features)) {
set_bit(SDE_WB_HAS_DCWB, &wb->features); set_bit(SDE_WB_HAS_DCWB, &wb->features);
if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev)) if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev))
@@ -3483,36 +3489,57 @@ static int sde_cache_parse_dt(struct device_node *np,
struct sde_mdss_cfg *sde_cfg) struct sde_mdss_cfg *sde_cfg)
{ {
struct llcc_slice_desc *slice; struct llcc_slice_desc *slice;
struct sde_sc_cfg *sc_cfg = sde_cfg->sc_cfg;
struct device_node *llcc_node; struct device_node *llcc_node;
int i;
if (!sde_cfg) { if (!sde_cfg) {
SDE_ERROR("invalid argument\n"); SDE_ERROR("invalid argument\n");
return -EINVAL; return -EINVAL;
} }
if (!test_bit(SDE_FEATURE_SYSCACHE, sde_cfg->features))
return 0;
llcc_node = of_find_node_by_name(NULL, "cache-controller"); llcc_node = of_find_node_by_name(NULL, "cache-controller");
if (!llcc_node) { if (!llcc_node) {
SDE_DEBUG("cache controller missing, will disable img cache\n"); SDE_DEBUG("cache controller missing, will disable img cache\n");
return 0; return 0;
} }
slice = llcc_slice_getd(LLCC_DISP);
for (i = 0; i < SDE_SYS_CACHE_MAX; i++) {
struct sde_sc_cfg *sc_cfg = &sde_cfg->sc_cfg[i];
u32 usecase_id = 0;
if (!sc_cfg->has_sys_cache)
continue;
switch (i) {
case SDE_SYS_CACHE_DISP:
usecase_id = LLCC_DISP;
break;
case SDE_SYS_CACHE_DISP_WB:
usecase_id = LLCC_DISP;
break;
default:
usecase_id = 0;
SDE_DEBUG("invalid sys cache:%d\n", i);
break;
}
if (!usecase_id)
continue;
slice = llcc_slice_getd(usecase_id);
if (IS_ERR_OR_NULL(slice)) { if (IS_ERR_OR_NULL(slice)) {
SDE_ERROR("failed to get system cache %ld\n", PTR_ERR(slice)); SDE_ERROR("failed to get system cache %ld\n", PTR_ERR(slice));
return -EINVAL; return -EINVAL;
} }
sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache = true;
sc_cfg[SDE_SYS_CACHE_DISP].llcc_scid = llcc_get_slice_id(slice);
sc_cfg[SDE_SYS_CACHE_DISP].llcc_slice_size = llcc_get_slice_size(slice);
SDE_DEBUG("img cache scid:%d slice_size:%zu kb\n",
sc_cfg[SDE_SYS_CACHE_DISP].llcc_scid,
sc_cfg[SDE_SYS_CACHE_DISP].llcc_slice_size);

sc_cfg->llcc_scid = llcc_get_slice_id(slice);
sc_cfg->llcc_slice_size = llcc_get_slice_size(slice);
SDE_DEBUG("img cache:%d usecase_id:%d, scid:%d slice_size:%zu kb\n",
i, usecase_id, sc_cfg->llcc_scid, sc_cfg->llcc_slice_size);
llcc_slice_putd(slice); llcc_slice_putd(slice);
}
return 0; return 0;
} }
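Note: the parsing loop above resolves each enabled sde_sc_cfg entry to an LLCC usecase and queries its slice parameters. A trimmed sketch of that lookup using the upstream llcc-qcom API, with error handling reduced to the minimum and only the LLCC_DISP usecase shown.

    #include <linux/err.h>
    #include <linux/soc/qcom/llcc-qcom.h>

    /* Query the display LLCC slice for the SCID and size the catalog stores. */
    static int example_query_disp_slice(int *scid, size_t *size)
    {
        struct llcc_slice_desc *slice;

        slice = llcc_slice_getd(LLCC_DISP);
        if (IS_ERR_OR_NULL(slice))
            return -EINVAL;

        *scid = llcc_get_slice_id(slice);	/* programmed into the SCID register fields */
        *size = llcc_get_slice_size(slice);	/* reported in KB by the debug print above */
        llcc_slice_putd(slice);

        return 0;
    }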
@@ -5006,7 +5033,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
set_bit(SDE_FEATURE_DITHER_LUMA_MODE, sde_cfg->features); set_bit(SDE_FEATURE_DITHER_LUMA_MODE, sde_cfg->features);
sde_cfg->mdss_hw_block_size = 0x158; sde_cfg->mdss_hw_block_size = 0x158;
set_bit(SDE_FEATURE_TRUSTED_VM, sde_cfg->features); set_bit(SDE_FEATURE_TRUSTED_VM, sde_cfg->features);
set_bit(SDE_FEATURE_SYSCACHE, sde_cfg->features);
sde_cfg->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache = true;
} else if (IS_HOLI_TARGET(hw_rev)) { } else if (IS_HOLI_TARGET(hw_rev)) {
set_bit(SDE_FEATURE_QSYNC, sde_cfg->features); set_bit(SDE_FEATURE_QSYNC, sde_cfg->features);
sde_cfg->perf.min_prefill_lines = 24; sde_cfg->perf.min_prefill_lines = 24;
@@ -5036,7 +5063,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
set_bit(SDE_FEATURE_VBIF_DISABLE_SHAREABLE, sde_cfg->features); set_bit(SDE_FEATURE_VBIF_DISABLE_SHAREABLE, sde_cfg->features);
sde_cfg->mdss_hw_block_size = 0x158; sde_cfg->mdss_hw_block_size = 0x158;
set_bit(SDE_FEATURE_TRUSTED_VM, sde_cfg->features); set_bit(SDE_FEATURE_TRUSTED_VM, sde_cfg->features);
set_bit(SDE_FEATURE_SYSCACHE, sde_cfg->features);
sde_cfg->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache = true;
} else if (IS_WAIPIO_TARGET(hw_rev)) { } else if (IS_WAIPIO_TARGET(hw_rev)) {
sde_cfg->allowed_dsc_reservation_switch = SDE_DP_DSC_RESERVATION_SWITCH; sde_cfg->allowed_dsc_reservation_switch = SDE_DP_DSC_RESERVATION_SWITCH;
set_bit(SDE_FEATURE_DEDICATED_CWB, sde_cfg->features); set_bit(SDE_FEATURE_DEDICATED_CWB, sde_cfg->features);
@@ -5058,7 +5085,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
set_bit(SDE_FEATURE_VBIF_DISABLE_SHAREABLE, sde_cfg->features); set_bit(SDE_FEATURE_VBIF_DISABLE_SHAREABLE, sde_cfg->features);
set_bit(SDE_FEATURE_DITHER_LUMA_MODE, sde_cfg->features); set_bit(SDE_FEATURE_DITHER_LUMA_MODE, sde_cfg->features);
sde_cfg->mdss_hw_block_size = 0x158; sde_cfg->mdss_hw_block_size = 0x158;
set_bit(SDE_FEATURE_SYSCACHE, sde_cfg->features);
sde_cfg->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache = true;
set_bit(SDE_FEATURE_MULTIRECT_ERROR, sde_cfg->features); set_bit(SDE_FEATURE_MULTIRECT_ERROR, sde_cfg->features);
set_bit(SDE_FEATURE_FP16, sde_cfg->features); set_bit(SDE_FEATURE_FP16, sde_cfg->features);
set_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &sde_cfg->mdp[0].features); set_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &sde_cfg->mdp[0].features);
@@ -5100,7 +5127,6 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
set_bit(SDE_FEATURE_VIG_P010, sde_cfg->features); set_bit(SDE_FEATURE_VIG_P010, sde_cfg->features);
set_bit(SDE_FEATURE_VBIF_DISABLE_SHAREABLE, sde_cfg->features); set_bit(SDE_FEATURE_VBIF_DISABLE_SHAREABLE, sde_cfg->features);
set_bit(SDE_FEATURE_DITHER_LUMA_MODE, sde_cfg->features); set_bit(SDE_FEATURE_DITHER_LUMA_MODE, sde_cfg->features);
set_bit(SDE_FEATURE_SYSCACHE, sde_cfg->features);
set_bit(SDE_FEATURE_MULTIRECT_ERROR, sde_cfg->features); set_bit(SDE_FEATURE_MULTIRECT_ERROR, sde_cfg->features);
set_bit(SDE_FEATURE_FP16, sde_cfg->features); set_bit(SDE_FEATURE_FP16, sde_cfg->features);
set_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &sde_cfg->mdp[0].features); set_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &sde_cfg->mdp[0].features);
@@ -5109,7 +5135,9 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
set_bit(SDE_FEATURE_HW_VSYNC_TS, sde_cfg->features); set_bit(SDE_FEATURE_HW_VSYNC_TS, sde_cfg->features);
set_bit(SDE_FEATURE_AVR_STEP, sde_cfg->features); set_bit(SDE_FEATURE_AVR_STEP, sde_cfg->features);
set_bit(SDE_FEATURE_VBIF_CLK_SPLIT, sde_cfg->features); set_bit(SDE_FEATURE_VBIF_CLK_SPLIT, sde_cfg->features);
sde_cfg->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache = true;
sde_cfg->allowed_dsc_reservation_switch = SDE_DP_DSC_RESERVATION_SWITCH; sde_cfg->allowed_dsc_reservation_switch = SDE_DP_DSC_RESERVATION_SWITCH;
sde_cfg->autorefresh_disable_seq = AUTOREFRESH_DISABLE_SEQ2;
sde_cfg->perf.min_prefill_lines = 40; sde_cfg->perf.min_prefill_lines = 40;
sde_cfg->vbif_qos_nlvl = 8; sde_cfg->vbif_qos_nlvl = 8;
sde_cfg->ts_prefill_rev = 2; sde_cfg->ts_prefill_rev = 2;

View File

@@ -166,13 +166,13 @@ enum {
IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_40) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_40)
/** /**
* Supported SSPP system cache settings
* Supported system cache settings
*/ */
#define SSPP_SYS_CACHE_EN_FLAG BIT(0)
#define SSPP_SYS_CACHE_SCID BIT(1)
#define SSPP_SYS_CACHE_OP_MODE BIT(2)
#define SSPP_SYS_CACHE_OP_TYPE BIT(3)
#define SSPP_SYS_CACHE_NO_ALLOC BIT(4)

#define SYS_CACHE_EN_FLAG BIT(0)
#define SYS_CACHE_SCID BIT(1)
#define SYS_CACHE_OP_MODE BIT(2)
#define SYS_CACHE_OP_TYPE BIT(3)
#define SYS_CACHE_NO_ALLOC BIT(4)
/** /**
* sde_sys_cache_type: Types of system cache supported * sde_sys_cache_type: Types of system cache supported
@@ -182,6 +182,7 @@ enum {
*/ */
enum sde_sys_cache_type { enum sde_sys_cache_type {
SDE_SYS_CACHE_DISP, SDE_SYS_CACHE_DISP,
SDE_SYS_CACHE_DISP_WB,
SDE_SYS_CACHE_MAX, SDE_SYS_CACHE_MAX,
SDE_SYS_CACHE_NONE = SDE_SYS_CACHE_MAX SDE_SYS_CACHE_NONE = SDE_SYS_CACHE_MAX
}; };
@@ -196,6 +197,7 @@ enum sde_intr_hwblk_type {
SDE_INTR_HWBLK_AD4, SDE_INTR_HWBLK_AD4,
SDE_INTR_HWBLK_INTF_TEAR, SDE_INTR_HWBLK_INTF_TEAR,
SDE_INTR_HWBLK_LTM, SDE_INTR_HWBLK_LTM,
SDE_INTR_HWBLK_WB,
SDE_INTR_HWBLK_MAX SDE_INTR_HWBLK_MAX
}; };
@@ -540,13 +542,8 @@ enum {
* @SDE_WB_CHROMA_DOWN, Writeback chroma down block, * @SDE_WB_CHROMA_DOWN, Writeback chroma down block,
* @SDE_WB_DOWNSCALE, Writeback integer downscaler, * @SDE_WB_DOWNSCALE, Writeback integer downscaler,
* @SDE_WB_DITHER, Dither block * @SDE_WB_DITHER, Dither block
* @SDE_WB_TRAFFIC_SHAPER, Writeback traffic shaper bloc
* @SDE_WB_UBWC, Writeback Universal bandwidth compression * @SDE_WB_UBWC, Writeback Universal bandwidth compression
* @SDE_WB_YUV_CONFIG Writeback supports output of YUV colorspace
* @SDE_WB_PIPE_ALPHA Writeback supports pipe alpha * @SDE_WB_PIPE_ALPHA Writeback supports pipe alpha
* @SDE_WB_XY_ROI_OFFSET Writeback supports x/y-offset of out ROI in
* the destination image
* @SDE_WB_QOS, Writeback supports QoS control, danger/safe/creq
* @SDE_WB_QOS_8LVL, Writeback supports 8-level QoS control * @SDE_WB_QOS_8LVL, Writeback supports 8-level QoS control
* @SDE_WB_CDP Writeback supports client driven prefetch * @SDE_WB_CDP Writeback supports client driven prefetch
* @SDE_WB_INPUT_CTRL Writeback supports from which pp block input pixel * @SDE_WB_INPUT_CTRL Writeback supports from which pp block input pixel
@@ -554,9 +551,11 @@ enum {
* @SDE_WB_HAS_CWB Writeback block supports concurrent writeback * @SDE_WB_HAS_CWB Writeback block supports concurrent writeback
* @SDE_WB_HAS_DCWB Writeback block supports dedicated CWB * @SDE_WB_HAS_DCWB Writeback block supports dedicated CWB
* @SDE_WB_CROP CWB supports cropping * @SDE_WB_CROP CWB supports cropping
* @SDE_WB_SYS_CACHE Writeback block supports system cache usage
* @SDE_WB_CWB_CTRL Separate CWB control is available for configuring * @SDE_WB_CWB_CTRL Separate CWB control is available for configuring
* @SDE_WB_DCWB_CTRL Separate DCWB control is available for configuring * @SDE_WB_DCWB_CTRL Separate DCWB control is available for configuring
* @SDE_WB_CWB_DITHER_CTRL CWB dither is available for configuring * @SDE_WB_CWB_DITHER_CTRL CWB dither is available for configuring
* @SDE_WB_PROG_LINE Writeback block supports programmable line ptr
* @SDE_WB_MAX maximum value * @SDE_WB_MAX maximum value
*/ */
enum { enum {
@@ -567,21 +566,19 @@ enum {
SDE_WB_CHROMA_DOWN, SDE_WB_CHROMA_DOWN,
SDE_WB_DOWNSCALE, SDE_WB_DOWNSCALE,
SDE_WB_DITHER, SDE_WB_DITHER,
SDE_WB_TRAFFIC_SHAPER,
SDE_WB_UBWC, SDE_WB_UBWC,
SDE_WB_YUV_CONFIG,
SDE_WB_PIPE_ALPHA, SDE_WB_PIPE_ALPHA,
SDE_WB_XY_ROI_OFFSET,
SDE_WB_QOS,
SDE_WB_QOS_8LVL, SDE_WB_QOS_8LVL,
SDE_WB_CDP, SDE_WB_CDP,
SDE_WB_INPUT_CTRL, SDE_WB_INPUT_CTRL,
SDE_WB_HAS_CWB, SDE_WB_HAS_CWB,
SDE_WB_HAS_DCWB, SDE_WB_HAS_DCWB,
SDE_WB_CROP, SDE_WB_CROP,
SDE_WB_SYS_CACHE,
SDE_WB_CWB_CTRL, SDE_WB_CWB_CTRL,
SDE_WB_DCWB_CTRL, SDE_WB_DCWB_CTRL,
SDE_WB_CWB_DITHER_CTRL, SDE_WB_CWB_DITHER_CTRL,
SDE_WB_PROG_LINE,
SDE_WB_MAX SDE_WB_MAX
}; };
@@ -1524,6 +1521,17 @@ struct sde_sc_cfg {
size_t llcc_slice_size; size_t llcc_slice_size;
}; };
/**
* autorefresh_disable_sequence - defines autorefresh disable sequences
* followed during bootup with continuous splash
* @AUTOREFRESH_DISABLE_SEQ1 - disable TE / disable autorefresh / Wait for tx-complete / enable TE
* @AUTOREFRESH_DISABLE_SEQ2 - disable TE / Disable autorefresh / enable TE
*/
enum autorefresh_disable_sequence {
AUTOREFRESH_DISABLE_SEQ1,
AUTOREFRESH_DISABLE_SEQ2,
};
/** /**
* struct sde_perf_cfg - performance control settings * struct sde_perf_cfg - performance control settings
* @max_bw_low low threshold of maximum bandwidth (kbps) * @max_bw_low low threshold of maximum bandwidth (kbps)
@@ -1681,6 +1689,7 @@ struct sde_perf_cfg {
* @pipe_order_type indicates if it is required to specify pipe order * @pipe_order_type indicates if it is required to specify pipe order
* @csc_type csc or csc_10bit support * @csc_type csc or csc_10bit support
* @allowed_dsc_reservation_switch intf to which dsc reservation switch is supported * @allowed_dsc_reservation_switch intf to which dsc reservation switch is supported
* @autorefresh_disable_seq indicates the autorefresh disable sequence; default is seq1
* @sc_cfg system cache configuration * @sc_cfg system cache configuration
* @perf performance control settings * @perf performance control settings
* @uidle_cfg settings for uidle feature * @uidle_cfg settings for uidle feature
@@ -1784,6 +1793,7 @@ struct sde_mdss_cfg {
u32 pipe_order_type; u32 pipe_order_type;
u32 csc_type; u32 csc_type;
u32 allowed_dsc_reservation_switch; u32 allowed_dsc_reservation_switch;
enum autorefresh_disable_sequence autorefresh_disable_seq;
struct sde_sc_cfg sc_cfg[SDE_SYS_CACHE_MAX]; struct sde_sc_cfg sc_cfg[SDE_SYS_CACHE_MAX];
struct sde_perf_cfg perf; struct sde_perf_cfg perf;
struct sde_uidle_cfg uidle_cfg; struct sde_uidle_cfg uidle_cfg;

View File

@@ -27,6 +27,9 @@
#define MDP_LTM_INTR_EN_OFF 0x50 #define MDP_LTM_INTR_EN_OFF 0x50
#define MDP_LTM_INTR_STATUS_OFF 0x54 #define MDP_LTM_INTR_STATUS_OFF 0x54
#define MDP_LTM_INTR_CLEAR_OFF 0x58 #define MDP_LTM_INTR_CLEAR_OFF 0x58
#define MDP_WB_INTR_EN_OFF 0x18C
#define MDP_WB_INTR_STATUS_OFF 0x190
#define MDP_WB_INTR_CLEAR_OFF 0x194
/** /**
* WB interrupt status bit definitions * WB interrupt status bit definitions
@@ -193,6 +196,11 @@
#define SDE_INTR_LTM_STATS_DONE BIT(0) #define SDE_INTR_LTM_STATS_DONE BIT(0)
#define SDE_INTR_LTM_STATS_WB_PB BIT(5) #define SDE_INTR_LTM_STATS_WB_PB BIT(5)
/**
* WB interrupt status bit definitions
*/
#define SDE_INTR_WB_PROG_LINE BIT(0)
/** /**
* struct sde_intr_reg - array of SDE register sets * struct sde_intr_reg - array of SDE register sets
* @clr_off: offset to CLEAR reg * @clr_off: offset to CLEAR reg
@@ -425,6 +433,10 @@ static struct sde_irq_type sde_irq_ltm_map[] = {
{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, -1, SDE_INTR_LTM_STATS_WB_PB, -1}, { SDE_IRQ_TYPE_LTM_STATS_WB_PB, -1, SDE_INTR_LTM_STATS_WB_PB, -1},
}; };
static struct sde_irq_type sde_irq_wb_map[] = {
{ SDE_IRQ_TYPE_WB_PROG_LINE, -1, SDE_INTR_WB_PROG_LINE, -1},
};
static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr, static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
enum sde_intr_type intr_type, u32 instance_idx) enum sde_intr_type intr_type, u32 instance_idx)
{ {
@@ -867,6 +879,12 @@ static int _set_sde_irq_tbl_offset(struct sde_intr_reg *sde_irq,
sde_irq->en_off = base_offset + MDP_LTM_INTR_EN_OFF; sde_irq->en_off = base_offset + MDP_LTM_INTR_EN_OFF;
sde_irq->status_off = base_offset + MDP_LTM_INTR_STATUS_OFF; sde_irq->status_off = base_offset + MDP_LTM_INTR_STATUS_OFF;
break; break;
case SDE_INTR_HWBLK_WB:
sde_irq->clr_off = base_offset + MDP_WB_INTR_CLEAR_OFF;
sde_irq->en_off = base_offset + MDP_WB_INTR_EN_OFF;
sde_irq->status_off = base_offset + MDP_WB_INTR_STATUS_OFF;
break;
default: default:
pr_err("unrecognized intr blk type %d\n", pr_err("unrecognized intr blk type %d\n",
item->type); item->type);
@@ -956,6 +974,9 @@ static inline u32 _get_irq_map_size(struct sde_intr_irq_offsets *item)
case SDE_INTR_HWBLK_LTM: case SDE_INTR_HWBLK_LTM:
ret = ARRAY_SIZE(sde_irq_ltm_map); ret = ARRAY_SIZE(sde_irq_ltm_map);
break; break;
case SDE_INTR_HWBLK_WB:
ret = ARRAY_SIZE(sde_irq_wb_map);
break;
default: default:
pr_err("invalid type: %d\n", item->type); pr_err("invalid type: %d\n", item->type);
} }
@@ -1006,6 +1027,9 @@ static inline struct sde_irq_type *_get_irq_map_addr(
case SDE_INTR_HWBLK_LTM: case SDE_INTR_HWBLK_LTM:
ret = sde_irq_ltm_map; ret = sde_irq_ltm_map;
break; break;
case SDE_INTR_HWBLK_WB:
ret = sde_irq_wb_map;
break;
default: default:
pr_err("invalid type: %d\n", item->type); pr_err("invalid type: %d\n", item->type);
} }
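Note: sde_irq_wb_map above ties the new SDE_IRQ_TYPE_WB_PROG_LINE entry to bit 0 of the WB interrupt status register. A standalone model of how such a per-block map can be scanned against a raw status word; the status value and entry are illustrative, not hardware documentation.

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    struct irq_map_entry {
        const char *name;
        uint32_t mask;
    };

    static const struct irq_map_entry wb_map[] = {
        { "wb_prog_line", BIT(0) },	/* mirrors SDE_INTR_WB_PROG_LINE */
    };

    int main(void)
    {
        uint32_t status = BIT(0);	/* pretend the status register read back 0x1 */
        size_t i;

        for (i = 0; i < sizeof(wb_map) / sizeof(wb_map[0]); i++)
            if (status & wb_map[i].mask)
                printf("irq fired: %s\n", wb_map[i].name);
        return 0;
    }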

View File

@@ -56,6 +56,7 @@
* @SDE_IRQ_TYPE_INTF_TEAR_TE_CHECK: INTF Tear TE detection * @SDE_IRQ_TYPE_INTF_TEAR_TE_CHECK: INTF Tear TE detection
* @SDE_IRQ_TYPE_LTM_STATS_DONE: LTM stats done interrupt * @SDE_IRQ_TYPE_LTM_STATS_DONE: LTM stats done interrupt
* @SDE_IRQ_TYPE_LTM_STATS_WB_PB: LTM stats WB push back interrupt * @SDE_IRQ_TYPE_LTM_STATS_WB_PB: LTM stats WB push back interrupt
* @SDE_IRQ_TYPE_PROG_LINE: Programmable Line interrupt for WB
* @SDE_IRQ_TYPE_RESERVED: Reserved for expansion * @SDE_IRQ_TYPE_RESERVED: Reserved for expansion
*/ */
enum sde_intr_type { enum sde_intr_type {
@@ -93,6 +94,7 @@ enum sde_intr_type {
SDE_IRQ_TYPE_INTF_TEAR_TE_CHECK, SDE_IRQ_TYPE_INTF_TEAR_TE_CHECK,
SDE_IRQ_TYPE_LTM_STATS_DONE, SDE_IRQ_TYPE_LTM_STATS_DONE,
SDE_IRQ_TYPE_LTM_STATS_WB_PB, SDE_IRQ_TYPE_LTM_STATS_WB_PB,
SDE_IRQ_TYPE_WB_PROG_LINE,
SDE_IRQ_TYPE_RESERVED, SDE_IRQ_TYPE_RESERVED,
}; };

View File

@@ -1148,19 +1148,19 @@ static void sde_hw_sspp_setup_sys_cache(struct sde_hw_pipe *ctx,
val = SDE_REG_READ(&ctx->hw, SSPP_SYS_CACHE_MODE + idx); val = SDE_REG_READ(&ctx->hw, SSPP_SYS_CACHE_MODE + idx);
if (cfg->flags & SSPP_SYS_CACHE_EN_FLAG)
val = (val & ~BIT(15)) | ((cfg->rd_en & 0x1) << 15);
if (cfg->flags & SSPP_SYS_CACHE_SCID)
val = (val & ~0x1F00) | ((cfg->rd_scid & 0x1f) << 8);
if (cfg->flags & SSPP_SYS_CACHE_OP_MODE)
val = (val & ~0xC0000) | ((cfg->op_mode & 0x3) << 18);
if (cfg->flags & SSPP_SYS_CACHE_OP_TYPE)
val = (val & ~0xF) | ((cfg->rd_op_type & 0xf) << 0);
if (cfg->flags & SSPP_SYS_CACHE_NO_ALLOC)
val = (val & ~0x10) | ((cfg->rd_noallocate & 0x1) << 4);

if (cfg->flags & SYS_CACHE_EN_FLAG)
val = (val & ~BIT(15)) | ((cfg->rd_en & 0x1) << 15);
if (cfg->flags & SYS_CACHE_SCID)
val = (val & ~0x1F00) | ((cfg->rd_scid & 0x1f) << 8);
if (cfg->flags & SYS_CACHE_OP_MODE)
val = (val & ~0xC0000) | ((cfg->op_mode & 0x3) << 18);
if (cfg->flags & SYS_CACHE_OP_TYPE)
val = (val & ~0xF) | ((cfg->rd_op_type & 0xf) << 0);
if (cfg->flags & SYS_CACHE_NO_ALLOC)
val = (val & ~0x10) | ((cfg->rd_noallocate & 0x1) << 4);
SDE_REG_WRITE(&ctx->hw, SSPP_SYS_CACHE_MODE + idx, val); SDE_REG_WRITE(&ctx->hw, SSPP_SYS_CACHE_MODE + idx, val);

View File

@@ -21,19 +21,7 @@
#define WB_DST3_ADDR 0x018 #define WB_DST3_ADDR 0x018
#define WB_DST_YSTRIDE0 0x01C #define WB_DST_YSTRIDE0 0x01C
#define WB_DST_YSTRIDE1 0x020 #define WB_DST_YSTRIDE1 0x020
#define WB_DST_YSTRIDE1 0x020
#define WB_DST_DITHER_BITDEPTH 0x024
#define WB_DST_MATRIX_ROW0 0x030
#define WB_DST_MATRIX_ROW1 0x034
#define WB_DST_MATRIX_ROW2 0x038
#define WB_DST_MATRIX_ROW3 0x03C
#define WB_DST_WRITE_CONFIG 0x048 #define WB_DST_WRITE_CONFIG 0x048
#define WB_ROTATION_DNSCALER 0x050
#define WB_ROTATOR_PIPE_DOWNSCALER 0x054
#define WB_N16_INIT_PHASE_X_C03 0x060
#define WB_N16_INIT_PHASE_X_C12 0x064
#define WB_N16_INIT_PHASE_Y_C03 0x068
#define WB_N16_INIT_PHASE_Y_C12 0x06C
#define WB_OUT_SIZE 0x074 #define WB_OUT_SIZE 0x074
#define WB_ALPHA_X_VALUE 0x078 #define WB_ALPHA_X_VALUE 0x078
#define WB_DANGER_LUT 0x084 #define WB_DANGER_LUT 0x084
@@ -47,11 +35,15 @@
#define WB_CROP_OFFSET 0x158 #define WB_CROP_OFFSET 0x158
#define WB_CLK_CTRL 0x178 #define WB_CLK_CTRL 0x178
#define WB_CLK_STATUS 0x17C #define WB_CLK_STATUS 0x17C
#define WB_LINE_COUNT 0x184
#define WB_PROG_LINE_COUNT 0x188
#define WB_CSC_BASE 0x260 #define WB_CSC_BASE 0x260
#define WB_DST_ADDR_SW_STATUS 0x2B0 #define WB_DST_ADDR_SW_STATUS 0x2B0
#define WB_CDP_CNTL 0x2B4 #define WB_CDP_CNTL 0x2B4
#define WB_UBWC_ERROR_STATUS 0x2BC
#define WB_OUT_IMAGE_SIZE 0x2C0 #define WB_OUT_IMAGE_SIZE 0x2C0
#define WB_OUT_XY 0x2C4 #define WB_OUT_XY 0x2C4
#define WB_SYS_CACHE_MODE 0x094
#define CWB_CTRL_SRC_SEL 0x0 #define CWB_CTRL_SRC_SEL 0x0
#define CWB_CTRL_MODE 0x4 #define CWB_CTRL_MODE 0x4
@@ -198,8 +190,7 @@ static void sde_hw_wb_setup_format(struct sde_hw_wb *ctx,
dst_format |= BIT(14); /* DST_ALPHA_X */ dst_format |= BIT(14); /* DST_ALPHA_X */
} }
if (SDE_FORMAT_IS_YUV(fmt) &&
(ctx->caps->features & BIT(SDE_WB_YUV_CONFIG)))
if (SDE_FORMAT_IS_YUV(fmt))
dst_format |= BIT(15); dst_format |= BIT(15);
if (SDE_FORMAT_IS_DX(fmt)) if (SDE_FORMAT_IS_DX(fmt))
@@ -409,6 +400,28 @@ static void sde_hw_wb_program_cwb_ctrl(struct sde_hw_wb *ctx,
} }
} }
static void sde_hw_wb_setup_sys_cache(struct sde_hw_wb *ctx, struct sde_hw_wb_sc_cfg *cfg)
{
u32 val = 0;
if (!ctx || !cfg)
return;
if (cfg->flags & SYS_CACHE_EN_FLAG)
val |= BIT(15);
if (cfg->flags & SYS_CACHE_SCID)
val |= ((cfg->wr_scid & 0x1f) << 8);
if (cfg->flags & SYS_CACHE_OP_TYPE)
val |= ((cfg->wr_op_type & 0xf) << 0);
if (cfg->flags & SYS_CACHE_NO_ALLOC)
val |= ((cfg->wr_noallocate & 0x1) << 4);
SDE_REG_WRITE(&ctx->hw, WB_SYS_CACHE_MODE, val);
}
static void sde_hw_wb_program_cwb_dither_ctrl(struct sde_hw_wb *ctx, static void sde_hw_wb_program_cwb_dither_ctrl(struct sde_hw_wb *ctx,
const enum sde_dcwb dcwb_idx, void *cfg, size_t len, bool enable) const enum sde_dcwb dcwb_idx, void *cfg, size_t len, bool enable)
{ {
@@ -543,21 +556,55 @@ static int sde_hw_wb_get_clk_ctrl_status(struct sde_hw_blk_reg_map *hw,
return 0; return 0;
} }
static u32 sde_hw_wb_get_line_count(struct sde_hw_wb *ctx)
{
struct sde_hw_blk_reg_map *c;
c = &ctx->hw;
return SDE_REG_READ(c, WB_LINE_COUNT) & 0xFFFF;
}
static void sde_hw_wb_set_prog_line_count(struct sde_hw_wb *ctx, u32 val)
{
struct sde_hw_blk_reg_map *c;
c = &ctx->hw;
SDE_REG_WRITE(c, WB_PROG_LINE_COUNT, val);
}
static u32 sde_hw_wb_get_ubwc_error(struct sde_hw_wb *ctx)
{
struct sde_hw_blk_reg_map *c;
c = &ctx->hw;
return SDE_REG_READ(c, WB_UBWC_ERROR_STATUS) & 0xFF;
}
static void sde_hw_wb_clear_ubwc_error(struct sde_hw_wb *ctx)
{
struct sde_hw_blk_reg_map *c;
c = &ctx->hw;
SDE_REG_WRITE(c, WB_UBWC_ERROR_STATUS, BIT(31));
}
static void _setup_wb_ops(struct sde_hw_wb_ops *ops, static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
unsigned long features) unsigned long features)
{ {
ops->setup_outaddress = sde_hw_wb_setup_outaddress; ops->setup_outaddress = sde_hw_wb_setup_outaddress;
ops->setup_outformat = sde_hw_wb_setup_format; ops->setup_outformat = sde_hw_wb_setup_format;
ops->setup_qos_lut = sde_hw_wb_setup_qos_lut;
if (test_bit(SDE_WB_XY_ROI_OFFSET, &features))
ops->setup_roi = sde_hw_wb_roi; ops->setup_roi = sde_hw_wb_roi;
ops->get_ubwc_error = sde_hw_wb_get_ubwc_error;
ops->clear_ubwc_error = sde_hw_wb_clear_ubwc_error;
if (test_bit(SDE_WB_CROP, &features)) if (test_bit(SDE_WB_CROP, &features))
ops->setup_crop = sde_hw_wb_crop; ops->setup_crop = sde_hw_wb_crop;
if (test_bit(SDE_WB_QOS, &features))
ops->setup_qos_lut = sde_hw_wb_setup_qos_lut;
if (test_bit(SDE_WB_CDP, &features)) if (test_bit(SDE_WB_CDP, &features))
ops->setup_cdp = sde_hw_wb_setup_cdp; ops->setup_cdp = sde_hw_wb_setup_cdp;
@@ -572,8 +619,16 @@ static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
ops->bind_dcwb_pp_blk = sde_hw_wb_bind_dcwb_pp_blk; ops->bind_dcwb_pp_blk = sde_hw_wb_bind_dcwb_pp_blk;
} }
if (test_bit(SDE_WB_SYS_CACHE, &features))
ops->setup_sys_cache = sde_hw_wb_setup_sys_cache;
if (test_bit(SDE_WB_CWB_DITHER_CTRL, &features)) if (test_bit(SDE_WB_CWB_DITHER_CTRL, &features))
ops->program_cwb_dither_ctrl = sde_hw_wb_program_cwb_dither_ctrl; ops->program_cwb_dither_ctrl = sde_hw_wb_program_cwb_dither_ctrl;
if (test_bit(SDE_WB_PROG_LINE, &features)) {
ops->get_line_count = sde_hw_wb_get_line_count;
ops->set_prog_line_count = sde_hw_wb_set_prog_line_count;
}
} }
struct sde_hw_blk_reg_map *sde_hw_wb_init(enum sde_wb idx, struct sde_hw_blk_reg_map *sde_hw_wb_init(enum sde_wb idx,
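Note: sde_hw_wb_setup_sys_cache() above packs the config into WB_SYS_CACHE_MODE only for the fields whose dirty flags are set. A standalone reimplementation of that packing, with field positions copied from the diff (enable at bit 15, SCID at bits 12:8, no-allocate at bit 4, op type at bits 3:0), handy for sanity-checking register values.

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)             (1u << (n))
    #define SYS_CACHE_EN_FLAG  BIT(0)
    #define SYS_CACHE_SCID     BIT(1)
    #define SYS_CACHE_OP_TYPE  BIT(3)
    #define SYS_CACHE_NO_ALLOC BIT(4)

    struct wb_sc_cfg {
        int wr_en;
        uint32_t wr_scid;
        int wr_noallocate;
        uint32_t wr_op_type;
        uint32_t flags;
    };

    static uint32_t pack_wb_sys_cache_mode(const struct wb_sc_cfg *cfg)
    {
        uint32_t val = 0;

        /* as in the diff, the enable bit is driven by the dirty flag, not wr_en */
        if (cfg->flags & SYS_CACHE_EN_FLAG)
            val |= BIT(15);
        if (cfg->flags & SYS_CACHE_SCID)
            val |= (cfg->wr_scid & 0x1f) << 8;
        if (cfg->flags & SYS_CACHE_OP_TYPE)
            val |= (cfg->wr_op_type & 0xf) << 0;
        if (cfg->flags & SYS_CACHE_NO_ALLOC)
            val |= (cfg->wr_noallocate & 0x1) << 4;

        return val;
    }

    int main(void)
    {
        struct wb_sc_cfg cfg = {
            .wr_en = 1,
            .wr_scid = 0x1f,	/* example SCID as returned by the LLCC driver */
            .flags = SYS_CACHE_EN_FLAG | SYS_CACHE_SCID,
        };

        printf("WB_SYS_CACHE_MODE = 0x%x\n", pack_wb_sys_cache_mode(&cfg));
        return 0;
    }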

View File

@@ -61,6 +61,24 @@ struct sde_hw_wb_qos_cfg {
bool danger_safe_en; bool danger_safe_en;
}; };
/**
* struct sde_hw_wb_sc_cfg - system cache configuration
* @wr_en: system cache write enable
* @wr_scid: system cache write block id
* @wr_noallocate: system cache write no allocate attribute
* @wr_op_type: system cache write operation type
* @flags: dirty flags to change the configuration
* @type: sys cache type
*/
struct sde_hw_wb_sc_cfg {
bool wr_en;
u32 wr_scid;
bool wr_noallocate;
u32 wr_op_type;
u32 flags;
enum sde_sys_cache_type type;
};
/** /**
* *
* struct sde_hw_wb_ops : Interface to the wb Hw driver functions * struct sde_hw_wb_ops : Interface to the wb Hw driver functions
@@ -152,6 +170,13 @@ struct sde_hw_wb_ops {
void (*program_dcwb_ctrl)(struct sde_hw_wb *ctx, const enum sde_dcwb cwb, void (*program_dcwb_ctrl)(struct sde_hw_wb *ctx, const enum sde_dcwb cwb,
const enum sde_cwb data_src, int tap_location, bool enable); const enum sde_cwb data_src, int tap_location, bool enable);
/**
* setup_sys_cache - setup system cache configuration
* @ctx: Pointer to wb context
* @cfg: Pointer to wb system cache configuration
*/
void (*setup_sys_cache)(struct sde_hw_wb *ctx, struct sde_hw_wb_sc_cfg *cfg);
/** /**
* program_cwb_dither_ctrl - program cwb dither block config * program_cwb_dither_ctrl - program cwb dither block config
* @ctx: Pointer to wb context * @ctx: Pointer to wb context
@@ -162,6 +187,31 @@ struct sde_hw_wb_ops {
*/ */
void (*program_cwb_dither_ctrl)(struct sde_hw_wb *ctx, void (*program_cwb_dither_ctrl)(struct sde_hw_wb *ctx,
const enum sde_dcwb dcwb_idx, void *cfg, size_t len, bool enable); const enum sde_dcwb dcwb_idx, void *cfg, size_t len, bool enable);
/**
* get_line_count - get current wb output linecount
* @ctx: Pointer to wb context
*/
u32 (*get_line_count)(struct sde_hw_wb *ctx);
/**
* set_prog_line_count - set wb programmable line
* @ctx: Pointer to wb context
* @line_count: programmable line-count value
*/
void (*set_prog_line_count)(struct sde_hw_wb *ctx, u32 line_count);
/**
* get_ubwc_error - get ubwc error status
* @ctx: Pointer to wb context
*/
u32 (*get_ubwc_error)(struct sde_hw_wb *ctx);
/**
* clear_ubwc_error - clear ubwc error status
* @ctx: Pointer to wb context
*/
void (*clear_ubwc_error)(struct sde_hw_wb *ctx);
}; };
/** /**

View File

@@ -1582,7 +1582,8 @@ static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
* mode panels. This may be a no-op for command mode panels. * mode panels. This may be a no-op for command mode panels.
*/ */
SDE_EVT32_VERBOSE(DRMID(crtc)); SDE_EVT32_VERBOSE(DRMID(crtc));
ret = sde_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
ret = sde_encoder_wait_for_event(encoder, cwb_disabling ?
MSM_ENC_TX_COMPLETE : MSM_ENC_COMMIT_DONE);
if (ret && ret != -EWOULDBLOCK) { if (ret && ret != -EWOULDBLOCK) {
SDE_ERROR("wait for commit done returned %d\n", ret); SDE_ERROR("wait for commit done returned %d\n", ret);
sde_crtc_request_frame_reset(crtc, encoder); sde_crtc_request_frame_reset(crtc, encoder);

View File

@@ -2783,48 +2783,32 @@ static void _sde_plane_sspp_setup_sys_cache(struct sde_plane *psde,
struct sde_plane_state *pstate) struct sde_plane_state *pstate)
{ {
struct sde_sc_cfg *sc_cfg = psde->catalog->sc_cfg; struct sde_sc_cfg *sc_cfg = psde->catalog->sc_cfg;
bool prev_rd_en;
if (!sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache)
return;
prev_rd_en = pstate->sc_cfg.rd_en;
SDE_DEBUG_PLANE(psde, "features:0x%x\n", psde->features);
pstate->sc_cfg.rd_en = false;
pstate->sc_cfg.rd_scid = 0x0;
pstate->sc_cfg.flags = SSPP_SYS_CACHE_EN_FLAG |
SSPP_SYS_CACHE_SCID;
pstate->sc_cfg.type = SDE_SYS_CACHE_NONE;
if (pstate->static_cache_state == CACHE_STATE_FRAME_WRITE) {
pstate->sc_cfg.rd_en = true;
pstate->sc_cfg.rd_scid =
sc_cfg[SDE_SYS_CACHE_DISP].llcc_scid;
pstate->sc_cfg.rd_noallocate = false;
pstate->sc_cfg.flags = SSPP_SYS_CACHE_EN_FLAG |
SSPP_SYS_CACHE_SCID | SSPP_SYS_CACHE_NO_ALLOC;
pstate->sc_cfg.type = SDE_SYS_CACHE_DISP;
} else if (pstate->static_cache_state == CACHE_STATE_FRAME_READ) {
pstate->sc_cfg.rd_en = true;
pstate->sc_cfg.rd_scid =
sc_cfg[SDE_SYS_CACHE_DISP].llcc_scid;
pstate->sc_cfg.rd_noallocate = true;
pstate->sc_cfg.flags = SSPP_SYS_CACHE_EN_FLAG |
SSPP_SYS_CACHE_SCID | SSPP_SYS_CACHE_NO_ALLOC;
pstate->sc_cfg.type = SDE_SYS_CACHE_DISP;
}
if (!pstate->sc_cfg.rd_en && !prev_rd_en)
return;
SDE_EVT32(DRMID(&psde->base), pstate->sc_cfg.rd_scid,
pstate->sc_cfg.rd_en, pstate->sc_cfg.rd_noallocate);
psde->pipe_hw->ops.setup_sys_cache(
psde->pipe_hw, &pstate->sc_cfg);

struct sde_hw_pipe_sc_cfg *cfg = &pstate->sc_cfg;
bool prev_rd_en = cfg->rd_en;
/* Only display system cache is currently supported */
if (!sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache)
return;
cfg->rd_en = false;
cfg->rd_scid = 0x0;
cfg->flags = SYS_CACHE_EN_FLAG | SYS_CACHE_SCID;
cfg->type = SDE_SYS_CACHE_NONE;
if ((pstate->static_cache_state == CACHE_STATE_FRAME_WRITE)
|| (pstate->static_cache_state == CACHE_STATE_FRAME_READ)) {
cfg->rd_en = true;
cfg->rd_scid = sc_cfg[SDE_SYS_CACHE_DISP].llcc_scid;
cfg->rd_noallocate = (pstate->static_cache_state == CACHE_STATE_FRAME_READ);
cfg->flags |= SYS_CACHE_NO_ALLOC;
cfg->type = SDE_SYS_CACHE_DISP;
}
if (!cfg->rd_en && !prev_rd_en)
return;
SDE_EVT32(DRMID(&psde->base), cfg->rd_scid, cfg->rd_en, cfg->rd_noallocate, cfg->flags);
psde->pipe_hw->ops.setup_sys_cache(psde->pipe_hw, cfg);
} }
void sde_plane_static_img_control(struct drm_plane *plane, void sde_plane_static_img_control(struct drm_plane *plane,
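Note: the rewrite above collapses the FRAME_WRITE/FRAME_READ branches: both enable SSPP reads from the display slice, and only FRAME_READ adds the no-allocate hint. A small standalone model of that decision; the enum and struct names are simplified stand-ins, not the driver's.

    #include <stdbool.h>
    #include <stdio.h>

    enum cache_state { CACHE_STATE_DISABLED, CACHE_STATE_FRAME_WRITE, CACHE_STATE_FRAME_READ };

    struct pipe_sc_cfg {
        bool rd_en;
        bool rd_noallocate;
        unsigned int rd_scid;
    };

    static void fill_sc_cfg(enum cache_state state, unsigned int disp_scid,
                struct pipe_sc_cfg *cfg)
    {
        cfg->rd_en = false;
        cfg->rd_noallocate = false;
        cfg->rd_scid = 0;

        if (state == CACHE_STATE_FRAME_WRITE || state == CACHE_STATE_FRAME_READ) {
            cfg->rd_en = true;
            cfg->rd_scid = disp_scid;
            /* read-only frames should not allocate new cache lines */
            cfg->rd_noallocate = (state == CACHE_STATE_FRAME_READ);
        }
    }

    int main(void)
    {
        struct pipe_sc_cfg cfg;

        fill_sc_cfg(CACHE_STATE_FRAME_READ, 0x9, &cfg);
        printf("en:%d noalloc:%d scid:0x%x\n", cfg.rd_en, cfg.rd_noallocate, cfg.rd_scid);
        return 0;
    }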

View File

@@ -564,6 +564,10 @@ int sde_wb_connector_post_init(struct drm_connector *connector, void *display)
ARRAY_SIZE(e_fb_translation_mode), 0, ARRAY_SIZE(e_fb_translation_mode), 0,
CONNECTOR_PROP_FB_TRANSLATION_MODE); CONNECTOR_PROP_FB_TRANSLATION_MODE);
if (wb_dev->wb_cfg->features & BIT(SDE_WB_PROG_LINE))
msm_property_install_range(&c_conn->property_info, "early_fence_line",
0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_EARLY_FENCE_LINE);
sde_wb_connector_install_dither_property(wb_dev, c_conn); sde_wb_connector_install_dither_property(wb_dev, c_conn);
return 0; return 0;
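Note: with SDE_WB_PROG_LINE present, the writeback connector now exposes an "early_fence_line" range property. A hedged userspace sketch of discovering and setting it through libdrm's legacy property ioctl; a real compositor would more likely carry it in an atomic commit, and the connector id and line value are placeholders.

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    static int set_early_fence_line(int fd, uint32_t connector_id, uint64_t line)
    {
        drmModeObjectProperties *props;
        uint32_t i, prop_id = 0;

        props = drmModeObjectGetProperties(fd, connector_id, DRM_MODE_OBJECT_CONNECTOR);
        if (!props)
            return -1;

        /* walk the connector's properties looking for the new range property */
        for (i = 0; i < props->count_props; i++) {
            drmModePropertyRes *p = drmModeGetProperty(fd, props->props[i]);

            if (p && !strcmp(p->name, "early_fence_line"))
                prop_id = p->prop_id;
            drmModeFreeProperty(p);
        }
        drmModeFreeObjectProperties(props);

        if (!prop_id)
            return -1;	/* property not exposed on this connector */

        return drmModeObjectSetProperty(fd, connector_id,
                        DRM_MODE_OBJECT_CONNECTOR, prop_id, line);
    }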