Merge "disp: msm: sde: add vig formats before qseed and csc initializations"

qctecmdr committed 2020-06-12 00:11:20 -07:00 (via Gerrit - the friendly Code Review server)
10 changed files with 49 additions and 19 deletions

View File

@@ -132,3 +132,5 @@ msm_drm-$(CONFIG_DRM_MSM) += \
 msm_drm-$(CONFIG_HDCP_QSEECOM) += ../hdcp/msm_hdcp.o \
 obj-$(CONFIG_DISPLAY_BUILD) += msm_drm.o
+
+obj-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o

View File

@@ -449,7 +449,8 @@ static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 		if (IS_ERR(obj->import_attach)) {
 			DRM_ERROR("dma_buf_attach failure, err=%ld\n",
 					PTR_ERR(obj->import_attach));
-			goto unlock;
+			ret = PTR_ERR(obj->import_attach);
+			return ret;
 		}
 		msm_obj->obj_dirty = false;
 		reattach = true;
@@ -462,14 +463,14 @@ static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 		if (ret) {
 			DRM_ERROR("delayed dma-buf import failed %d\n",
 					ret);
-			goto unlock;
+			return ret;
 		}
 	}

 	vma = add_vma(obj, aspace);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
-		goto unlock;
+		return ret;
 	}

 	pages = get_pages(obj);
@@ -493,13 +494,10 @@ static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 			mutex_unlock(&aspace->list_lock);
 		}

-	mutex_unlock(&msm_obj->lock);
 	return 0;

 fail:
 	del_vma(vma);
-unlock:
-	mutex_unlock(&msm_obj->lock);
 	return ret;
 }

 static int msm_gem_pin_iova(struct drm_gem_object *obj,
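Note: the three hunks above rewrite the error paths of msm_gem_get_iova_locked() to return directly instead of jumping to labels that dropped msm_obj->lock. By the usual kernel convention, a *_locked() helper runs entirely under its caller's lock, so unlocking inside the helper unbalances the lock for the caller. A minimal, self-contained sketch of that convention, with hypothetical demo_* names rather than the driver's own:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_obj {
	struct mutex lock;
	bool mapped;
};

/* caller holds o->lock; error paths return without touching the lock */
static int demo_map_locked(struct demo_obj *o)
{
	if (o->mapped)
		return -EBUSY;

	o->mapped = true;
	return 0;
}

/* the single lock/unlock site lives in the unlocked wrapper */
static int demo_map(struct demo_obj *o)
{
	int ret;

	mutex_lock(&o->lock);
	ret = demo_map_locked(o);
	mutex_unlock(&o->lock);

	return ret;
}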

View File

@@ -818,6 +818,9 @@ static void _sde_core_perf_crtc_update_check(struct drm_crtc *crtc,
 	struct sde_core_perf_params *new = &sde_crtc->new_perf;
 	int i;

+	if (!kms)
+		return;
+
 	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
 		/*
 		 * cases for bus bandwidth update.

View File

@@ -567,7 +567,7 @@ static void _sde_crtc_setup_dim_layer_cfg(struct drm_crtc *crtc,
 				cstate->lm_roi[i].y;
 	}

-	SDE_EVT32_VERBOSE(DRMID(crtc),
+	SDE_EVT32(DRMID(crtc), dim_layer->stage,
 			cstate->lm_roi[i].x,
 			cstate->lm_roi[i].y,
 			cstate->lm_roi[i].w,

View File

@@ -2656,7 +2656,9 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
 	_sde_encoder_input_handler_register(drm_enc);

-	if (!(msm_is_mode_seamless_vrr(cur_mode)
+	if ((drm_enc->crtc->state->connectors_changed &&
+			sde_encoder_in_clone_mode(drm_enc)) ||
+			!(msm_is_mode_seamless_vrr(cur_mode)
 			|| msm_is_mode_seamless_dms(cur_mode)
 			|| msm_is_mode_seamless_dyn_clk(cur_mode)))
 		kthread_init_delayed_work(&sde_enc->delayed_off_work,

View File

@@ -227,6 +227,7 @@ enum {
 	PERF_CPU_MASK,
 	CPU_MASK_PERF,
 	PERF_CPU_DMA_LATENCY,
+	PERF_CPU_IRQ_LATENCY,
 	PERF_PROP_MAX,
 };
@@ -607,6 +608,8 @@ static struct sde_prop_type sde_perf_prop[] = {
 		PROP_TYPE_U32},
 	{PERF_CPU_DMA_LATENCY, "qcom,sde-qos-cpu-dma-latency", false,
 		PROP_TYPE_U32},
+	{PERF_CPU_IRQ_LATENCY, "qcom,sde-qos-cpu-irq-latency", false,
+		PROP_TYPE_U32},
 };

 static struct sde_prop_type sde_qos_prop[] = {
@@ -1432,6 +1435,9 @@ static int _sde_sspp_setup_vigs(struct device_node *np,
 		set_bit(SDE_PERF_SSPP_QOS_8LVL, &sspp->perf_features);
 		vig_count++;

+		sblk->format_list = sde_cfg->vig_formats;
+		sblk->virt_format_list = sde_cfg->virt_vig_formats;
+
 		if ((sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) ||
 			(sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) ||
 			(sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3LITE)) {
@@ -1447,9 +1453,6 @@ static int _sde_sspp_setup_vigs(struct device_node *np,
 		_sde_sspp_setup_vigs_pp(props, sde_cfg, sspp);

-		sblk->format_list = sde_cfg->vig_formats;
-		sblk->virt_format_list = sde_cfg->virt_vig_formats;
-
 		if (sde_cfg->true_inline_rot_rev > 0) {
 			set_bit(SDE_SSPP_TRUE_INLINE_ROT, &sspp->features);
 			sblk->in_rot_format_list = sde_cfg->inline_rot_formats;
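Note: taken together, the two hunks above are the change named in the commit subject. The vig format-list assignments move ahead of the qseed/scaler and CSC setup, so anything that consults sblk->format_list while that setup runs sees initialized pointers rather than NULL. A hedged sketch of the kind of consumer this ordering protects, using the catalog's own types but a hypothetical helper name:

#include "sde_hw_catalog.h"	/* struct sde_sspp_sub_blks */
#include "sde_hw_mdss.h"	/* struct sde_format_extended */

/* hypothetical consumer that could run during qseed/CSC setup */
static void demo_setup_vig_scaler_formats(struct sde_sspp_sub_blks *sblk)
{
	const struct sde_format_extended *fmt = sblk->format_list;

	/* with the old ordering, format_list could still be NULL here */
	for (; fmt && fmt->fourcc_format; fmt++)
		; /* program per-format scaler state */
}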
@@ -4167,6 +4170,10 @@ static int _sde_perf_parse_dt_cfg(struct device_node *np,
 		prop_exists[PERF_CPU_DMA_LATENCY] ?
 			PROP_VALUE_ACCESS(prop_value, PERF_CPU_DMA_LATENCY, 0) :
 			DEFAULT_CPU_DMA_LATENCY;

+	cfg->perf.cpu_irq_latency =
+		prop_exists[PERF_CPU_IRQ_LATENCY] ?
+			PROP_VALUE_ACCESS(prop_value, PERF_CPU_IRQ_LATENCY, 0) :
+			PM_QOS_DEFAULT_VALUE;
+
 	return 0;
 }

View File

@@ -1317,6 +1317,7 @@ struct sde_sc_cfg {
  * @cpu_mask: pm_qos cpu mask value
  * @cpu_mask_perf: pm_qos cpu silver core mask value
  * @cpu_dma_latency: pm_qos cpu dma latency value
+ * @cpu_irq_latency: pm_qos cpu irq latency value
  * @axi_bus_width: axi bus width value in bytes
  * @num_mnoc_ports: number of mnoc ports
  */
@@ -1348,6 +1349,7 @@ struct sde_perf_cfg {
 	unsigned long cpu_mask;
 	unsigned long cpu_mask_perf;
 	u32 cpu_dma_latency;
+	u32 cpu_irq_latency;
 	u32 axi_bus_width;
 	u32 num_mnoc_ports;
 };

View File

@@ -299,7 +299,12 @@ static int _sde_kms_scm_call(struct sde_kms *sde_kms, int vmid)
 		sec_sid = (uint32_t *) shm.vaddr;
 		mem_addr = shm.paddr;
-		mem_size = shm.size;
+		/**
+		 * SMMUSecureModeSwitch requires the size to be number of SID's
+		 * but shm allocates size in pages. Modify the args as per
+		 * client requirement.
+		 */
+		mem_size = sizeof(uint32_t) * num_sids;
 	} else {
 		sec_sid = kcalloc(num_sids, sizeof(uint32_t), GFP_KERNEL);
 		if (!sec_sid)
@@ -1500,7 +1505,9 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
 		/* update display cap to MST_MODE for DP MST encoders */
 		info.capabilities |= MSM_DISPLAY_CAP_MST_MODE;

-		for (idx = 0; idx < sde_kms->dp_stream_count; idx++) {
+		for (idx = 0; idx < sde_kms->dp_stream_count &&
+				priv->num_encoders < max_encoders; idx++) {
 			info.h_tile_instance[0] = idx;
 			encoder = sde_encoder_init(dev, &info);
 			if (IS_ERR_OR_NULL(encoder)) {
@@ -2984,6 +2991,8 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
 		}
 	}

+	sde_kms->base.aspace = sde_kms->aspace[0];
+
 	return 0;

 early_map_fail:
@@ -3048,6 +3057,7 @@ static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms)
 {
 	struct device *cpu_dev;
 	int cpu = 0;
+	u32 cpu_irq_latency = sde_kms->catalog->perf.cpu_irq_latency;

 	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
 		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
@@ -3064,12 +3074,12 @@ static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms)
 		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
 			dev_pm_qos_update_request(&sde_kms->pm_qos_irq_req[cpu],
-				sde_kms->catalog->perf.cpu_dma_latency);
+				cpu_irq_latency);
 		else
 			dev_pm_qos_add_request(cpu_dev,
 				&sde_kms->pm_qos_irq_req[cpu],
 				DEV_PM_QOS_RESUME_LATENCY,
-				sde_kms->catalog->perf.cpu_dma_latency);
+				cpu_irq_latency);
 	}
 }
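Note: between the catalog hunks above and this file, the new devicetree knob (qcom,sde-qos-cpu-irq-latency, falling back to PM_QOS_DEFAULT_VALUE when absent) now feeds the per-CPU resume-latency QoS requests instead of reusing cpu_dma_latency. A minimal sketch of the add-or-update pattern used here, assuming a driver-owned request array indexed by CPU (the demo_* names are hypothetical; the dev_pm_qos_* calls are the kernel's own):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request demo_irq_req[NR_CPUS];

static void demo_update_irq_latency(const struct cpumask *mask, s32 latency)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		struct device *cpu_dev = get_cpu_device(cpu);

		if (!cpu_dev)
			continue;

		/* re-arm an existing request, otherwise register a new one */
		if (dev_pm_qos_request_active(&demo_irq_req[cpu]))
			dev_pm_qos_update_request(&demo_irq_req[cpu], latency);
		else
			dev_pm_qos_add_request(cpu_dev, &demo_irq_req[cpu],
					DEV_PM_QOS_RESUME_LATENCY, latency);
	}
}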

View File

@@ -2619,6 +2619,10 @@ static int _sde_plane_sspp_atomic_check_helper(struct sde_plane *psde,
SDE_ERROR_PLANE(psde, "invalid dest rect %u, %u, %ux%u\n",
dst.x, dst.y, dst.w, dst.h);
ret = -EINVAL;
} else if (SDE_FORMAT_IS_UBWC(fmt) &&
!psde->catalog->ubwc_version) {
SDE_ERROR_PLANE(psde, "ubwc not supported\n");
ret = -EINVAL;
}
return ret;

View File

@@ -2074,11 +2074,13 @@ static struct drm_connector *_sde_rm_get_connector(
 		struct drm_encoder *enc)
 {
 	struct drm_connector *conn = NULL, *conn_search;
+	struct sde_connector *c_conn = NULL;
 	struct drm_connector_list_iter conn_iter;

 	drm_connector_list_iter_begin(enc->dev, &conn_iter);
 	drm_for_each_connector_iter(conn_search, &conn_iter) {
-		if (conn_search->encoder == enc) {
+		c_conn = to_sde_connector(conn_search);
+		if (c_conn->encoder == enc) {
 			conn = conn_search;
 			break;
 		}
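Note: the lookup above now matches on the encoder cached in sde_connector (presumably bound when the connector is created) rather than drm_connector::encoder, which is only populated once a modeset actually links the connector to an encoder. A hedged sketch of the full iterator pattern, with a hypothetical function name; note the drm_connector_list_iter_end() pairing that falls outside the hunk shown:

#include <drm/drm_connector.h>
#include "sde_connector.h"	/* driver-local: to_sde_connector() */

static struct drm_connector *demo_find_conn(struct drm_device *dev,
		struct drm_encoder *enc)
{
	struct drm_connector *conn = NULL, *c;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(c, &iter) {
		if (to_sde_connector(c)->encoder == enc) {
			conn = c;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);	/* must pair with _begin() */

	return conn;
}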
@@ -2286,10 +2288,10 @@ void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc, bool nxt)
 	conn = _sde_rm_get_connector(enc);
 	if (!conn) {
-		SDE_DEBUG("failed to get connector for enc %d, nxt %d",
-				enc->base.id, nxt);
+		SDE_EVT32(enc->base.id, 0x0, 0xffffffff);
+		_sde_rm_release_rsvp(rm, rsvp, conn);
+		SDE_DEBUG("failed to get conn for enc %d nxt %d rsvp[s%de%d]\n",
+				enc->base.id, nxt, rsvp->seq, rsvp->enc_id);
 		goto end;
 	}
@@ -2401,7 +2403,7 @@ int sde_rm_reserve(
 	 * comes again after earlier commit gets processed.
 	 */
-	if (test_only && rsvp_nxt) {
+	if (test_only && rsvp_cur && rsvp_nxt) {
 		SDE_ERROR("cur %d nxt %d enc %d conn %d\n", rsvp_cur->seq,
 				rsvp_nxt->seq, enc->base.id,
 				conn_state->connector->base.id);
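Note: the extra rsvp_cur test above is a NULL-pointer guard. The SDE_ERROR in this block dereferences rsvp_cur->seq, and a next reservation can be pending while no current reservation exists. A hedged, reduced illustration of the same short-circuit (types, names, and return value are hypothetical):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>

struct demo_rsvp { int seq; };

static int demo_check(bool test_only, struct demo_rsvp *rsvp_cur,
		struct demo_rsvp *rsvp_nxt)
{
	/* rsvp_cur may be NULL while rsvp_nxt is pending; testing it
	 * first short-circuits before rsvp_cur->seq is dereferenced */
	if (test_only && rsvp_cur && rsvp_nxt) {
		pr_err("cur %d nxt %d\n", rsvp_cur->seq, rsvp_nxt->seq);
		return -EINVAL;
	}

	return 0;
}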