Merge changes I00518e84,I08f66c0e,I2948bc6e,I21bc67b4,I79acaf83,I2f8ffe6e into display-kernel.lnx.1.0

* changes:
  disp: msm: sde: use device tree node to enable INTF TE capability
  disp: msm: sde: refactor sde_hw_interrupts to use offsets from catalog
  disp: msm: sde: get INTF TEAR IRQ offsets from device tree
  disp: msm: sde: rename MDSS_INTR_* enums to SDE_INTR_*
  disp: msm: sde: add Lahaina version checks
  disp: msm: sde: move all hw version checks into the catalog
Linux Build Service Account
2019-12-04 17:27:53 -08:00
committed by Gerrit - the friendly Code Review server
8 changed files with 508 additions and 553 deletions
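
Taken together, the changes swap the fixed MDSS_INTR_* enum and hard-coded register bases for a per-target list of interrupt blocks, each described by a hardware-block type, an instance index and a base offset supplied by the catalog or device tree; the interrupt code then derives its clear/enable/status register addresses from that list. A minimal standalone sketch of the pattern follows (simplified types, not the driver's structs; the tear-block offsets are the same values visible in the interrupt hunks further down):

/* Standalone sketch of the irq-offset-list pattern; illustrative only. */
#include <stdio.h>

enum blk_type { BLK_TOP, BLK_INTF, BLK_INTF_TEAR };

struct irq_offset {
	enum blk_type type;
	unsigned int instance;	/* e.g. 1 for INTF_1 */
	unsigned int base;	/* block base, from device tree/catalog */
};

/* Register layout inside an INTF tear block (EN/STATUS/CLEAR), matching
 * the MDP_INTF_TEAR_INTR_*_OFF values in the interrupt code below. */
#define TEAR_INTR_EN		0x0
#define TEAR_INTR_STATUS	0x4
#define TEAR_INTR_CLEAR		0x8

int main(void)
{
	/* In the driver this list is built during catalog parsing, e.g.
	 * from the qcom,sde-intf-tear-irq-off device-tree property. */
	const struct irq_offset list[] = {
		{ BLK_INTF_TEAR, 1, 0x6E800 },
		{ BLK_INTF_TEAR, 2, 0x6E900 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(list) / sizeof(list[0]); i++)
		printf("tear INTF_%u: en=0x%x status=0x%x clr=0x%x\n",
		       list[i].instance,
		       list[i].base + TEAR_INTR_EN,
		       list[i].base + TEAR_INTR_STATUS,
		       list[i].base + TEAR_INTR_CLEAR);
	return 0;
}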

View File

@@ -1801,6 +1801,15 @@ static void sde_encoder_phys_cmd_init_ops(struct sde_encoder_phys_ops *ops)
ops->collect_misr = sde_encoder_helper_collect_misr;
}
static inline bool sde_encoder_phys_cmd_intf_te_supported(
const struct sde_mdss_cfg *sde_cfg, enum sde_intf idx)
{
if (sde_cfg && ((idx - INTF_0) < sde_cfg->intf_count))
return test_bit(SDE_INTF_TE,
&(sde_cfg->intf[idx - INTF_0].features));
return false;
}
struct sde_encoder_phys *sde_encoder_phys_cmd_init(
struct sde_enc_phys_init_params *p)
{
@@ -1841,10 +1850,8 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
phys_enc->comp_type = p->comp_type;
if (sde_hw_intf_te_supported(phys_enc->sde_kms->catalog))
phys_enc->has_intf_te = true;
else
phys_enc->has_intf_te = false;
phys_enc->has_intf_te = sde_encoder_phys_cmd_intf_te_supported(
phys_enc->sde_kms->catalog, phys_enc->intf_idx);
for (i = 0; i < INTR_IDX_MAX; i++) {
irq = &phys_enc->irq[i];

View File

@@ -277,6 +277,7 @@ enum {
INTF_LEN,
INTF_PREFETCH,
INTF_TYPE,
INTF_TE_IRQ,
INTF_PROP_MAX,
};
@@ -709,6 +710,7 @@ static struct sde_prop_type intf_prop[] = {
{INTF_PREFETCH, "qcom,sde-intf-max-prefetch-lines", false,
PROP_TYPE_U32_ARRAY},
{INTF_TYPE, "qcom,sde-intf-type", false, PROP_TYPE_STRING_ARRAY},
{INTF_TE_IRQ, "qcom,sde-intf-tear-irq-off", false, PROP_TYPE_U32_ARRAY},
};
static struct sde_prop_type wb_prop[] = {
@@ -1058,6 +1060,74 @@ end:
return rc;
}
static int _add_to_irq_offset_list(struct sde_mdss_cfg *sde_cfg,
enum sde_intr_hwblk_type blk_type, u32 instance, u32 offset)
{
struct sde_intr_irq_offsets *item = NULL;
bool err = false;
switch (blk_type) {
case SDE_INTR_HWBLK_TOP:
if (instance >= SDE_INTR_TOP_MAX)
err = true;
break;
case SDE_INTR_HWBLK_INTF:
if (instance >= INTF_MAX)
err = true;
break;
case SDE_INTR_HWBLK_AD4:
if (instance >= AD_MAX)
err = true;
break;
case SDE_INTR_HWBLK_INTF_TEAR:
if (instance >= INTF_MAX)
err = true;
break;
case SDE_INTR_HWBLK_LTM:
if (instance >= LTM_MAX)
err = true;
break;
default:
SDE_ERROR("invalid hwblk_type: %d", blk_type);
return -EINVAL;
}
if (err) {
SDE_ERROR("unable to map instance %d for blk type %d",
instance, blk_type);
return -EINVAL;
}
/* Check for existing list entry */
item = sde_hw_intr_list_lookup(sde_cfg, blk_type, instance);
if (IS_ERR_OR_NULL(item)) {
SDE_DEBUG("adding intr type %d idx %d offset 0x%x\n",
blk_type, instance, offset);
} else if (item->base_offset == offset) {
SDE_INFO("duplicate intr %d/%d offset 0x%x, skipping\n",
blk_type, instance, offset);
return 0;
} else {
SDE_ERROR("type %d, idx %d in list with offset 0x%x != 0x%x\n",
blk_type, instance, item->base_offset, offset);
return -EINVAL;
}
item = kzalloc(sizeof(*item), GFP_KERNEL);
if (!item) {
SDE_ERROR("memory allocation failed!\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&item->list);
item->type = blk_type;
item->instance_idx = instance;
item->base_offset = offset;
list_add_tail(&item->list, &sde_cfg->irq_offset_list);
return 0;
}
static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
bool *prop_exists, struct sde_prop_value *prop_value, u32 *vig_count)
@@ -1210,6 +1280,9 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
sblk->llcc_slice_size =
sde_cfg->sc_cfg.llcc_slice_size;
}
if (sde_cfg->inline_disable_const_clr)
set_bit(SDE_SSPP_INLINE_CONST_CLR, &sspp->features);
}
static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
@@ -1850,6 +1923,8 @@ static int sde_mixer_parse_dt(struct device_node *np,
set_bit(SDE_MIXER_SOURCESPLIT, &mixer->features);
if (sde_cfg->has_dim_layer)
set_bit(SDE_DIM_LAYER, &mixer->features);
if (sde_cfg->has_mixer_combined_alpha)
set_bit(SDE_MIXER_COMBINED_ALPHA, &mixer->features);
of_property_read_string_index(np,
mixer_prop[MIXER_DISP].prop_name, i, &disp_pref);
@@ -1941,6 +2016,11 @@ static int sde_intf_parse_dt(struct device_node *np,
if (!prop_exists[INTF_LEN])
intf->len = DEFAULT_SDE_HW_BLOCK_LEN;
rc = _add_to_irq_offset_list(sde_cfg, SDE_INTR_HWBLK_INTF,
intf->id, intf->base);
if (rc)
goto end;
intf->prog_fetch_lines_worst_case =
!prop_exists[INTF_PREFETCH] ?
sde_cfg->perf.min_prefill_lines :
@@ -1968,11 +2048,19 @@ static int sde_intf_parse_dt(struct device_node *np,
if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev))
set_bit(SDE_INTF_INPUT_CTRL, &intf->features);
if (IS_SDE_MAJOR_SAME((sde_cfg->hwversion),
SDE_HW_VER_500) ||
IS_SDE_MAJOR_SAME((sde_cfg->hwversion),
SDE_HW_VER_600))
if (prop_exists[INTF_TE_IRQ])
intf->te_irq_offset = PROP_VALUE_ACCESS(prop_value,
INTF_TE_IRQ, i);
if (intf->te_irq_offset) {
rc = _add_to_irq_offset_list(sde_cfg,
SDE_INTR_HWBLK_INTF_TEAR,
intf->id, intf->te_irq_offset);
if (rc)
goto end;
set_bit(SDE_INTF_TE, &intf->features);
}
}
end:
@@ -2437,6 +2525,11 @@ static int sde_dspp_parse_dt(struct device_node *np,
sblk->ad.version = PROP_VALUE_ACCESS(ad_prop_value,
AD_VERSION, 0);
set_bit(SDE_DSPP_AD, &dspp->features);
rc = _add_to_irq_offset_list(sde_cfg,
SDE_INTR_HWBLK_AD4, dspp->id,
dspp->base + sblk->ad.base);
if (rc)
goto end;
}
sblk->ltm.id = SDE_DSPP_LTM;
@@ -2448,6 +2541,11 @@ static int sde_dspp_parse_dt(struct device_node *np,
sblk->ltm.version = PROP_VALUE_ACCESS(ltm_prop_value,
LTM_VERSION, 0);
set_bit(SDE_DSPP_LTM, &dspp->features);
rc = _add_to_irq_offset_list(sde_cfg,
SDE_INTR_HWBLK_LTM, dspp->id,
dspp->base + sblk->ltm.base);
if (rc)
goto end;
}
}
@@ -2902,6 +3000,8 @@ static int _sde_vbif_populate(struct sde_mdss_cfg *sde_cfg,
for (j = 0; j < prop_count[VBIF_MEMTYPE_1]; j++)
vbif->memtype[k++] = PROP_VALUE_ACCESS(
prop_value, VBIF_MEMTYPE_1, j);
if (sde_cfg->vbif_disable_inner_outer_shareable)
set_bit(SDE_VBIF_DISABLE_SHAREABLE, &vbif->features);
return 0;
}
@@ -3363,6 +3463,21 @@ static int sde_top_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
if (major_version < SDE_HW_MAJOR(SDE_HW_VER_500))
set_bit(SDE_MDP_VSYNC_SEL, &cfg->mdp[0].features);
rc = _add_to_irq_offset_list(cfg, SDE_INTR_HWBLK_TOP,
SDE_INTR_TOP_INTR, cfg->mdp[0].base);
if (rc)
goto end;
rc = _add_to_irq_offset_list(cfg, SDE_INTR_HWBLK_TOP,
SDE_INTR_TOP_INTR2, cfg->mdp[0].base);
if (rc)
goto end;
rc = _add_to_irq_offset_list(cfg, SDE_INTR_HWBLK_TOP,
SDE_INTR_TOP_HIST_INTR, cfg->mdp[0].base);
if (rc)
goto end;
if (prop_exists[SEC_SID_MASK]) {
cfg->sec_sid_mask_count = prop_count[SEC_SID_MASK];
for (i = 0; i < cfg->sec_sid_mask_count; i++)
@@ -4045,29 +4160,28 @@ static void _sde_hw_setup_uidle(struct sde_uidle_cfg *uidle_cfg)
static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
{
int i, rc = 0;
int rc = 0;
if (!sde_cfg)
return -EINVAL;
for (i = 0; i < MDSS_INTR_MAX; i++)
set_bit(i, sde_cfg->mdss_irqs);
/* default settings for *MOST* targets */
sde_cfg->has_mixer_combined_alpha = true;
/* target specific settings */
if (IS_MSM8996_TARGET(hw_rev)) {
sde_cfg->perf.min_prefill_lines = 21;
clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
sde_cfg->has_decimation = true;
sde_cfg->has_mixer_combined_alpha = false;
} else if (IS_MSM8998_TARGET(hw_rev)) {
sde_cfg->has_wb_ubwc = true;
sde_cfg->perf.min_prefill_lines = 25;
sde_cfg->vbif_qos_nlvl = 4;
sde_cfg->ts_prefill_rev = 1;
clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
sde_cfg->has_decimation = true;
sde_cfg->has_cursor = true;
sde_cfg->has_hdr = true;
sde_cfg->has_mixer_combined_alpha = false;
} else if (IS_SDM845_TARGET(hw_rev)) {
sde_cfg->has_wb_ubwc = true;
sde_cfg->has_cwb_support = true;
@@ -4076,8 +4190,6 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->ts_prefill_rev = 2;
sde_cfg->sui_misr_supported = true;
sde_cfg->sui_block_xin_mask = 0x3F71;
clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
sde_cfg->has_decimation = true;
sde_cfg->has_hdr = true;
sde_cfg->has_vig_p010 = true;
@@ -4086,8 +4198,6 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->perf.min_prefill_lines = 24;
sde_cfg->vbif_qos_nlvl = 8;
sde_cfg->ts_prefill_rev = 2;
clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
sde_cfg->has_decimation = true;
sde_cfg->has_hdr = true;
sde_cfg->has_vig_p010 = true;
@@ -4110,9 +4220,8 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->has_sui_blendstage = true;
sde_cfg->has_qos_fl_nocalc = true;
sde_cfg->has_3d_merge_reset = true;
clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
sde_cfg->has_decimation = true;
sde_cfg->vbif_disable_inner_outer_shareable = true;
} else if (IS_SDMSHRIKE_TARGET(hw_rev)) {
sde_cfg->has_wb_ubwc = true;
sde_cfg->perf.min_prefill_lines = 24;
@@ -4120,8 +4229,6 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->ts_prefill_rev = 2;
sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
sde_cfg->delay_prg_fetch_start = true;
clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
sde_cfg->has_decimation = true;
sde_cfg->has_hdr = true;
sde_cfg->has_vig_p010 = true;
@@ -4140,10 +4247,9 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->has_sui_blendstage = true;
sde_cfg->has_qos_fl_nocalc = true;
sde_cfg->has_3d_merge_reset = true;
clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
sde_cfg->has_hdr = true;
sde_cfg->has_vig_p010 = true;
sde_cfg->vbif_disable_inner_outer_shareable = true;
} else if (IS_SDMMAGPIE_TARGET(hw_rev)) {
sde_cfg->has_cwb_support = true;
sde_cfg->has_wb_ubwc = true;
@@ -4159,6 +4265,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->has_sui_blendstage = true;
sde_cfg->has_qos_fl_nocalc = true;
sde_cfg->has_3d_merge_reset = true;
sde_cfg->vbif_disable_inner_outer_shareable = true;
} else if (IS_KONA_TARGET(hw_rev)) {
sde_cfg->has_cwb_support = true;
sde_cfg->has_wb_ubwc = true;
@@ -4174,8 +4281,6 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->has_sui_blendstage = true;
sde_cfg->has_qos_fl_nocalc = true;
sde_cfg->has_3d_merge_reset = true;
clear_bit(MDSS_INTR_AD4_0_INTR, sde_cfg->mdss_irqs);
clear_bit(MDSS_INTR_AD4_1_INTR, sde_cfg->mdss_irqs);
sde_cfg->has_hdr = true;
sde_cfg->has_hdr_plus = true;
set_bit(SDE_MDP_DHDR_MEMPOOL, &sde_cfg->mdp[0].features);
@@ -4191,6 +4296,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->true_inline_prefill_lines_nv12 = 32;
sde_cfg->true_inline_prefill_lines = 48;
sde_cfg->uidle_cfg.uidle_rev = SDE_UIDLE_VERSION_1_0_0;
sde_cfg->inline_disable_const_clr = true;
} else if (IS_SAIPAN_TARGET(hw_rev)) {
sde_cfg->has_cwb_support = true;
sde_cfg->has_wb_ubwc = true;
@@ -4206,8 +4312,6 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->has_sui_blendstage = true;
sde_cfg->has_qos_fl_nocalc = true;
sde_cfg->has_3d_merge_reset = true;
clear_bit(MDSS_INTR_AD4_0_INTR, sde_cfg->mdss_irqs);
clear_bit(MDSS_INTR_AD4_1_INTR, sde_cfg->mdss_irqs);
sde_cfg->has_hdr = true;
sde_cfg->has_hdr_plus = true;
set_bit(SDE_MDP_DHDR_MEMPOOL, &sde_cfg->mdp[0].features);
@@ -4222,6 +4326,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->true_inline_prefill_fudge_lines = 2;
sde_cfg->true_inline_prefill_lines_nv12 = 32;
sde_cfg->true_inline_prefill_lines = 48;
sde_cfg->inline_disable_const_clr = true;
} else if (IS_SDMTRINKET_TARGET(hw_rev)) {
sde_cfg->has_cwb_support = true;
sde_cfg->has_qsync = true;
@@ -4235,6 +4340,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->sui_block_xin_mask = 0xC61;
sde_cfg->has_hdr = false;
sde_cfg->has_sui_blendstage = true;
sde_cfg->vbif_disable_inner_outer_shareable = true;
} else if (IS_BENGAL_TARGET(hw_rev)) {
sde_cfg->has_cwb_support = false;
sde_cfg->has_qsync = true;
@@ -4248,6 +4354,36 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->sui_block_xin_mask = 0xC01;
sde_cfg->has_hdr = false;
sde_cfg->has_sui_blendstage = true;
sde_cfg->vbif_disable_inner_outer_shareable = true;
} else if (IS_LAHAINA_TARGET(hw_rev)) {
sde_cfg->has_cwb_support = true;
sde_cfg->has_wb_ubwc = true;
sde_cfg->has_qsync = true;
sde_cfg->perf.min_prefill_lines = 24;
sde_cfg->vbif_qos_nlvl = 8;
sde_cfg->ts_prefill_rev = 2;
sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
sde_cfg->delay_prg_fetch_start = true;
sde_cfg->sui_ns_allowed = true;
sde_cfg->sui_misr_supported = true;
sde_cfg->sui_block_xin_mask = 0x3F71;
sde_cfg->has_3d_merge_reset = true;
sde_cfg->has_hdr = true;
sde_cfg->has_hdr_plus = true;
set_bit(SDE_MDP_DHDR_MEMPOOL, &sde_cfg->mdp[0].features);
sde_cfg->has_vig_p010 = true;
sde_cfg->true_inline_rot_rev = SDE_INLINE_ROT_VERSION_1_0_0;
sde_cfg->true_inline_dwnscale_rt_num =
MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_NUMERATOR;
sde_cfg->true_inline_dwnscale_rt_denom =
MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DENOMINATOR;
sde_cfg->true_inline_dwnscale_nrt =
MAX_DOWNSCALE_RATIO_INLINE_ROT_NRT_DEFAULT;
sde_cfg->true_inline_prefill_fudge_lines = 2;
sde_cfg->true_inline_prefill_lines_nv12 = 32;
sde_cfg->true_inline_prefill_lines = 48;
sde_cfg->uidle_cfg.uidle_rev = SDE_UIDLE_VERSION_1_0_0;
sde_cfg->vbif_disable_inner_outer_shareable = true;
} else {
SDE_ERROR("unsupported chipset id:%X\n", hw_rev);
sde_cfg->perf.min_prefill_lines = 0xffff;
@@ -4328,6 +4464,8 @@ void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
if (!sde_cfg)
return;
sde_hw_catalog_irq_offset_list_delete(&sde_cfg->irq_offset_list);
for (i = 0; i < sde_cfg->sspp_count; i++)
kfree(sde_cfg->sspp[i].sblk);
@@ -4388,6 +4526,7 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
return ERR_PTR(-ENOMEM);
sde_cfg->hwversion = hw_rev;
INIT_LIST_HEAD(&sde_cfg->irq_offset_list);
rc = _sde_hardware_pre_caps(sde_cfg, hw_rev);
if (rc)

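The interface parsing above is what makes panel TE support data-driven: a non-zero per-interface entry in qcom,sde-intf-tear-irq-off both registers an INTF_TEAR block on the irq offset list and sets SDE_INTF_TE for that interface, while a zero or absent entry leaves TE disabled. A small self-contained model of that decision, using made-up values:

/* Illustrative model (not driver code) of how a per-interface tear-irq
 * offset array translates into SDE_INTF_TE capability. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_INTF 5

int main(void)
{
	/* hypothetical parsed values: only INTF 1 and INTF 2 have a
	 * tear-check block; zero means "no offset provided" */
	const unsigned int te_irq_off[NUM_INTF] = { 0, 0x6E800, 0x6E900, 0, 0 };
	bool has_intf_te[NUM_INTF] = { false };
	int i;

	for (i = 0; i < NUM_INTF; i++) {
		if (!te_irq_off[i])
			continue;	/* SDE_INTF_TE stays clear */
		/* driver: _add_to_irq_offset_list(INTF_TEAR, id, offset)
		 * followed by set_bit(SDE_INTF_TE, &intf->features) */
		has_intf_te[i] = true;
	}

	for (i = 0; i < NUM_INTF; i++)
		printf("INTF_%d: TE %s\n", i,
		       has_intf_te[i] ? "supported" : "not supported");
	return 0;
}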
View File

@@ -30,29 +30,25 @@
#define SDE_HW_STEP(rev) ((rev) & 0xFFFF)
#define SDE_HW_MAJOR_MINOR(rev) ((rev) >> 16)
#define IS_SDE_MAJOR_SAME(rev1, rev2) \
(SDE_HW_MAJOR((rev1)) == SDE_HW_MAJOR((rev2)))
#define IS_SDE_MAJOR_MINOR_SAME(rev1, rev2) \
(SDE_HW_MAJOR_MINOR((rev1)) == SDE_HW_MAJOR_MINOR((rev2)))
#define SDE_HW_VER_170 SDE_HW_VER(1, 7, 0) /* 8996 v1.0 */
#define SDE_HW_VER_171 SDE_HW_VER(1, 7, 1) /* 8996 v2.0 */
#define SDE_HW_VER_172 SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */
#define SDE_HW_VER_300 SDE_HW_VER(3, 0, 0) /* 8998 v1.0 */
#define SDE_HW_VER_301 SDE_HW_VER(3, 0, 1) /* 8998 v1.1 */
#define SDE_HW_VER_400 SDE_HW_VER(4, 0, 0) /* sdm845 v1.0 */
#define SDE_HW_VER_401 SDE_HW_VER(4, 0, 1) /* sdm845 v2.0 */
#define SDE_HW_VER_410 SDE_HW_VER(4, 1, 0) /* sdm670 v1.0 */
#define SDE_HW_VER_500 SDE_HW_VER(5, 0, 0) /* sm8150 v1.0 */
#define SDE_HW_VER_501 SDE_HW_VER(5, 0, 1) /* sm8150 v2.0 */
#define SDE_HW_VER_510 SDE_HW_VER(5, 1, 0) /* sdmshrike v1.0 */
#define SDE_HW_VER_520 SDE_HW_VER(5, 2, 0) /* sdmmagpie v1.0 */
#define SDE_HW_VER_530 SDE_HW_VER(5, 3, 0) /* sm6150 v1.0 */
#define SDE_HW_VER_540 SDE_HW_VER(5, 4, 0) /* sdmtrinket v1.0 */
#define SDE_HW_VER_170 SDE_HW_VER(1, 7, 0) /* 8996 */
#define SDE_HW_VER_300 SDE_HW_VER(3, 0, 0) /* 8998 */
#define SDE_HW_VER_400 SDE_HW_VER(4, 0, 0) /* sdm845 */
#define SDE_HW_VER_410 SDE_HW_VER(4, 1, 0) /* sdm670 */
#define SDE_HW_VER_500 SDE_HW_VER(5, 0, 0) /* sm8150 */
#define SDE_HW_VER_510 SDE_HW_VER(5, 1, 0) /* sdmshrike */
#define SDE_HW_VER_520 SDE_HW_VER(5, 2, 0) /* sdmmagpie */
#define SDE_HW_VER_530 SDE_HW_VER(5, 3, 0) /* sm6150 */
#define SDE_HW_VER_540 SDE_HW_VER(5, 4, 0) /* sdmtrinket */
#define SDE_HW_VER_600 SDE_HW_VER(6, 0, 0) /* kona */
#define SDE_HW_VER_610 SDE_HW_VER(6, 1, 0) /* sm7250 */
#define SDE_HW_VER_630 SDE_HW_VER(6, 3, 0) /* bengal */
#define SDE_HW_VER_700 SDE_HW_VER(7, 0, 0) /* lahaina */
/* Avoid using below IS_XXX macros outside catalog, use feature bit instead */
#define IS_SDE_MAJOR_SAME(rev1, rev2) \
(SDE_HW_MAJOR((rev1)) == SDE_HW_MAJOR((rev2)))
#define IS_SDE_MAJOR_MINOR_SAME(rev1, rev2) \
(SDE_HW_MAJOR_MINOR((rev1)) == SDE_HW_MAJOR_MINOR((rev2)))
#define IS_MSM8996_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_170)
#define IS_MSM8998_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_300)
@@ -66,6 +62,7 @@
#define IS_KONA_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_600)
#define IS_SAIPAN_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_610)
#define IS_BENGAL_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_630)
#define IS_LAHAINA_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_700)
#define SDE_HW_BLK_NAME_LEN 16
@@ -138,26 +135,30 @@ enum {
#define SSPP_SYS_CACHE_NO_ALLOC BIT(4)
/**
* SDE INTERRUPTS - maintains the possible hw irq's allowed by HW
* The order in this enum must match the order of the irqs defined
* by 'sde_irq_map'
* All INTRs relevant for a specific target should be enabled via
* _add_to_irq_offset_list()
*/
enum sde_intr_enum {
MDSS_INTR_SSPP_TOP0_INTR,
MDSS_INTR_SSPP_TOP0_INTR2,
MDSS_INTF_TEAR_1_INTR,
MDSS_INTF_TEAR_2_INTR,
MDSS_INTR_SSPP_TOP0_HIST_INTR,
MDSS_INTR_INTF_0_INTR,
MDSS_INTR_INTF_1_INTR,
MDSS_INTR_INTF_2_INTR,
MDSS_INTR_INTF_3_INTR,
MDSS_INTR_INTF_4_INTR,
MDSS_INTR_AD4_0_INTR,
MDSS_INTR_AD4_1_INTR,
MDSS_INTR_LTM_0_INTR,
MDSS_INTR_LTM_1_INTR,
MDSS_INTR_MAX
enum sde_intr_hwblk_type {
SDE_INTR_HWBLK_TOP,
SDE_INTR_HWBLK_INTF,
SDE_INTR_HWBLK_AD4,
SDE_INTR_HWBLK_INTF_TEAR,
SDE_INTR_HWBLK_LTM,
SDE_INTR_HWBLK_MAX
};
enum sde_intr_top_intr {
SDE_INTR_TOP_INTR = 1,
SDE_INTR_TOP_INTR2,
SDE_INTR_TOP_HIST_INTR,
SDE_INTR_TOP_MAX
};
struct sde_intr_irq_offsets {
struct list_head list;
enum sde_intr_hwblk_type type;
u32 instance_idx;
u32 base_offset;
};
/**
@@ -211,6 +212,7 @@ enum {
* @SDE_SSPP_BLOCK_SEC_UI Blocks secure-ui layers
* @SDE_SSPP_SCALER_QSEED3LITE Qseed3lite algorithm support
* @SDE_SSPP_TRUE_INLINE_ROT_V1, Support of SSPP true inline rotation v1
* @SDE_SSPP_INLINE_CONST_CLR Inline rotation requires const clr disabled
* @SDE_SSPP_MAX maximum value
*/
enum {
@@ -239,6 +241,7 @@ enum {
SDE_SSPP_BLOCK_SEC_UI,
SDE_SSPP_SCALER_QSEED3LITE,
SDE_SSPP_TRUE_INLINE_ROT_V1,
SDE_SSPP_INLINE_CONST_CLR,
SDE_SSPP_MAX
};
@@ -275,6 +278,7 @@ enum {
* @SDE_DISP_CWB_PREF Layer mixer preferred for CWB
* @SDE_DISP_PRIMARY_PREF Layer mixer preferred for primary display
* @SDE_DISP_SECONDARY_PREF Layer mixer preferred for secondary display
* @SDE_MIXER_COMBINED_ALPHA Layer mixer bg and fg alpha in single register
* @SDE_MIXER_MAX maximum value
*/
enum {
@@ -285,6 +289,7 @@ enum {
SDE_DISP_PRIMARY_PREF,
SDE_DISP_SECONDARY_PREF,
SDE_DISP_CWB_PREF,
SDE_MIXER_COMBINED_ALPHA,
SDE_MIXER_MAX
};
@@ -459,11 +464,13 @@ enum {
* VBIF sub-blocks and features
* @SDE_VBIF_QOS_OTLIM VBIF supports OT Limit
* @SDE_VBIF_QOS_REMAP VBIF supports QoS priority remap
* @SDE_VBIF_DISABLE_SHAREABLE: VBIF requires inner/outer shareables disabled
* @SDE_VBIF_MAX maximum value
*/
enum {
SDE_VBIF_QOS_OTLIM = 0x1,
SDE_VBIF_QOS_REMAP,
SDE_VBIF_DISABLE_SHAREABLE,
SDE_VBIF_MAX
};
@@ -947,12 +954,14 @@ struct sde_cdm_cfg {
* @type: Interface type(DSI, DP, HDMI)
* @controller_id: Controller Instance ID in case of multiple of intf type
* @prog_fetch_lines_worst_case Worst case latency num lines needed to prefetch
* @te_irq_offset: Register offset for INTF TE IRQ block
*/
struct sde_intf_cfg {
SDE_HW_BLK_INFO;
u32 type; /* interface type*/
u32 controller_id;
u32 prog_fetch_lines_worst_case;
u32 te_irq_offset;
};
/**
@@ -1272,6 +1281,9 @@ struct sde_limit_cfg {
* @has_3d_merge_reset Supports 3D merge reset
* @has_decimation Supports decimation
* @has_qos_fl_nocalc flag to indicate QoS fill level needs no calculation
* @has_mixer_combined_alpha Mixer has single register for FG & BG alpha
* @vbif_disable_inner_outer_shareable VBIF requires disabling shareables
* @inline_disable_const_clr Disable constant color during inline rotate
* @sc_cfg: system cache configuration
* @uidle_cfg Settings for uidle feature
* @sui_misr_supported indicate if secure-ui-misr is supported
@@ -1287,7 +1299,7 @@ struct sde_limit_cfg {
* @has_cursor indicates if hardware cursor is supported
* @has_vig_p010 indicates if vig pipe supports p010 format
* @inline_rot_formats formats supported by the inline rotator feature
* @mdss_irqs bitmap with the irqs supported by the target
* @irq_offset_list list of sde_intr_irq_offsets to initialize irq table
*/
struct sde_mdss_cfg {
u32 hwversion;
@@ -1332,6 +1344,9 @@ struct sde_mdss_cfg {
bool has_3d_merge_reset;
bool has_decimation;
bool has_qos_fl_nocalc;
bool has_mixer_combined_alpha;
bool vbif_disable_inner_outer_shareable;
bool inline_disable_const_clr;
struct sde_sc_cfg sc_cfg;
@@ -1417,7 +1432,7 @@ struct sde_mdss_cfg {
struct sde_format_extended *virt_vig_formats;
struct sde_format_extended *inline_rot_formats;
DECLARE_BITMAP(mdss_irqs, MDSS_INTR_MAX);
struct list_head irq_offset_list;
};
struct sde_mdss_hw_cfg_handler {
@@ -1471,6 +1486,22 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev);
*/
void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg);
/**
* sde_hw_catalog_irq_offset_list_delete - delete the irq_offset_list
* maintained by the catalog
* @head: pointer to the catalog's irq_offset_list
*/
static inline void sde_hw_catalog_irq_offset_list_delete(
struct list_head *head)
{
struct sde_intr_irq_offsets *item, *tmp;
list_for_each_entry_safe(item, tmp, head, list) {
list_del(&item->list);
kfree(item);
}
}
/**
* sde_hw_sspp_multirect_enabled - check multirect enabled for the sspp
* @cfg: pointer to sspp cfg
@@ -1481,9 +1512,4 @@ static inline bool sde_hw_sspp_multirect_enabled(const struct sde_sspp_cfg *cfg)
test_bit(SDE_SSPP_SMART_DMA_V2, &cfg->features) ||
test_bit(SDE_SSPP_SMART_DMA_V2p5, &cfg->features);
}
static inline bool sde_hw_intf_te_supported(const struct sde_mdss_cfg *sde_cfg)
{
return test_bit(SDE_INTF_TE, &(sde_cfg->intf[0].features));
}
#endif /* _SDE_HW_CATALOG_H */
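
The header changes carry the point of the last change in the series: a target describes itself once, through booleans and per-block feature bits such as SDE_MIXER_COMBINED_ALPHA, SDE_VBIF_DISABLE_SHAREABLE, SDE_SSPP_INLINE_CONST_CLR and SDE_INTF_TE, and the hardware modules test those bits instead of comparing hwversion. A reduced sketch of that pattern, with invented names:

/* Reduced sketch of catalog-driven feature bits (names invented). */
#include <stdio.h>

#define FEAT_COMBINED_ALPHA	(1UL << 0)
#define FEAT_DISABLE_SHAREABLE	(1UL << 1)

struct mixer_cfg { unsigned long features; };

static void setup_blend_ops(const struct mixer_cfg *cfg)
{
	/* consumer keys off the feature bit, never off the hw revision */
	if (cfg->features & FEAT_COMBINED_ALPHA)
		printf("using combined-alpha blend programming\n");
	else
		printf("using split fg/bg alpha programming\n");
}

int main(void)
{
	/* the catalog would set this per target while parsing/pre-caps */
	struct mixer_cfg lm = { .features = FEAT_COMBINED_ALPHA };

	setup_blend_ops(&lm);
	return 0;
}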

View File

@@ -13,27 +13,17 @@
/**
* Register offsets in MDSS register file for the interrupt registers
* w.r.t. to the MDSS base
* w.r.t. base for that block. Base offsets for IRQs should come from the
* device tree and get stored in the catalog(irq_offset_list) until they
* are added to the sde_irq_tbl during the table initialization.
*/
#define HW_INTR_STATUS 0x0010
#define MDP_SSPP_TOP0_OFF 0x1000
#define MDP_INTF_0_OFF 0x6B000
#define MDP_INTF_1_OFF 0x6B800
#define MDP_INTF_2_OFF 0x6C000
#define MDP_INTF_3_OFF 0x6C800
#define MDP_INTF_4_OFF 0x6D000
#define MDP_AD4_0_OFF 0x7D000
#define MDP_AD4_1_OFF 0x7E000
#define MDP_AD4_INTR_EN_OFF 0x41c
#define MDP_AD4_INTR_CLEAR_OFF 0x424
#define MDP_AD4_INTR_STATUS_OFF 0x420
#define MDP_INTF_TEAR_INTF_1_IRQ_OFF 0x6E800
#define MDP_INTF_TEAR_INTF_2_IRQ_OFF 0x6E900
#define MDP_INTF_TEAR_INTR_EN_OFF 0x0
#define MDP_INTF_TEAR_INTR_STATUS_OFF 0x4
#define MDP_INTF_TEAR_INTR_CLEAR_OFF 0x8
#define MDP_LTM_0_OFF 0x7F000
#define MDP_LTM_1_OFF 0x7F100
#define MDP_INTF_TEAR_INTR_STATUS_OFF 0x4
#define MDP_INTF_TEAR_INTR_CLEAR_OFF 0x8
#define MDP_LTM_INTR_EN_OFF 0x50
#define MDP_LTM_INTR_STATUS_OFF 0x54
#define MDP_LTM_INTR_CLEAR_OFF 0x58
@@ -206,8 +196,6 @@
* @clr_off: offset to CLEAR reg
* @en_off: offset to ENABLE reg
* @status_off: offset to STATUS reg
* @sde_irq_idx; global index in the 'sde_irq_map' table,
* to know which interrupt type, instance, mask, etc. to use
* @map_idx_start first offset in the sde_irq_map table
* @map_idx_end last offset in the sde_irq_map table
*/
@@ -215,7 +203,6 @@ struct sde_intr_reg {
u32 clr_off;
u32 en_off;
u32 status_off;
int sde_irq_idx;
u32 map_idx_start;
u32 map_idx_end;
};
@@ -226,7 +213,7 @@ struct sde_intr_reg {
* @instance_idx: instance index of the associated HW block in SDE
* @irq_mask: corresponding bit in the interrupt status reg
* @reg_idx: index in the 'sde_irq_tbl' table, to know which
* registers offsets to use. -1 = invalid offset
* registers offsets to use.
*/
struct sde_irq_type {
u32 intr_type;
@@ -240,11 +227,13 @@ struct sde_irq_type {
* a matching interface type and instance index.
* Each of these tables are copied to a dynamically allocated
* table, that will be used to service each of the irqs
* -1 indicates an uninitialized value which should be set when copying
* these tables to the sde_irq_map.
*/
static struct sde_irq_type sde_irq_intr_map[] = {
{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, -1},
{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, 0},
{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, -1},
{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, -1},
{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, -1},
@@ -304,7 +293,6 @@ static struct sde_irq_type sde_irq_intr2_map[] = {
{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, -1},
{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
SDE_INTR_PING_PONG_S0_WR_PTR, -1},
@@ -363,15 +351,12 @@ static struct sde_irq_type sde_irq_intr2_map[] = {
};
static struct sde_irq_type sde_irq_hist_map[] = {
{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, -1},
{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, -1},
{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, -1},
{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, -1},
{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, -1},
{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, -1},
@@ -382,11 +367,9 @@ static struct sde_irq_type sde_irq_hist_map[] = {
{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, -1},
{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, -1},
{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, -1},
{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, -1},
{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, -1},
{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, -1},
@@ -395,167 +378,46 @@ static struct sde_irq_type sde_irq_hist_map[] = {
SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, -1},
};
static struct sde_irq_type sde_irq_intf0_map[] = {
{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
static struct sde_irq_type sde_irq_intf_map[] = {
{ SDE_IRQ_TYPE_SFI_VIDEO_IN, -1,
SDE_INTR_VIDEO_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, -1,
SDE_INTR_VIDEO_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
{ SDE_IRQ_TYPE_SFI_CMD_0_IN, -1,
SDE_INTR_DSICMD_0_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, -1,
SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
{ SDE_IRQ_TYPE_SFI_CMD_1_IN, -1,
SDE_INTR_DSICMD_1_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, -1,
SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
{ SDE_IRQ_TYPE_SFI_CMD_2_IN, -1,
SDE_INTR_DSICMD_2_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, -1,
SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_PROG_LINE, INTF_0, SDE_INTR_PROG_LINE, -1},
{ SDE_IRQ_TYPE_PROG_LINE, -1, SDE_INTR_PROG_LINE, -1},
};
static struct sde_irq_type sde_irq_inf1_map[] = {
{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
SDE_INTR_VIDEO_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
SDE_INTR_VIDEO_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
SDE_INTR_DSICMD_0_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
SDE_INTR_DSICMD_1_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
SDE_INTR_DSICMD_2_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_PROG_LINE, INTF_1, SDE_INTR_PROG_LINE, -1},
static struct sde_irq_type sde_irq_ad4_map[] = {
{ SDE_IRQ_TYPE_AD4_BL_DONE, -1, SDE_INTR_BACKLIGHT_UPDATED, -1},
};
static struct sde_irq_type sde_irq_intf2_map[] = {
{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
SDE_INTR_VIDEO_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
SDE_INTR_VIDEO_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
SDE_INTR_DSICMD_0_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
SDE_INTR_DSICMD_1_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
SDE_INTR_DSICMD_2_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_PROG_LINE, INTF_2, SDE_INTR_PROG_LINE, -1},
};
static struct sde_irq_type sde_irq_intf3_map[] = {
{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
SDE_INTR_VIDEO_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
SDE_INTR_VIDEO_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
SDE_INTR_DSICMD_0_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
SDE_INTR_DSICMD_1_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
SDE_INTR_DSICMD_2_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_PROG_LINE, INTF_3, SDE_INTR_PROG_LINE, -1},
};
static struct sde_irq_type sde_irq_inf4_map[] = {
{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
SDE_INTR_VIDEO_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
SDE_INTR_VIDEO_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
SDE_INTR_DSICMD_0_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
SDE_INTR_DSICMD_1_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
SDE_INTR_DSICMD_2_INTO_STATIC, -1},
{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
{ SDE_IRQ_TYPE_PROG_LINE, INTF_4, SDE_INTR_PROG_LINE, -1},
};
static struct sde_irq_type sde_irq_ad4_0_map[] = {
{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_0, SDE_INTR_BACKLIGHT_UPDATED, -1},
};
static struct sde_irq_type sde_irq_ad4_1_map[] = {
{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_1, SDE_INTR_BACKLIGHT_UPDATED, -1},
};
static struct sde_irq_type sde_irq_intf1_te_map[] = {
{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_1,
static struct sde_irq_type sde_irq_intf_te_map[] = {
{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, -1,
SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_1,
{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, -1,
SDE_INTR_INTF_TEAR_WR_PTR, -1},
{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_1,
{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, -1,
SDE_INTR_INTF_TEAR_RD_PTR, -1},
{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_1,
{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, -1,
SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
};
static struct sde_irq_type sde_irq_intf2_te_map[] = {
{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_2,
SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_2,
SDE_INTR_INTF_TEAR_WR_PTR, -1},
{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_2,
SDE_INTR_INTF_TEAR_RD_PTR, -1},
{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_2,
SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
};
static struct sde_irq_type sde_irq_ltm_0_map[] = {
{ SDE_IRQ_TYPE_LTM_STATS_DONE, DSPP_0, SDE_INTR_LTM_STATS_DONE, -1},
{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, DSPP_0, SDE_INTR_LTM_STATS_WB_PB, -1},
};
static struct sde_irq_type sde_irq_ltm_1_map[] = {
{ SDE_IRQ_TYPE_LTM_STATS_DONE, DSPP_1, SDE_INTR_LTM_STATS_DONE, -1},
{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, DSPP_1, SDE_INTR_LTM_STATS_WB_PB, -1},
static struct sde_irq_type sde_irq_ltm_map[] = {
{ SDE_IRQ_TYPE_LTM_STATS_DONE, -1, SDE_INTR_LTM_STATS_DONE, -1},
{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, -1, SDE_INTR_LTM_STATS_WB_PB, -1},
};
static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
@@ -596,7 +458,6 @@ static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
int end_idx;
u32 irq_status;
unsigned long irq_flags;
int sde_irq_idx;
if (!intr)
return;
@@ -610,11 +471,6 @@ static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
for (reg_idx = 0; reg_idx < intr->sde_irq_size; reg_idx++) {
irq_status = intr->save_irq_status[reg_idx];
/* get the global offset in 'sde_irq_map' */
sde_irq_idx = intr->sde_irq_tbl[reg_idx].sde_irq_idx;
if (sde_irq_idx < 0)
continue;
/*
* Each Interrupt register has dynamic range of indexes,
* initialized during hw_intr_init when sde_irq_tbl is created.
@@ -1017,6 +873,83 @@ static u32 sde_hw_intr_get_intr_status_nomask(struct sde_hw_intr *intr,
return intr_status;
}
static int _set_sde_irq_tbl_offset_top(struct sde_intr_reg *sde_irq,
struct sde_intr_irq_offsets *item)
{
u32 base_offset;
if (!sde_irq || !item)
return -EINVAL;
base_offset = item->base_offset;
switch (item->instance_idx) {
case SDE_INTR_TOP_INTR:
sde_irq->clr_off = base_offset + INTR_CLEAR;
sde_irq->en_off = base_offset + INTR_EN;
sde_irq->status_off = base_offset + INTR_STATUS;
break;
case SDE_INTR_TOP_INTR2:
sde_irq->clr_off = base_offset + INTR2_CLEAR;
sde_irq->en_off = base_offset + INTR2_EN;
sde_irq->status_off = base_offset + INTR2_STATUS;
break;
case SDE_INTR_TOP_HIST_INTR:
sde_irq->clr_off = base_offset + HIST_INTR_CLEAR;
sde_irq->en_off = base_offset + HIST_INTR_EN;
sde_irq->status_off = base_offset + HIST_INTR_STATUS;
break;
default:
pr_err("invalid TOP intr for instance %d\n",
item->instance_idx);
return -EINVAL;
}
return 0;
}
static int _set_sde_irq_tbl_offset(struct sde_intr_reg *sde_irq,
struct sde_intr_irq_offsets *item)
{
u32 base_offset, rc = 0;
if (!sde_irq || !item)
return -EINVAL;
base_offset = item->base_offset;
switch (item->type) {
case SDE_INTR_HWBLK_TOP:
rc = _set_sde_irq_tbl_offset_top(sde_irq, item);
break;
case SDE_INTR_HWBLK_INTF:
sde_irq->clr_off = base_offset + INTF_INTR_CLEAR;
sde_irq->en_off = base_offset + INTF_INTR_EN;
sde_irq->status_off = base_offset + INTF_INTR_STATUS;
break;
case SDE_INTR_HWBLK_AD4:
sde_irq->clr_off = base_offset + MDP_AD4_INTR_CLEAR_OFF;
sde_irq->en_off = base_offset + MDP_AD4_INTR_EN_OFF;
sde_irq->status_off = base_offset + MDP_AD4_INTR_STATUS_OFF;
break;
case SDE_INTR_HWBLK_INTF_TEAR:
sde_irq->clr_off = base_offset + MDP_INTF_TEAR_INTR_CLEAR_OFF;
sde_irq->en_off = base_offset + MDP_INTF_TEAR_INTR_EN_OFF;
sde_irq->status_off = base_offset +
MDP_INTF_TEAR_INTR_STATUS_OFF;
break;
case SDE_INTR_HWBLK_LTM:
sde_irq->clr_off = base_offset + MDP_LTM_INTR_CLEAR_OFF;
sde_irq->en_off = base_offset + MDP_LTM_INTR_EN_OFF;
sde_irq->status_off = base_offset + MDP_LTM_INTR_STATUS_OFF;
break;
default:
pr_err("unrecognized intr blk type %d\n",
item->type);
rc = -EINVAL;
}
return rc;
}
static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
{
ops->set_mask = sde_hw_intr_set_mask;
@@ -1050,143 +983,6 @@ static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
return &m->mdss[0];
}
static inline int _sde_hw_intr_init_sde_irq_tbl(u32 irq_tbl_size,
struct sde_intr_reg *sde_irq_tbl)
{
int idx;
struct sde_intr_reg *sde_irq;
for (idx = 0; idx < irq_tbl_size; idx++) {
sde_irq = &sde_irq_tbl[idx];
switch (sde_irq->sde_irq_idx) {
case MDSS_INTR_SSPP_TOP0_INTR:
sde_irq->clr_off =
MDP_SSPP_TOP0_OFF+INTR_CLEAR;
sde_irq->en_off =
MDP_SSPP_TOP0_OFF+INTR_EN;
sde_irq->status_off =
MDP_SSPP_TOP0_OFF+INTR_STATUS;
break;
case MDSS_INTR_SSPP_TOP0_INTR2:
sde_irq->clr_off =
MDP_SSPP_TOP0_OFF+INTR2_CLEAR;
sde_irq->en_off =
MDP_SSPP_TOP0_OFF+INTR2_EN;
sde_irq->status_off =
MDP_SSPP_TOP0_OFF+INTR2_STATUS;
break;
case MDSS_INTR_SSPP_TOP0_HIST_INTR:
sde_irq->clr_off =
MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR;
sde_irq->en_off =
MDP_SSPP_TOP0_OFF+HIST_INTR_EN;
sde_irq->status_off =
MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS;
break;
case MDSS_INTR_INTF_0_INTR:
sde_irq->clr_off =
MDP_INTF_0_OFF+INTF_INTR_CLEAR;
sde_irq->en_off =
MDP_INTF_0_OFF+INTF_INTR_EN;
sde_irq->status_off =
MDP_INTF_0_OFF+INTF_INTR_STATUS;
break;
case MDSS_INTR_INTF_1_INTR:
sde_irq->clr_off =
MDP_INTF_1_OFF+INTF_INTR_CLEAR;
sde_irq->en_off =
MDP_INTF_1_OFF+INTF_INTR_EN;
sde_irq->status_off =
MDP_INTF_1_OFF+INTF_INTR_STATUS;
break;
case MDSS_INTR_INTF_2_INTR:
sde_irq->clr_off =
MDP_INTF_2_OFF+INTF_INTR_CLEAR;
sde_irq->en_off =
MDP_INTF_2_OFF+INTF_INTR_EN;
sde_irq->status_off =
MDP_INTF_2_OFF+INTF_INTR_STATUS;
break;
case MDSS_INTR_INTF_3_INTR:
sde_irq->clr_off =
MDP_INTF_3_OFF+INTF_INTR_CLEAR;
sde_irq->en_off =
MDP_INTF_3_OFF+INTF_INTR_EN;
sde_irq->status_off =
MDP_INTF_3_OFF+INTF_INTR_STATUS;
break;
case MDSS_INTR_INTF_4_INTR:
sde_irq->clr_off =
MDP_INTF_4_OFF+INTF_INTR_CLEAR;
sde_irq->en_off =
MDP_INTF_4_OFF+INTF_INTR_EN;
sde_irq->status_off =
MDP_INTF_4_OFF+INTF_INTR_STATUS;
break;
case MDSS_INTR_AD4_0_INTR:
sde_irq->clr_off =
MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF;
sde_irq->en_off =
MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF;
sde_irq->status_off =
MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF;
break;
case MDSS_INTR_AD4_1_INTR:
sde_irq->clr_off =
MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF;
sde_irq->en_off =
MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF;
sde_irq->status_off =
MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF;
break;
case MDSS_INTF_TEAR_1_INTR:
sde_irq->clr_off = MDP_INTF_TEAR_INTF_1_IRQ_OFF +
MDP_INTF_TEAR_INTR_CLEAR_OFF;
sde_irq->en_off =
MDP_INTF_TEAR_INTF_1_IRQ_OFF +
MDP_INTF_TEAR_INTR_EN_OFF;
sde_irq->status_off = MDP_INTF_TEAR_INTF_1_IRQ_OFF +
MDP_INTF_TEAR_INTR_STATUS_OFF;
break;
case MDSS_INTF_TEAR_2_INTR:
sde_irq->clr_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
MDP_INTF_TEAR_INTR_CLEAR_OFF;
sde_irq->en_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
MDP_INTF_TEAR_INTR_EN_OFF;
sde_irq->status_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
MDP_INTF_TEAR_INTR_STATUS_OFF;
break;
case MDSS_INTR_LTM_0_INTR:
sde_irq->clr_off =
MDP_LTM_0_OFF + MDP_LTM_INTR_CLEAR_OFF;
sde_irq->en_off =
MDP_LTM_0_OFF + MDP_LTM_INTR_EN_OFF;
sde_irq->status_off =
MDP_LTM_0_OFF + MDP_LTM_INTR_STATUS_OFF;
break;
case MDSS_INTR_LTM_1_INTR:
sde_irq->clr_off =
MDP_LTM_1_OFF + MDP_LTM_INTR_CLEAR_OFF;
sde_irq->en_off =
MDP_LTM_1_OFF + MDP_LTM_INTR_EN_OFF;
sde_irq->status_off =
MDP_LTM_1_OFF + MDP_LTM_INTR_STATUS_OFF;
break;
default:
pr_err("wrong irq idx %d\n",
sde_irq->sde_irq_idx);
return -EINVAL;
}
pr_debug("idx:%d irq_idx:%d clr:0x%x en:0x%x status:0x%x\n",
idx, sde_irq->sde_irq_idx, sde_irq->clr_off,
sde_irq->en_off, sde_irq->status_off);
}
return 0;
}
void sde_hw_intr_destroy(struct sde_hw_intr *intr)
{
if (intr) {
@@ -1198,120 +994,110 @@ void sde_hw_intr_destroy(struct sde_hw_intr *intr)
}
}
static inline u32 _get_irq_map_size(int idx)
static inline u32 _get_irq_map_size_top(enum sde_intr_top_intr inst)
{
u32 ret = 0;
switch (idx) {
case MDSS_INTR_SSPP_TOP0_INTR:
switch (inst) {
case SDE_INTR_TOP_INTR:
ret = ARRAY_SIZE(sde_irq_intr_map);
break;
case MDSS_INTR_SSPP_TOP0_INTR2:
case SDE_INTR_TOP_INTR2:
ret = ARRAY_SIZE(sde_irq_intr2_map);
break;
case MDSS_INTR_SSPP_TOP0_HIST_INTR:
case SDE_INTR_TOP_HIST_INTR:
ret = ARRAY_SIZE(sde_irq_hist_map);
break;
case MDSS_INTR_INTF_0_INTR:
ret = ARRAY_SIZE(sde_irq_intf0_map);
break;
case MDSS_INTR_INTF_1_INTR:
ret = ARRAY_SIZE(sde_irq_inf1_map);
break;
case MDSS_INTR_INTF_2_INTR:
ret = ARRAY_SIZE(sde_irq_intf2_map);
break;
case MDSS_INTR_INTF_3_INTR:
ret = ARRAY_SIZE(sde_irq_intf3_map);
break;
case MDSS_INTR_INTF_4_INTR:
ret = ARRAY_SIZE(sde_irq_inf4_map);
break;
case MDSS_INTR_AD4_0_INTR:
ret = ARRAY_SIZE(sde_irq_ad4_0_map);
break;
case MDSS_INTR_AD4_1_INTR:
ret = ARRAY_SIZE(sde_irq_ad4_1_map);
break;
case MDSS_INTF_TEAR_1_INTR:
ret = ARRAY_SIZE(sde_irq_intf1_te_map);
break;
case MDSS_INTF_TEAR_2_INTR:
ret = ARRAY_SIZE(sde_irq_intf2_te_map);
break;
case MDSS_INTR_LTM_0_INTR:
ret = ARRAY_SIZE(sde_irq_ltm_0_map);
break;
case MDSS_INTR_LTM_1_INTR:
ret = ARRAY_SIZE(sde_irq_ltm_1_map);
break;
default:
pr_err("invalid idx:%d\n", idx);
pr_err("invalid top inst:%d\n", inst);
}
return ret;
}
static inline struct sde_irq_type *_get_irq_map_addr(int idx)
static inline u32 _get_irq_map_size(struct sde_intr_irq_offsets *item)
{
u32 ret = 0;
switch (item->type) {
case SDE_INTR_HWBLK_TOP:
ret = _get_irq_map_size_top(item->instance_idx);
break;
case SDE_INTR_HWBLK_INTF:
ret = ARRAY_SIZE(sde_irq_intf_map);
break;
case SDE_INTR_HWBLK_AD4:
ret = ARRAY_SIZE(sde_irq_ad4_map);
break;
case SDE_INTR_HWBLK_INTF_TEAR:
ret = ARRAY_SIZE(sde_irq_intf_te_map);
break;
case SDE_INTR_HWBLK_LTM:
ret = ARRAY_SIZE(sde_irq_ltm_map);
break;
default:
pr_err("invalid type: %d\n", item->type);
}
return ret;
}
static inline struct sde_irq_type *_get_irq_map_addr_top(
enum sde_intr_top_intr inst)
{
struct sde_irq_type *ret = NULL;
switch (idx) {
case MDSS_INTR_SSPP_TOP0_INTR:
switch (inst) {
case SDE_INTR_TOP_INTR:
ret = sde_irq_intr_map;
break;
case MDSS_INTR_SSPP_TOP0_INTR2:
case SDE_INTR_TOP_INTR2:
ret = sde_irq_intr2_map;
break;
case MDSS_INTR_SSPP_TOP0_HIST_INTR:
case SDE_INTR_TOP_HIST_INTR:
ret = sde_irq_hist_map;
break;
case MDSS_INTR_INTF_0_INTR:
ret = sde_irq_intf0_map;
default:
pr_err("invalid top inst:%d\n", inst);
}
return ret;
}
static inline struct sde_irq_type *_get_irq_map_addr(
struct sde_intr_irq_offsets *item)
{
struct sde_irq_type *ret = NULL;
switch (item->type) {
case SDE_INTR_HWBLK_TOP:
ret = _get_irq_map_addr_top(item->instance_idx);
break;
case MDSS_INTR_INTF_1_INTR:
ret = sde_irq_inf1_map;
case SDE_INTR_HWBLK_INTF:
ret = sde_irq_intf_map;
break;
case MDSS_INTR_INTF_2_INTR:
ret = sde_irq_intf2_map;
case SDE_INTR_HWBLK_AD4:
ret = sde_irq_ad4_map;
break;
case MDSS_INTR_INTF_3_INTR:
ret = sde_irq_intf3_map;
case SDE_INTR_HWBLK_INTF_TEAR:
ret = sde_irq_intf_te_map;
break;
case MDSS_INTR_INTF_4_INTR:
ret = sde_irq_inf4_map;
break;
case MDSS_INTR_AD4_0_INTR:
ret = sde_irq_ad4_0_map;
break;
case MDSS_INTR_AD4_1_INTR:
ret = sde_irq_ad4_1_map;
break;
case MDSS_INTF_TEAR_1_INTR:
ret = sde_irq_intf1_te_map;
break;
case MDSS_INTF_TEAR_2_INTR:
ret = sde_irq_intf2_te_map;
break;
case MDSS_INTR_LTM_0_INTR:
ret = sde_irq_ltm_0_map;
break;
case MDSS_INTR_LTM_1_INTR:
ret = sde_irq_ltm_1_map;
case SDE_INTR_HWBLK_LTM:
ret = sde_irq_ltm_map;
break;
default:
pr_err("invalid idx:%d\n", idx);
pr_err("invalid type: %d\n", item->type);
}
return ret;
}
static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
u32 irq_idx, u32 low_idx, u32 high_idx)
struct sde_intr_irq_offsets *item, u32 low_idx, u32 high_idx)
{
int i, j = 0;
struct sde_irq_type *src = _get_irq_map_addr(irq_idx);
u32 src_size = _get_irq_map_size(irq_idx);
struct sde_irq_type *src = _get_irq_map_addr(item);
u32 src_size = _get_irq_map_size(item);
if (!src)
return -EINVAL;
@@ -1332,61 +1118,60 @@ static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
static int _sde_hw_intr_init_irq_tables(struct sde_hw_intr *intr,
struct sde_mdss_cfg *m)
{
struct sde_intr_irq_offsets *item;
int i, idx, sde_irq_tbl_idx = 0, ret = 0;
u32 low_idx, high_idx;
u32 sde_irq_map_idx = 0;
/* Initialize the offset of the irq's in the sde_irq_map table */
for (idx = 0; idx < MDSS_INTR_MAX; idx++) {
if (test_bit(idx, m->mdss_irqs)) {
low_idx = sde_irq_map_idx;
high_idx = low_idx + _get_irq_map_size(idx);
/* Initialize offsets in the sde_irq_map & sde_irq_tbl tables */
list_for_each_entry(item, &m->irq_offset_list, list) {
low_idx = sde_irq_map_idx;
high_idx = low_idx + _get_irq_map_size(item);
pr_debug("init[%d]=%d low:%d high:%d\n",
sde_irq_tbl_idx, idx, low_idx, high_idx);
pr_debug("init[%d]=%d low:%d high:%d\n",
sde_irq_tbl_idx, idx, low_idx, high_idx);
if (sde_irq_tbl_idx >= intr->sde_irq_size ||
sde_irq_tbl_idx < 0) {
ret = -EINVAL;
goto exit;
}
/* init sde_irq_map with the global irq mapping table */
if (_sde_copy_regs(intr->sde_irq_map,
intr->sde_irq_map_size,
idx, low_idx, high_idx)) {
ret = -EINVAL;
goto exit;
}
/* init irq map with its reg idx within the irq tbl */
for (i = low_idx; i < high_idx; i++) {
intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
pr_debug("sde_irq_map[%d].reg_idx=%d\n",
i, sde_irq_tbl_idx);
}
/* track the idx of the mapping table for this irq in
* sde_irq_map, this to only access the indexes of this
* irq during the irq dispatch
*/
intr->sde_irq_tbl[sde_irq_tbl_idx].sde_irq_idx = idx;
intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start =
low_idx;
intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end =
high_idx;
/* increment idx for both tables accordingly */
sde_irq_tbl_idx++;
sde_irq_map_idx = high_idx;
if (sde_irq_tbl_idx >= intr->sde_irq_size ||
sde_irq_tbl_idx < 0) {
ret = -EINVAL;
goto exit;
}
/* init sde_irq_map with the global irq mapping table */
if (_sde_copy_regs(intr->sde_irq_map, intr->sde_irq_map_size,
item, low_idx, high_idx)) {
ret = -EINVAL;
goto exit;
}
/* init irq map with its reg & instance idxs in the irq tbl */
for (i = low_idx; i < high_idx; i++) {
intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
if (item->type != SDE_INTR_HWBLK_TOP)
intr->sde_irq_map[i].instance_idx =
item->instance_idx;
pr_debug("sde_irq_map[%d].reg_idx=%d .inst_idx = %d\n",
i, sde_irq_tbl_idx, item->instance_idx);
}
/* track the idx of the mapping table for this irq in
* sde_irq_map, this to only access the indexes of this
* irq during the irq dispatch
*/
intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start = low_idx;
intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end = high_idx;
ret = _set_sde_irq_tbl_offset(
&intr->sde_irq_tbl[sde_irq_tbl_idx], item);
if (ret)
goto exit;
/* increment idx for both tables accordingly */
sde_irq_tbl_idx++;
sde_irq_map_idx = high_idx;
}
/* do this after 'sde_irq_idx is initialized in sde_irq_tbl */
ret = _sde_hw_intr_init_sde_irq_tbl(intr->sde_irq_size,
intr->sde_irq_tbl);
exit:
sde_hw_catalog_irq_offset_list_delete(&m->irq_offset_list);
return ret;
}
@@ -1395,10 +1180,10 @@ struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
{
struct sde_hw_intr *intr = NULL;
struct sde_mdss_base_cfg *cfg;
struct sde_intr_irq_offsets *item;
u32 irq_regs_count = 0;
u32 irq_map_count = 0;
u32 size;
int idx;
int ret = 0;
if (!addr || !m) {
@@ -1419,33 +1204,24 @@ struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
}
__setup_intr_ops(&intr->ops);
if (MDSS_INTR_MAX >= UINT_MAX) {
pr_err("max intr exceeded:%d\n", MDSS_INTR_MAX);
ret = -EINVAL;
goto exit;
}
/* check how many irq's this target supports */
for (idx = 0; idx < MDSS_INTR_MAX; idx++) {
if (test_bit(idx, m->mdss_irqs)) {
irq_regs_count++;
size = _get_irq_map_size(idx);
if (!size || irq_map_count >= UINT_MAX - size) {
pr_err("wrong map cnt idx:%d sz:%d cnt:%d\n",
idx, size, irq_map_count);
ret = -EINVAL;
goto exit;
}
irq_map_count += size;
list_for_each_entry(item, &m->irq_offset_list, list) {
size = _get_irq_map_size(item);
if (!size || irq_map_count >= UINT_MAX - size) {
pr_err("wrong map cnt idx:%d blk:%d/%d sz:%d cnt:%d\n",
irq_regs_count, item->type, item->instance_idx,
size, irq_map_count);
ret = -EINVAL;
goto exit;
}
irq_regs_count++;
irq_map_count += size;
}
if (irq_regs_count == 0 || irq_regs_count > MDSS_INTR_MAX ||
irq_map_count == 0) {
pr_err("wrong mapping of supported irqs 0x%lx\n",
m->mdss_irqs[0]);
if (irq_regs_count == 0 || irq_map_count == 0) {
pr_err("invalid irq map: %d %d\n",
irq_regs_count, irq_map_count);
ret = -EINVAL;
goto exit;
}
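
With the per-instance maps collapsed into generic ones (sde_irq_intf_map, sde_irq_intf_te_map, sde_irq_ad4_map and sde_irq_ltm_map carry an instance index of -1), the init path above copies the matching generic map into sde_irq_map, stamps the instance index from the irq_offset_list entry onto each copied row, and derives the register offsets from the entry's base offset. A standalone illustration of the copy-and-specialize step (type ids and masks are arbitrary placeholders):

/* Standalone illustration of copying a generic irq map and filling in
 * the instance index from an irq_offset_list entry; values invented. */
#include <stdio.h>

struct irq_type {
	int intr_type;		/* e.g. "tear rd_ptr" */
	int instance_idx;	/* -1 in the generic template */
	unsigned int irq_mask;	/* placeholder mask bits */
};

static const struct irq_type tear_template[] = {
	{ 1, -1, 1u << 0 },
	{ 2, -1, 1u << 1 },
};

int main(void)
{
	struct irq_type table[2];
	const int instance = 2;	/* e.g. INTF_2, from the list entry */
	int i;

	for (i = 0; i < 2; i++) {
		table[i] = tear_template[i];
		table[i].instance_idx = instance;	/* specialize copy */
	}

	for (i = 0; i < 2; i++)
		printf("type %d instance %d mask 0x%x\n",
		       table[i].intr_type, table[i].instance_idx,
		       table[i].irq_mask);
	return 0;
}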

View File

@@ -314,4 +314,23 @@ struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
* @intr: pointer to interrupts hw object
*/
void sde_hw_intr_destroy(struct sde_hw_intr *intr);
/**
* sde_hw_intr_list_lookup(): get the list entry for a given intr
* @sde_cfg: catalog containing the irq_offset_list
* @type: the sde_intr_hwblk_type to lookup
* @idx: the instance id to lookup for the specified hwblk_type
* @return: pointer to sde_intr_irq_offsets list entry, or NULL if lookup fails
*/
static inline struct sde_intr_irq_offsets *sde_hw_intr_list_lookup(
struct sde_mdss_cfg *sde_cfg, enum sde_intr_hwblk_type type, u32 idx)
{
struct sde_intr_irq_offsets *item;
list_for_each_entry(item, &sde_cfg->irq_offset_list, list) {
if (type == item->type && idx == item->instance_idx)
return item;
}
return NULL;
}
#endif
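
sde_hw_intr_list_lookup() gives the catalog the three-way decision used by _add_to_irq_offset_list(): no entry means a new offset should be added, an entry with the same base offset is a harmless duplicate, and an entry with a different offset is a conflict. A plain-array stand-in for that contract (kernel list machinery omitted, values made up):

/* Plain-array stand-in for the lookup/duplicate/conflict contract;
 * the real code walks a struct list_head in the catalog. */
#include <stdio.h>
#include <stddef.h>

struct entry { int type; unsigned int inst; unsigned int base; };

static const struct entry *lookup(const struct entry *tbl, size_t n,
				  int type, unsigned int inst)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].type == type && tbl[i].inst == inst)
			return &tbl[i];
	return NULL;
}

int main(void)
{
	const struct entry list[] = { { 3 /* INTF_TEAR */, 1, 0x6E800 } };
	const struct entry *hit = lookup(list, 1, 3, 1);
	const unsigned int new_base = 0x6E800;	/* offset being registered */

	if (!hit)
		printf("no entry yet: add it\n");
	else if (hit->base == new_base)
		printf("duplicate offset 0x%x: skip\n", new_base);
	else
		printf("conflict: 0x%x already mapped\n", hit->base);
	return 0;
}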

View File

@@ -110,8 +110,9 @@ static void sde_hw_lm_setup_border_color(struct sde_hw_mixer *ctx,
}
}
static void sde_hw_lm_setup_blend_config_sdm845(struct sde_hw_mixer *ctx,
u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
static void sde_hw_lm_setup_blend_config_combined_alpha(
struct sde_hw_mixer *ctx, u32 stage,
u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
int stage_off;
@@ -280,16 +281,9 @@ static void _setup_mixer_ops(struct sde_mdss_cfg *m,
unsigned long features)
{
ops->setup_mixer_out = sde_hw_lm_setup_out;
if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion) ||
IS_SM8150_TARGET(m->hwversion) ||
IS_SDMSHRIKE_TARGET(m->hwversion) ||
IS_SM6150_TARGET(m->hwversion) ||
IS_SDMMAGPIE_TARGET(m->hwversion) ||
IS_KONA_TARGET(m->hwversion) ||
IS_SAIPAN_TARGET(m->hwversion) ||
IS_SDMTRINKET_TARGET(m->hwversion) ||
IS_BENGAL_TARGET(m->hwversion))
ops->setup_blend_config = sde_hw_lm_setup_blend_config_sdm845;
if (test_bit(SDE_MIXER_COMBINED_ALPHA, &features))
ops->setup_blend_config =
sde_hw_lm_setup_blend_config_combined_alpha;
else
ops->setup_blend_config = sde_hw_lm_setup_blend_config;
ops->setup_alpha_out = sde_hw_lm_setup_color3;

View File

@@ -3,7 +3,6 @@
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#include "sde_hw_util.h"
#include "sde_hwio.h"
#include "sde_hw_catalog.h"
#include "sde_hw_lm.h"
@@ -299,7 +298,7 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
u32 opmode = 0;
u32 alpha_en_mask = 0, color_en_mask = 0;
u32 op_mode_off, unpack_pat_off, format_off;
u32 idx, core_rev;
u32 idx;
bool const_color_en = true;
if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !fmt)
@@ -316,7 +315,6 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
}
c = &ctx->hw;
core_rev = readl_relaxed(c->base_off + 0x0);
opmode = SDE_REG_READ(c, op_mode_off + idx);
opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
@@ -354,10 +352,9 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
(fmt->unpack_align_msb << 18) |
((fmt->bpp - 1) << 9);
if(IS_SDE_MAJOR_SAME(core_rev, SDE_HW_VER_600)) {
if(flags & SDE_SSPP_ROT_90)
const_color_en = false;
}
if ((flags & SDE_SSPP_ROT_90) && test_bit(SDE_SSPP_INLINE_CONST_CLR,
&ctx->cap->features))
const_color_en = false;
if (fmt->fetch_mode != SDE_FETCH_LINEAR) {
if (SDE_FORMAT_IS_UBWC(fmt))

View File

@@ -234,10 +234,7 @@ static void _setup_vbif_ops(const struct sde_mdss_cfg *m,
ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
ops->set_qos_remap = sde_hw_set_qos_remap;
if (IS_SM8150_TARGET(m->hwversion) || IS_SM6150_TARGET(m->hwversion) ||
IS_SDMMAGPIE_TARGET(m->hwversion) ||
IS_SDMTRINKET_TARGET(m->hwversion) ||
IS_BENGAL_TARGET(m->hwversion))
if (test_bit(SDE_VBIF_DISABLE_SHAREABLE, &cap))
ops->set_mem_type = sde_hw_set_mem_type_v1;
else
ops->set_mem_type = sde_hw_set_mem_type;