disp: msm: sde: add demura v2 support

Add support for demura v2 by adding demura blocks
2 and 3 for pineapple target.

Change-Id: I9e6107480ab44853ca49e6396787378c5c70557a
Signed-off-by: Mitika Dodiya <quic_mdodiya@quicinc.com>
This commit is contained in:
Mitika Dodiya
2022-07-25 23:24:57 +05:30
committed by Gerrit - the friendly Code Review server
Parent 571d51727b
Commit c9298e3712
10 changed files with 293 additions and 74 deletions

Datei anzeigen

@@ -639,6 +639,12 @@ struct drm_msm_spr_udc_cfg {
#define CFG0_PARAM2_LEN 256
#define CFG5_PARAM01_LEN 4
#define CFG3_PARAM01_LEN 4
/* Demura v2 feature bits carried in drm_msm_dem_cfg::flags (bits 0 and 1). */
#define DEMURA_FLAG_0 (1 << 0)
#define DEMURA_FLAG_1 (1 << 1)
/* NOTE: despite the FLAG naming, this is a 2-bit MASK over flags[3:2] that
 * selects one of the DEMURA_PRECISION_* values below (see how it is applied
 * as "flags & DEMURA_FLAG_2" then shifted down by 2 in the en setup).
 */
#define DEMURA_FLAG_2 (3 << 2)
/* Precision select values, pre-shifted into the DEMURA_FLAG_2 field. */
#define DEMURA_PRECISION_0 (0 << 2)
#define DEMURA_PRECISION_1 (1 << 2)
#define DEMURA_PRECISION_2 (2 << 2)
struct drm_msm_dem_cfg {
__u64 flags;

Datei anzeigen

@@ -3456,6 +3456,7 @@ static void _dspp_demura_install_property(struct drm_crtc *crtc)
version = catalog->dspp[0].sblk->demura.version >> 16;
switch (version) {
case 1:
case 2:
_sde_cp_crtc_install_blob_property(crtc, "SDE_DEMURA_INIT_CFG_V1",
SDE_CP_CRTC_DSPP_DEMURA_INIT,
sizeof(struct drm_msm_dem_cfg));

Datei anzeigen

@@ -5381,10 +5381,10 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->uidle_cfg.uidle_rev = SDE_UIDLE_VERSION_1_0_4;
sde_cfg->sid_rev = SDE_SID_VERSION_2_0_0;
sde_cfg->mdss_hw_block_size = 0x158;
sde_cfg->demura_supported[SSPP_DMA1][0] = 0;
sde_cfg->demura_supported[SSPP_DMA1][1] = 1;
sde_cfg->demura_supported[SSPP_DMA3][0] = 0;
sde_cfg->demura_supported[SSPP_DMA3][1] = 1;
sde_cfg->demura_supported[SSPP_DMA1][0] = BIT(DEMURA_0) | BIT(DEMURA_2);
sde_cfg->demura_supported[SSPP_DMA1][1] = BIT(DEMURA_1) | BIT(DEMURA_3);
sde_cfg->demura_supported[SSPP_DMA3][0] = BIT(DEMURA_0) | BIT(DEMURA_2);
sde_cfg->demura_supported[SSPP_DMA3][1] = BIT(DEMURA_1) | BIT(DEMURA_3);
sde_cfg->has_line_insertion = true;
} else {
SDE_ERROR("unsupported chipset id:%X\n", hw_rev);

Datei anzeigen

@@ -703,12 +703,12 @@ void sde_demura_read_plane_status(struct sde_hw_dspp *ctx, u32 *status)
value = SDE_REG_READ(&ctx->hw, demura_base + 0x4);
if (!(value & 0x4)) {
*status = DEM_FETCH_DMA_INVALID;
} else if (ctx->idx == DSPP_0) {
} else if (ctx->idx == DSPP_0 || ctx->idx == DSPP_2) {
if (value & 0x80000000)
*status = DEM_FETCH_DMA1_RECT0;
else
*status = DEM_FETCH_DMA3_RECT0;
} else {
} else if (ctx->idx == DSPP_1 || ctx->idx == DSPP_3) {
if (value & 0x80000000)
*status = DEM_FETCH_DMA1_RECT1;
else

Datei anzeigen

@@ -386,6 +386,20 @@ static void dspp_demura(struct sde_hw_dspp *c)
sde_demura_read_plane_status;
c->ops.setup_demura_pu_config = sde_demura_pu_cfg;
}
} else if (c->cap->sblk->demura.version == SDE_COLOR_PROCESS_VER(0x2, 0x0)) {
ret = reg_dmav1_init_dspp_op_v4(SDE_DSPP_DEMURA, c->idx);
c->ops.setup_demura_cfg = NULL;
c->ops.setup_demura_backlight_cfg = NULL;
if (!ret) {
c->ops.setup_demura_cfg = reg_dmav1_setup_demurav2;
c->ops.setup_demura_backlight_cfg =
sde_demura_backlight_cfg;
c->ops.demura_read_plane_status =
sde_demura_read_plane_status;
c->ops.setup_demura_pu_config = sde_demura_pu_cfg;
} else {
SDE_ERROR("Regdma init dspp op failed for DemuraV2");
}
}
}

Datei anzeigen

@@ -228,6 +228,14 @@ enum sde_rc {
RC_MAX
};
/**
 * enum sde_demura - demura correction block instances on this target.
 * Values are sequential hardware indices; DEMURA_MAX is the instance count
 * and must remain the last entry.
 */
enum sde_demura {
	DEMURA_0 = 0,
	DEMURA_1 = 1,
	DEMURA_2 = 2,
	DEMURA_3 = 3,
	DEMURA_MAX = 4
};
enum sde_ds {
DS_TOP,
DS_0,

Datei anzeigen

@@ -1029,6 +1029,8 @@ int init_v3(struct sde_hw_reg_dma *cfg)
reg_dma_ctl0_reset_offset[i][DMA_CTL_QUEUE0] = 4096 * i + 0x54;
reg_dma_ctl0_reset_offset[i][DMA_CTL_QUEUE1] = 4096 * i + 0x54;
}
v1_supported[DEMURA_CFG] = v1_supported[DEMURA_CFG] | DSPP2 | DSPP3;
return 0;
}

Datei anzeigen

@@ -120,6 +120,9 @@
#define REG_DMA_DSPP_GAMUT_OP_MASK 0xFFFFFFE0
#define DEMURAV1_CFG0_PARAM4_MASK 5
#define DEMURAV2_CFG0_PARAM4_MASK 7
#define LOG_FEATURE_OFF SDE_EVT32(ctx->idx, 0)
#define LOG_FEATURE_ON SDE_EVT32(ctx->idx, 1)
@@ -5858,27 +5861,6 @@ static int __reg_dmav1_setup_demurav1_cfg0(struct sde_hw_dspp *ctx,
goto quit;
}
memset(temp, 0, sizeof(u32) * 2);
for (i = 0; i < ARRAY_SIZE(dcfg->cfg0_param4); i++)
DRM_DEBUG_DRIVER("hfc gain is %d\n", dcfg->cfg0_param4[i]);
temp[0] = (dcfg->cfg0_param4[0] & REG_MASK(5)) |
((dcfg->cfg0_param4[1] & REG_MASK(5)) << 8) |
((dcfg->cfg0_param4[2] & REG_MASK(5)) << 16) |
((dcfg->cfg0_param4[3] & REG_MASK(5)) << 24);
temp[1] = (dcfg->cfg0_param4[4] & REG_MASK(5)) |
((dcfg->cfg0_param4[5] & REG_MASK(5)) << 8) |
((dcfg->cfg0_param4[6] & REG_MASK(5)) << 16) |
((dcfg->cfg0_param4[7] & REG_MASK(5)) << 24);
DRM_DEBUG_DRIVER("0x4c: value is temp[0] %x temp[1] %x\n",
temp[0], temp[1]);
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x4c,
temp, sizeof(u32) * 2, REG_BLK_WRITE_SINGLE, 0, 0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0x4c: REG_BLK_WRITE_SINGLE %d len %zd buf idx %d\n",
rc, sizeof(u32) * 2, dma_write_cfg->dma_buf->index);
goto quit;
}
quit:
kvfree(temp);
return rc;
@@ -6093,13 +6075,14 @@ static bool __reg_dmav1_valid_hfc_en_cfg(struct drm_msm_dem_cfg *dcfg,
return false;
}
static int __reg_dmav1_setup_demurav1_en(struct sde_hw_dspp *ctx,
static int __reg_dmav1_setup_demura_common_en(struct sde_hw_dspp *ctx,
struct drm_msm_dem_cfg *dcfg,
struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
struct sde_hw_reg_dma_ops *dma_ops,
struct sde_hw_cp_cfg *hw_cfg)
struct sde_hw_cp_cfg *hw_cfg,
u32 *en)
{
u32 en = 0, backl;
u32 backl;
int rc;
bool valid_hfc_cfg = false;
u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;
@@ -6113,21 +6096,42 @@ static int __reg_dmav1_setup_demurav1_en(struct sde_hw_dspp *ctx,
return rc;
}
en = (dcfg->src_id == BIT(3)) ? 0 : BIT(31);
en |= (dcfg->cfg1_high_idx & REG_MASK(3)) << 24;
en |= (dcfg->cfg1_low_idx & REG_MASK(3)) << 20;
en |= (dcfg->c2_depth & REG_MASK(4)) << 16;
en |= (dcfg->c1_depth & REG_MASK(4)) << 12;
en |= (dcfg->c0_depth & REG_MASK(4)) << 8;
en |= (dcfg->cfg3_en) ? BIT(5) : 0;
en |= (dcfg->cfg4_en) ? BIT(4) : 0;
en |= (dcfg->cfg2_en) ? BIT(3) : 0;
*en = (dcfg->src_id == BIT(3)) ? 0 : BIT(31);
*en |= (dcfg->cfg1_high_idx & REG_MASK(3)) << 24;
*en |= (dcfg->cfg1_low_idx & REG_MASK(3)) << 20;
*en |= (dcfg->c2_depth & REG_MASK(4)) << 16;
*en |= (dcfg->c1_depth & REG_MASK(4)) << 12;
*en |= (dcfg->c0_depth & REG_MASK(4)) << 8;
*en |= (dcfg->cfg3_en) ? BIT(5) : 0;
*en |= (dcfg->cfg4_en) ? BIT(4) : 0;
*en |= (dcfg->cfg2_en) ? BIT(3) : 0;
if (dcfg->cfg0_en)
valid_hfc_cfg = __reg_dmav1_valid_hfc_en_cfg(dcfg, hw_cfg);
if (valid_hfc_cfg)
en |= (dcfg->cfg0_en) ? BIT(2) : 0;
en |= (dcfg->cfg1_en) ? BIT(1) : 0;
DRM_DEBUG_DRIVER("demura en %x\n", en);
*en |= (dcfg->cfg0_en) ? BIT(2) : 0;
*en |= (dcfg->cfg1_en) ? BIT(1) : 0;
DRM_DEBUG_DRIVER("demura common en %x\n", *en);
return rc;
}
static int __reg_dmav1_setup_demurav1_en(struct sde_hw_dspp *ctx,
struct drm_msm_dem_cfg *dcfg,
struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
struct sde_hw_reg_dma_ops *dma_ops,
struct sde_hw_cp_cfg *hw_cfg)
{
u32 en = 0;
int rc;
u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;
rc = __reg_dmav1_setup_demura_common_en(ctx, dcfg, dma_write_cfg, dma_ops, hw_cfg, &en);
if (rc) {
DRM_ERROR("failed reg_dmav1_setup_demura_common_en %d", rc);
return rc;
}
DRM_DEBUG_DRIVER("demura v1 en 0x%x\n", en);
SDE_EVT32(en);
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x4,
&en, sizeof(en), REG_SINGLE_WRITE, 0, 0, 0);
@@ -6138,6 +6142,90 @@ static int __reg_dmav1_setup_demurav1_en(struct sde_hw_dspp *ctx,
return rc;
}
/*
 * __reg_dmav1_setup_demurav2_en() - build and stage the demura v2 enable word.
 * @ctx: DSPP hardware context (supplies the demura register base).
 * @dcfg: userspace demura configuration payload.
 * @dma_write_cfg: reg-dma write descriptor to fill.
 * @dma_ops: reg-dma operation table used to stage the payload.
 * @hw_cfg: color-processing hw config forwarded to the common-en helper.
 *
 * Assembles the v1-compatible enable bits via the common helper, overlays the
 * v2-only fields decoded from dcfg->flags, and stages a single register write
 * at demura_base + 0x4.  Returns 0 on success or a negative error code.
 */
static int __reg_dmav1_setup_demurav2_en(struct sde_hw_dspp *ctx,
		struct drm_msm_dem_cfg *dcfg,
		struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
		struct sde_hw_reg_dma_ops *dma_ops,
		struct sde_hw_cp_cfg *hw_cfg)
{
	u32 en = 0;
	int rc, val;
	u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;
	/* Bits shared with demura v1 (src select, depths, cfg enables). */
	rc = __reg_dmav1_setup_demura_common_en(ctx, dcfg, dma_write_cfg, dma_ops, hw_cfg, &en);
	if (rc) {
		DRM_ERROR("failed reg_dmav1_setup_demura_common_en %d", rc);
		return rc;
	}
	/* These are Demura V2 config flags */
	/* DEMURA_FLAG_2 is the 2-bit precision field at flags[3:2]; only
	 * values 1 and 2 are programmed into en[29:28], 0 and 3 are ignored.
	 */
	val = (dcfg->flags & DEMURA_FLAG_2) >> 2;
	if (val && val < 3)
		en |= (val & REG_MASK(2)) << 28;
	if (dcfg->flags & DEMURA_FLAG_1)
		en |= BIT(7);
	if (dcfg->flags & DEMURA_FLAG_0)
		en |= BIT(6);
	DRM_DEBUG_DRIVER("demura v2 en 0x%x\n", en);
	SDE_EVT32(en);
	/* Single-register write of the final enable word. */
	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x4,
		&en, sizeof(en), REG_SINGLE_WRITE, 0, 0, 0);
	rc = dma_ops->setup_payload(dma_write_cfg);
	if (rc)
		DRM_ERROR("0x4: REG_SINGLE_WRITE failed ret %d\n", rc);
	return rc;
}
/*
 * __reg_dmav1_setup_demura_cfg0_param4_common() - stage the HFC gain words.
 * @ctx: DSPP hardware context (supplies the demura register base).
 * @dcfg: userspace demura configuration payload.
 * @dma_write_cfg: reg-dma write descriptor to fill.
 * @dma_ops: reg-dma operation table used to stage the payload.
 * @mask_bits: per-version width of each cfg0_param4 gain field
 *             (DEMURAV1_CFG0_PARAM4_MASK or DEMURAV2_CFG0_PARAM4_MASK).
 *
 * Packs the eight cfg0_param4 gains, each masked to @mask_bits bits, into two
 * u32 words (four gains per word, one byte lane per gain) and stages a block
 * write at demura_base + 0x4c.  No-op when cfg0 is disabled.
 *
 * Returns 0 on success or a negative error code.
 */
static int __reg_dmav1_setup_demura_cfg0_param4_common(struct sde_hw_dspp *ctx,
		struct drm_msm_dem_cfg *dcfg,
		struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
		struct sde_hw_reg_dma_ops *dma_ops,
		uint32_t mask_bits)
{
	int rc;
	u32 *temp;
	u32 i;
	u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;

	if (!dcfg->cfg0_en) {
		DRM_DEBUG_DRIVER("dcfg->cfg0_en is disabled\n");
		return 0;
	}

	/* Only two u32 words are staged; the previous code over-allocated a
	 * whole drm_msm_dem_cfg here.  kvzalloc already zero-fills, so no
	 * separate memset is needed either.
	 */
	temp = kvzalloc(sizeof(u32) * 2, GFP_KERNEL);
	if (!temp)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(dcfg->cfg0_param4); i++)
		DRM_DEBUG_DRIVER("hfc gain is %d\n", dcfg->cfg0_param4[i]);

	/* Four gains per word, one per byte lane, each masked to mask_bits. */
	temp[0] = (dcfg->cfg0_param4[0] & REG_MASK(mask_bits)) |
		((dcfg->cfg0_param4[1] & REG_MASK(mask_bits)) << 8) |
		((dcfg->cfg0_param4[2] & REG_MASK(mask_bits)) << 16) |
		((dcfg->cfg0_param4[3] & REG_MASK(mask_bits)) << 24);
	temp[1] = (dcfg->cfg0_param4[4] & REG_MASK(mask_bits)) |
		((dcfg->cfg0_param4[5] & REG_MASK(mask_bits)) << 8) |
		((dcfg->cfg0_param4[6] & REG_MASK(mask_bits)) << 16) |
		((dcfg->cfg0_param4[7] & REG_MASK(mask_bits)) << 24);
	DRM_DEBUG_DRIVER("0x4c: value is temp[0] %x temp[1] %x\n",
		temp[0], temp[1]);

	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x4c,
		temp, sizeof(u32) * 2, REG_BLK_WRITE_SINGLE, 0, 0, 0);
	rc = dma_ops->setup_payload(dma_write_cfg);
	if (rc)
		DRM_ERROR("0x4c: REG_BLK_WRITE_SINGLE %d len %zd buf idx %d\n",
			rc, sizeof(u32) * 2, dma_write_cfg->dma_buf->index);

	kvfree(temp);
	return rc;
}
static int __reg_dmav1_setup_demurav1_dual_pipe(struct sde_hw_dspp *ctx,
struct sde_hw_cp_cfg *hw_cfg,
struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
@@ -6170,6 +6258,50 @@ static int __reg_dmav1_setup_demurav1_dual_pipe(struct sde_hw_dspp *ctx,
return rc;
}
/*
 * __reg_dmav1_setup_demura_cfg_common() - stage every demura config sub-block.
 * @ctx: DSPP hardware context.
 * @dcfg: userspace demura configuration payload.
 * @dma_write_cfg: reg-dma write descriptor to fill.
 * @dma_ops: reg-dma operation table used to stage the payloads.
 * @hw_cfg: color-processing hw config forwarded to cfg0/cfg1/dual-pipe setup.
 *
 * Runs the cfg0, cfg1, cfg3, cfg5 and dual-pipe setup helpers in order,
 * stopping at the first failure.  These helpers are shared between the
 * demura v1 and v2 programming paths.  Returns 0 on success or the first
 * helper's error code.
 */
static int __reg_dmav1_setup_demura_cfg_common(struct sde_hw_dspp *ctx,
		struct drm_msm_dem_cfg *dcfg,
		struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
		struct sde_hw_reg_dma_ops *dma_ops,
		struct sde_hw_cp_cfg *hw_cfg)
{
	int rc;

	rc = __reg_dmav1_setup_demurav1_cfg0(ctx, dcfg, dma_write_cfg, dma_ops, hw_cfg);
	if (rc) {
		DRM_ERROR("failed setup_demurav1_cfg0 rc %d", rc);
		return rc;
	}

	rc = __reg_dmav1_setup_demurav1_cfg1(ctx, dcfg, dma_write_cfg, dma_ops, hw_cfg);
	if (rc) {
		DRM_ERROR("failed setup_demurav1_cfg1 rc %d", rc);
		return rc;
	}

	rc = __reg_dmav1_setup_demurav1_cfg3(ctx, dcfg, dma_write_cfg, dma_ops);
	if (rc) {
		DRM_ERROR("failed setup_demurav1_cfg3 rc %d", rc);
		return rc;
	}

	rc = __reg_dmav1_setup_demurav1_cfg5(ctx, dcfg, dma_write_cfg, dma_ops);
	if (rc) {
		DRM_ERROR("failed setup_demurav1_cfg5 rc %d", rc);
		return rc;
	}

	rc = __reg_dmav1_setup_demurav1_dual_pipe(ctx, hw_cfg, dma_write_cfg, dma_ops);
	if (rc)
		DRM_ERROR("failed setup_demurav1_dual_pipe rc %d", rc);

	return rc;
}
void reg_dmav1_setup_demurav1(struct sde_hw_dspp *ctx, void *cfx)
{
struct drm_msm_dem_cfg *dcfg;
@@ -6206,42 +6338,21 @@ void reg_dmav1_setup_demurav1(struct sde_hw_dspp *ctx, void *cfx)
DRM_ERROR("write decode select failed ret %d\n", rc);
return;
}
rc = __reg_dmav1_setup_demurav1_cfg0(ctx, dcfg, &dma_write_cfg,
dma_ops, hw_cfg);
rc = __reg_dmav1_setup_demura_cfg_common(ctx, dcfg, &dma_write_cfg, dma_ops, hw_cfg);
if (rc) {
DRM_ERROR("failed setup_demurav1_cfg0 rc %d", rc);
return;
}
rc = __reg_dmav1_setup_demurav1_cfg1(ctx, dcfg, &dma_write_cfg,
dma_ops, hw_cfg);
if (rc) {
DRM_ERROR("failed setup_demurav1_cfg1 rc %d", rc);
DRM_ERROR("failed to setup_demurav1_cfg rc %d", rc);
return;
}
rc = __reg_dmav1_setup_demurav1_cfg3(ctx, dcfg, &dma_write_cfg,
dma_ops);
rc = __reg_dmav1_setup_demura_cfg0_param4_common(ctx, dcfg, &dma_write_cfg,
dma_ops, DEMURAV1_CFG0_PARAM4_MASK);
if (rc) {
DRM_ERROR("failed setup_demurav1_cfg3 rc %d", rc);
DRM_ERROR("failed setup demura v1 cfg0_param4 rc %d", rc);
return;
}
rc = __reg_dmav1_setup_demurav1_cfg5(ctx, dcfg, &dma_write_cfg,
dma_ops);
if (rc) {
DRM_ERROR("failed setup_demurav1_cfg5 rc %d", rc);
return;
}
rc = __reg_dmav1_setup_demurav1_dual_pipe(ctx, cfx, &dma_write_cfg,
dma_ops);
if (rc) {
DRM_ERROR("failed setup_demurav1_dual_pipe rc %d", rc);
return;
}
rc = __reg_dmav1_setup_demurav1_en(ctx, dcfg, &dma_write_cfg,
dma_ops, hw_cfg);
rc = __reg_dmav1_setup_demurav1_en(ctx, dcfg, &dma_write_cfg, dma_ops, hw_cfg);
if (rc) {
DRM_ERROR("failed setup_demurav1_en rc %d", rc);
return;
@@ -6252,11 +6363,79 @@ void reg_dmav1_setup_demurav1(struct sde_hw_dspp *ctx, void *cfx)
REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
DEMURA_CFG);
DRM_DEBUG_DRIVER("enable demura buffer size %d\n",
DRM_DEBUG_DRIVER("enable demura v1 buffer size %d\n",
dspp_buf[DEMURA_CFG][ctx->idx]->index);
LOG_FEATURE_ON;
rc = dma_ops->kick_off(&kick_off);
if (rc)
DRM_ERROR("failed to kick off ret %d\n", rc);
DRM_ERROR("failed to kick off demurav1 ret %d\n", rc);
}
/*
 * reg_dmav1_setup_demurav2() - program the demura v2 feature through reg-dma.
 * @ctx: DSPP hardware context.
 * @cfx: pointer to struct sde_hw_cp_cfg carrying the userspace blob.
 *
 * Mirrors the demura v1 flow but uses the wider v2 cfg0_param4 gain mask and
 * the v2 enable-word programming.  A NULL payload turns the feature off.
 */
void reg_dmav1_setup_demurav2(struct sde_hw_dspp *ctx, void *cfx)
{
	int rc = 0;
	struct drm_msm_dem_cfg *dcfg;
	struct sde_hw_cp_cfg *hw_cfg = cfx;
	struct sde_hw_reg_dma_ops *dma_ops;
	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
	struct sde_reg_dma_kickoff_cfg kick_off;

	rc = reg_dma_dspp_check(ctx, cfx, DEMURA_CFG);
	if (rc)
		return;

	/* NULL payload means "feature off". */
	if (!hw_cfg->payload) {
		LOG_FEATURE_OFF;
		reg_dma_demura_off(ctx, hw_cfg);
		return;
	}

	if (hw_cfg->len != sizeof(struct drm_msm_dem_cfg)) {
		DRM_ERROR("invalid sz of payload len %d exp %zd\n",
				hw_cfg->len, sizeof(struct drm_msm_dem_cfg));
		/* Bail out: interpreting a mis-sized blob as drm_msm_dem_cfg
		 * below would read past the end of the payload.
		 */
		return;
	}

	dcfg = hw_cfg->payload;
	dma_ops = sde_reg_dma_get_ops();
	dma_ops->reset_reg_dma_buf(dspp_buf[DEMURA_CFG][ctx->idx]);

	REG_DMA_INIT_OPS(dma_write_cfg, MDSS, DEMURA_CFG,
			dspp_buf[DEMURA_CFG][ctx->idx]);
	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
	rc = dma_ops->setup_payload(&dma_write_cfg);
	if (rc) {
		DRM_ERROR("write decode select failed ret %d\n", rc);
		return;
	}

	/* Sub-config blocks shared with the v1 path. */
	rc = __reg_dmav1_setup_demura_cfg_common(ctx, dcfg, &dma_write_cfg, dma_ops, hw_cfg);
	if (rc) {
		DRM_ERROR("failed to setup_demurav2_cfg rc %d", rc);
		return;
	}

	/* v2 widens each HFC gain field to DEMURAV2_CFG0_PARAM4_MASK bits. */
	rc = __reg_dmav1_setup_demura_cfg0_param4_common(ctx, dcfg, &dma_write_cfg,
			dma_ops, DEMURAV2_CFG0_PARAM4_MASK);
	if (rc) {
		DRM_ERROR("failed setup demura v2 cfg0_param4 rc %d", rc);
		return;
	}

	rc = __reg_dmav1_setup_demurav2_en(ctx, dcfg, &dma_write_cfg, dma_ops, hw_cfg);
	if (rc) {
		DRM_ERROR("failed setup_demurav2_en rc %d", rc);
		return;
	}

	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
			dspp_buf[DEMURA_CFG][ctx->idx],
			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
			DEMURA_CFG);
	DRM_DEBUG_DRIVER("enable demura v2 buffer size %d\n",
			dspp_buf[DEMURA_CFG][ctx->idx]->index);
	LOG_FEATURE_ON;
	rc = dma_ops->kick_off(&kick_off);
	if (rc)
		DRM_ERROR("failed to kick off demurav2 ret %d\n", rc);
}

Datei anzeigen

@@ -360,4 +360,12 @@ void reg_dmav1_setup_spr_pu_cfgv2(struct sde_hw_dspp *ctx, void *cfg);
*/
void reg_dmav1_setup_demurav1(struct sde_hw_dspp *ctx, void *cfg);
/**
* reg_dmav1_setup_demurav2() - function to set up the demurav2 configuration.
* @ctx: dspp ctx info
* @cfg: pointer to struct sde_hw_cp_cfg
*/
void reg_dmav1_setup_demurav2(struct sde_hw_dspp *ctx, void *cfg);
#endif /* _SDE_HW_REG_DMA_V1_COLOR_PROC_H */

Datei anzeigen

@@ -3946,7 +3946,8 @@ static void _sde_plane_setup_capabilities_blob(struct sde_plane *psde,
index = (master_plane_id == 0) ? 0 : 1;
if (test_bit(SDE_FEATURE_DEMURA, catalog->features) &&
catalog->demura_supported[psde->pipe][index] != ~0x0)
sde_kms_info_add_keyint(info, "demura_block", index);
sde_kms_info_add_keyint(info, "demura_block",
catalog->demura_supported[psde->pipe][index]);
if (psde->features & BIT(SDE_SSPP_SEC_UI_ALLOWED))
sde_kms_info_add_keyint(info, "sec_ui_allowed", 1);