msm: sde: demura support for register dma

The demura block of the DPU will use the register DMA path for programming.
This change updates the register DMA framework to allow programming of the
demura block.

Change-Id: Ie5a64df1b2fffcb84d9554e5211249892ed2d96d
Signed-off-by: Gopikrishnaiah Anandan <agopik@codeaurora.org>
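
The DEMURA_INIT_V1 blob property installed by dspp_demura_install_property() below is what carries a struct drm_msm_dem_cfg from userspace into this path. As a rough sketch only, setting that blob through libdrm could look like the following; the CRTC id, the helper name and the config contents are placeholders, and only the property name and payload struct come from this change.

/*
 * Userspace sketch (not part of this change): attach a
 * struct drm_msm_dem_cfg to the DEMURA_INIT_V1 CRTC property.
 */
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm/msm_drm_pp.h>

static int set_demura_blob(int fd, uint32_t crtc_id,
                           const struct drm_msm_dem_cfg *cfg)
{
        drmModeObjectPropertiesPtr props;
        uint32_t blob_id = 0, prop_id = 0, i;
        int ret;

        props = drmModeObjectGetProperties(fd, crtc_id, DRM_MODE_OBJECT_CRTC);
        if (!props)
                return -1;

        for (i = 0; i < props->count_props && !prop_id; i++) {
                drmModePropertyPtr p = drmModeGetProperty(fd, props->props[i]);

                if (p && !strcmp(p->name, "DEMURA_INIT_V1"))
                        prop_id = p->prop_id;
                drmModeFreeProperty(p);
        }
        drmModeFreeObjectProperties(props);
        if (!prop_id)
                return -1;

        /* the kernel side checks hw_cfg->len against sizeof(*cfg) */
        ret = drmModeCreatePropertyBlob(fd, cfg, sizeof(*cfg), &blob_id);
        if (ret)
                return ret;

        return drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
                                        prop_id, blob_id);
}
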

@@ -814,6 +814,20 @@ static int set_spr_init_feature(struct sde_hw_dspp *hw_dspp,
return ret;
}

static int set_demura_feature(struct sde_hw_dspp *hw_dspp,
struct sde_hw_cp_cfg *hw_cfg,
struct sde_crtc *hw_crtc)
{
int ret = 0;
if (!hw_dspp || !hw_dspp->ops.setup_demura_cfg)
ret = -EINVAL;
else
hw_dspp->ops.setup_demura_cfg(hw_dspp, hw_cfg);
return ret;
}

feature_wrapper check_crtc_feature_wrappers[SDE_CP_CRTC_MAX_FEATURES];
#define setup_check_crtc_feature_wrappers(wrappers) \
do { \
@@ -864,6 +878,7 @@ do { \
wrappers[SDE_CP_CRTC_DSPP_LTM_HIST_CTL] = set_ltm_hist_crtl_feature; \
wrappers[SDE_CP_CRTC_DSPP_RC_MASK] = set_rc_mask_feature; \
wrappers[SDE_CP_CRTC_DSPP_SPR_INIT] = set_spr_init_feature; \
wrappers[SDE_CP_CRTC_DSPP_DEMURA_INIT] = set_demura_feature; \
} while (0)

feature_wrapper set_crtc_pu_feature_wrappers[SDE_CP_CRTC_MAX_PU_FEATURES];
@@ -1526,6 +1541,8 @@ static const int dspp_feature_to_sub_blk_tbl[SDE_CP_CRTC_MAX_FEATURES] = {
[SDE_CP_CRTC_DSPP_SB] = SDE_DSPP_SB,
[SDE_CP_CRTC_DSPP_SPR_INIT] = SDE_DSPP_SPR,
[SDE_CP_CRTC_DSPP_RC_MASK] = SDE_DSPP_RC,
[SDE_CP_CRTC_DSPP_DEMURA_INIT] = SDE_DSPP_DEMURA,
[SDE_CP_CRTC_DSPP_DEMURA_BACKLIGHT] = SDE_DSPP_DEMURA,
[SDE_CP_CRTC_DSPP_MAX] = SDE_DSPP_MAX,
[SDE_CP_CRTC_LM_GC] = SDE_DSPP_MAX,
};
@@ -2754,7 +2771,7 @@ static void dspp_demura_install_property(struct drm_crtc *crtc)
kms = get_kms(crtc);
catalog = kms->catalog;
version = catalog->dspp[0].sblk->demura.version >> 16;
switch (version) {
case 1:
sde_cp_crtc_install_blob_property(crtc, "DEMURA_INIT_V1",

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#include <drm/msm_drm_pp.h>
#include "sde_hw_color_proc_common_v4.h"
@@ -392,3 +392,19 @@ void sde_ltm_read_intr_status(struct sde_hw_dspp *ctx, u32 *status)
clear |= BIT(1) | BIT(2);
SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x58, clear);
}

void sde_demura_backlight_cfg(struct sde_hw_dspp *dspp, u64 val)
{
u32 demura_base;
u32 backlight;

if (!dspp) {
DRM_ERROR("invalid parameter ctx %pK", dspp);
return;
}

demura_base = dspp->cap->sblk->demura.base;
/* pack the two 11-bit backlight fields of val into one register */
backlight = (val & REG_MASK(11));
backlight |= ((val & REG_MASK_SHIFT(11, 32)) >> 16);
SDE_REG_WRITE(&dspp->hw, demura_base + 0x8, backlight);
}

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _SDE_HW_COLOR_PROC_V4_H_
#define _SDE_HW_COLOR_PROC_V4_H_
@@ -70,4 +70,11 @@ void sde_setup_dspp_ltm_hist_bufferv1(struct sde_hw_dspp *ctx, u64 addr);
*/
void sde_ltm_read_intr_status(struct sde_hw_dspp *dspp, u32 *status);

/**
* sde_demura_backlight_cfg - api to set backlight for demura
* @dspp: pointer to dspp object
* @val: value of backlight
*/
void sde_demura_backlight_cfg(struct sde_hw_dspp *dspp, u64 val);
#endif /* _SDE_HW_COLOR_PROC_V4_H_ */
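
The @val argument taken by sde_demura_backlight_cfg() packs two 11-bit brightness fields into one u64: bits [10:0] end up in register bits [10:0] and bits [42:32] in register bits [26:16]. A standalone illustration of that packing, with made-up sample values:

/* Standalone illustration (not driver code) of the backlight packing. */
#include <stdint.h>
#include <stdio.h>

#define REG_MASK(n)             ((1ULL << (n)) - 1)
#define REG_MASK_SHIFT(n, s)    (REG_MASK(n) << (s))

int main(void)
{
        uint64_t val = (0x200ULL << 32) | 0x1ff;        /* made-up fields */
        uint32_t backlight;

        backlight = val & REG_MASK(11);                    /* val[10:0]  -> reg[10:0]  */
        backlight |= (val & REG_MASK_SHIFT(11, 32)) >> 16; /* val[42:32] -> reg[26:16] */
        printf("register value: 0x%08x\n", backlight);     /* prints 0x020001ff */
        return 0;
}
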

@@ -284,6 +284,22 @@ static void dspp_spr(struct sde_hw_dspp *c)
}
}

static void dspp_demura(struct sde_hw_dspp *c)
{
int ret;
if (c->cap->sblk->demura.version == SDE_COLOR_PROCESS_VER(0x1, 0x0)) {
ret = reg_dmav1_init_dspp_op_v4(SDE_DSPP_DEMURA, c->idx);
c->ops.setup_demura_cfg = NULL;
c->ops.setup_demura_backlight_cfg = NULL;
if (!ret) {
c->ops.setup_demura_cfg = reg_dmav1_setup_demurav1;
c->ops.setup_demura_backlight_cfg =
sde_demura_backlight_cfg;
}
}
}

static void (*dspp_blocks[SDE_DSPP_MAX])(struct sde_hw_dspp *c);

static void _init_dspp_ops(void)
@@ -302,6 +318,7 @@ static void _init_dspp_ops(void)
dspp_blocks[SDE_DSPP_LTM] = dspp_ltm;
dspp_blocks[SDE_DSPP_RC] = dspp_rc;
dspp_blocks[SDE_DSPP_SPR] = dspp_spr;
dspp_blocks[SDE_DSPP_DEMURA] = dspp_demura;
}

static void _setup_dspp_ops(struct sde_hw_dspp *c, unsigned long features)

@@ -256,6 +256,20 @@ struct sde_hw_dspp_ops {
* @cfg: Pointer to configuration
*/
void (*setup_spr_init_config)(struct sde_hw_dspp *ctx, void *cfg);
/**
* setup_demura_cfg - function to program demura cfg
* @ctx: Pointer to dspp context
* @cfg: Pointer to configuration
*/
void (*setup_demura_cfg)(struct sde_hw_dspp *ctx, void *cfg);
/**
* setup_demura_backlight_cfg - function to program demura backlight
* @ctx: Pointer to dspp context
* @val: Backlight value
*/
void (*setup_demura_backlight_cfg)(struct sde_hw_dspp *ctx, u64 val);
};

/**
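
set_demura_feature() earlier in this change drives setup_demura_cfg, but nothing in this patch calls setup_demura_backlight_cfg yet. Purely as a hypothetical sketch, a CRTC feature wrapper for the SDE_CP_CRTC_DSPP_DEMURA_BACKLIGHT entry added above could take the shape below; the wrapper name and the way the u64 is pulled from the payload are assumptions, only the op signature comes from this patch.

/* Hypothetical wrapper, not part of this change. */
static int set_demura_backlight_feature(struct sde_hw_dspp *hw_dspp,
                struct sde_hw_cp_cfg *hw_cfg,
                struct sde_crtc *hw_crtc)
{
        u64 val = 0;

        if (!hw_dspp || !hw_dspp->ops.setup_demura_backlight_cfg)
                return -EINVAL;

        if (hw_cfg->payload && hw_cfg->len == sizeof(u64))
                val = *((u64 *)hw_cfg->payload);

        hw_dspp->ops.setup_demura_backlight_cfg(hw_dspp, val);
        return 0;
}
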

@@ -810,6 +810,7 @@ int init_v12(struct sde_hw_reg_dma *cfg)
GRP_MDSS_HW_BLK_SELECT);
v1_supported[SPR_INIT] = (GRP_DSPP_HW_BLK_SELECT |
GRP_MDSS_HW_BLK_SELECT);
v1_supported[DEMURA_CFG] = MDSS | DSPP0 | DSPP1;
return 0;
}

@@ -80,6 +80,8 @@
REG_DMA_HEADERS_BUFFER_SZ)
#define SPR_INIT_MEM_SIZE ((sizeof(struct drm_msm_spr_init_cfg)) + \
REG_DMA_HEADERS_BUFFER_SZ)
#define DEMURA_MEM_SIZE ((sizeof(struct drm_msm_dem_cfg)) + \
REG_DMA_HEADERS_BUFFER_SZ)
#define REG_MASK(n) ((BIT(n)) - 1)
#define REG_MASK_SHIFT(n, shift) ((REG_MASK(n)) << (shift))
@@ -142,6 +144,7 @@ static u32 feature_map[SDE_DSPP_MAX] = {
[SDE_DSPP_HIST] = REG_DMA_FEATURES_MAX,
[SDE_DSPP_AD] = REG_DMA_FEATURES_MAX,
[SDE_DSPP_RC] = RC_DATA,
[SDE_DSPP_DEMURA] = DEMURA_CFG,
};

static u32 sspp_feature_map[SDE_SSPP_MAX] = {
@@ -170,6 +173,7 @@ static u32 feature_reg_dma_sz[SDE_DSPP_MAX] = {
[SDE_DSPP_MEMCOLOR] = MEMCOLOR_MEM_SIZE,
[SDE_DSPP_RC] = RC_MEM_SIZE,
[SDE_DSPP_SPR] = SPR_INIT_MEM_SIZE,
[SDE_DSPP_DEMURA] = DEMURA_MEM_SIZE,
};

static u32 sspp_feature_reg_dma_sz[SDE_SSPP_MAX] = {
@@ -4693,3 +4697,552 @@ void reg_dmav1_setup_spr_init_cfgv1(struct sde_hw_dspp *ctx, void *cfg)
return;
}
}

static void reg_dma_demura_off(struct sde_hw_dspp *ctx,
struct sde_hw_cp_cfg *hw_cfg)
{
struct sde_hw_reg_dma_ops *dma_ops;
struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
struct sde_reg_dma_kickoff_cfg kick_off;
u32 demura_base = ctx->cap->sblk->demura.base;
u32 op_mode = 0;
int rc;
dma_ops = sde_reg_dma_get_ops();
dma_ops->reset_reg_dma_buf(dspp_buf[DEMURA_CFG][ctx->idx]);
REG_DMA_INIT_OPS(dma_write_cfg, MDSS, DEMURA_CFG,
dspp_buf[DEMURA_CFG][ctx->idx]);
REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
rc = dma_ops->setup_payload(&dma_write_cfg);
if (rc) {
DRM_ERROR("write decode select failed ret %d\n", rc);
return;
}
REG_DMA_SETUP_OPS(dma_write_cfg, ctx->hw.blk_off + demura_base + 0x4,
&op_mode, sizeof(op_mode), REG_SINGLE_WRITE, 0, 0, 0);
rc = dma_ops->setup_payload(&dma_write_cfg);
if (rc) {
DRM_ERROR("off(0x4): REG_SINGLE_WRITE failed ret %d\n", rc);
return;
}
REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
dspp_buf[DEMURA_CFG][ctx->idx],
REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
rc = dma_ops->kick_off(&kick_off);
if (rc)
DRM_ERROR("failed to kick off ret %d\n", rc);
}
static int __reg_dmav1_setup_demurav1_cfg0_c_params(
struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
struct drm_msm_dem_cfg *dcfg,
struct sde_hw_reg_dma_ops *dma_ops,
u32 *temp, u32 temp_sz, u32 comp_index,
u32 demura_base)
{
u32 i, len;
u64 *p;
int rc;
if (temp_sz < ARRAY_SIZE(dcfg->cfg0_param2_c0) * 8 || comp_index > 2) {
DRM_ERROR("exp sz %zd act sz %d comp index %d\n",
ARRAY_SIZE(dcfg->cfg0_param2_c0) * 8,
temp_sz, comp_index);
return -EINVAL;
}
memset(temp, 0x0, ARRAY_SIZE(dcfg->cfg0_param2_c0) * 8);
if (comp_index == 0) {
len = 1 << dcfg->c0_depth;
p = dcfg->cfg0_param2_c0;
} else if (comp_index == 1) {
len = 1 << dcfg->c1_depth;
p = dcfg->cfg0_param2_c1;
} else {
len = 1 << dcfg->c2_depth;
p = dcfg->cfg0_param2_c2;
}
if (!len || len > 256) {
DRM_ERROR("invalid len %d Max 256\n", len);
return -EINVAL;
}
i = ((comp_index & 0x3) << 28) | BIT(31);
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x68,
&i, sizeof(i), REG_SINGLE_WRITE, 0, 0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0x68: REG_SINGLE_WRITE failed ret %d\n", rc);
return rc;
}
for (i = 0; i < len; i++) {
temp[i * 2] = p[i] & REG_MASK(32);
temp[i * 2 + 1] = (p[i] & REG_MASK_SHIFT(10, 32)) >> 32;
DRM_DEBUG_DRIVER("0x6c: index %d value %x\n",
i * 2, temp[i * 2]);
DRM_DEBUG_DRIVER("0x6c: index %d value %x\n",
i * 2 + 1, temp[i * 2 + 1]);
}
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x6c,
temp, sizeof(u64) * len, REG_BLK_WRITE_INC, 0, 0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0x6c: REG_BLK_WRITE_INC failed ret %d\n", rc);
return rc;
}
return rc;
}
static int __reg_dmav1_setup_demurav1_cfg0(struct sde_hw_dspp *ctx,
struct drm_msm_dem_cfg *dcfg,
struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
struct sde_hw_reg_dma_ops *dma_ops,
struct sde_hw_cp_cfg *hw_cfg)
{
u32 *temp = NULL, i, *p = NULL, shift, width;
int rc;
u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;
if (!dcfg->cfg0_en) {
DRM_DEBUG_DRIVER("dcfg->cfg0_en is disabled\n");
return 0;
}
temp = kzalloc(sizeof(struct drm_msm_dem_cfg), GFP_KERNEL);
if (!temp)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(dcfg->cfg01_param0); i += 2) {
temp[i >> 1] = (dcfg->cfg01_param0[i] & REG_MASK(12)) |
((dcfg->cfg01_param0[i + 1] & REG_MASK(12)) << 16);
DRM_DEBUG_DRIVER("0x1c: index %d value %x\n", i >> 1,
temp[i >> 1]);
}
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x1c,
temp, sizeof(u32) * 4, REG_BLK_WRITE_SINGLE, 0, 0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0x1c: write err %d len %zd buffer index %d\n",
rc, sizeof(u32) * 4, dma_write_cfg->dma_buf->index);
goto quit;
}
memset(temp, 0, ARRAY_SIZE(dcfg->cfg0_param1));
for (i = 0; i < ARRAY_SIZE(dcfg->cfg0_param1); i++) {
p = (i < 4) ? &temp[0] : &temp[1];
shift = (8 * i) % 32;
*p |= (((dcfg->cfg0_param1[i] & 0x3f) & REG_MASK(6)) << shift);
DRM_DEBUG_DRIVER("0xc: index %d value %x val %x shift %d\n",
i, *p, (dcfg->cfg0_param1[i] & 0x3f), shift);
}
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0xc,
temp, sizeof(u32) * 2, REG_BLK_WRITE_SINGLE, 0, 0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0xc: write err %d len %zd buffer index %d\n",
rc, sizeof(u32) * 2, dma_write_cfg->dma_buf->index);
goto quit;
}
for (i = 0; i < ARRAY_SIZE(dcfg->cfg0_param0); i++)
DRM_DEBUG_DRIVER("0x2c: index %d value %x\n",
i, dcfg->cfg0_param0[i]);
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x2c,
dcfg->cfg0_param0, ARRAY_SIZE(dcfg->cfg0_param0) * sizeof(u32),
REG_BLK_WRITE_SINGLE, 0, 0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0x2c: write err %d len %zd buf idx %d\n",
rc, ARRAY_SIZE(dcfg->cfg0_param0) * sizeof(u32),
dma_write_cfg->dma_buf->index);
goto quit;
}
shift = 0;
memset(temp, 0, ARRAY_SIZE(dcfg->cfg0_param3_c0) * 3 * sizeof(u32));
for (i = 0; i < ARRAY_SIZE(dcfg->cfg0_param3_c0); i += 4) {
temp[shift] = (dcfg->cfg0_param3_c0[i] & REG_MASK(8)) |
((dcfg->cfg0_param3_c0[i + 1] & REG_MASK(8)) << 8) |
((dcfg->cfg0_param3_c0[i + 2] & REG_MASK(8)) << 16) |
((dcfg->cfg0_param3_c0[i + 3] & REG_MASK(8)) << 24);
temp[shift + 2] = (dcfg->cfg0_param3_c1[i] & REG_MASK(8)) |
((dcfg->cfg0_param3_c1[i + 1] & REG_MASK(8)) << 8) |
((dcfg->cfg0_param3_c1[i + 2] & REG_MASK(8)) << 16) |
((dcfg->cfg0_param3_c1[i + 3] & REG_MASK(8)) << 24);
temp[shift + 4] = (dcfg->cfg0_param3_c2[i] & REG_MASK(8)) |
((dcfg->cfg0_param3_c2[i + 1] & REG_MASK(8)) << 8) |
((dcfg->cfg0_param3_c2[i + 2] & REG_MASK(8)) << 16) |
((dcfg->cfg0_param3_c2[i + 3] & REG_MASK(8)) << 24);
DRM_DEBUG_DRIVER("0xb0: index %d value %x\n",
shift, temp[shift]);
DRM_DEBUG_DRIVER("0xb0: index %d value %x\n",
shift + 2, temp[shift + 2]);
DRM_DEBUG_DRIVER("0xb0: index %d value %x\n",
shift + 4, temp[shift + 4]);
shift++;
}
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0xb0,
temp, ARRAY_SIZE(dcfg->cfg0_param3_c0) * 3 * sizeof(u32),
REG_BLK_WRITE_SINGLE, 0, 0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0xb0: write err %d len %zd buffer index %d\n", rc,
ARRAY_SIZE(dcfg->cfg0_param3_c0) * 3 * sizeof(u32),
dma_write_cfg->dma_buf->index);
goto quit;
}
for (i = 0; i < 3; i++) {
rc = __reg_dmav1_setup_demurav1_cfg0_c_params(dma_write_cfg,
dcfg, dma_ops, temp,
sizeof(struct drm_msm_dem_cfg), i,
demura_base);
if (rc)
goto quit;
}
width = hw_cfg->displayh >> 1;
DRM_DEBUG_DRIVER("0x80: value %x\n", width);
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x80,
&width, sizeof(width), REG_SINGLE_WRITE, 0, 0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0x80: REG_SINGLE_WRITE err %d len %zd buf idx %d\n",
rc, sizeof(width), dma_write_cfg->dma_buf->index);
goto quit;
}
i = 0x400;
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0xe0,
&i, sizeof(i), REG_SINGLE_WRITE, 0, 0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0xe0: REG_SINGLE_WRITE err %d len %zd buf idx %d\n",
rc, sizeof(i), dma_write_cfg->dma_buf->index);
goto quit;
}
memset(temp, 0, sizeof(u32) * 2);
for (i = 0; i < ARRAY_SIZE(dcfg->cfg0_param4); i++)
DRM_DEBUG_DRIVER("hfc gain is %d\n", dcfg->cfg0_param4[i]);
temp[0] = (dcfg->cfg0_param4[0] & REG_MASK(5)) |
((dcfg->cfg0_param4[1] & REG_MASK(5)) << 8) |
((dcfg->cfg0_param4[2] & REG_MASK(5)) << 16) |
((dcfg->cfg0_param4[3] & REG_MASK(5)) << 24);
temp[1] = (dcfg->cfg0_param4[4] & REG_MASK(5)) |
((dcfg->cfg0_param4[5] & REG_MASK(5)) << 8) |
((dcfg->cfg0_param4[6] & REG_MASK(5)) << 16) |
((dcfg->cfg0_param4[7] & REG_MASK(5)) << 24);
DRM_DEBUG_DRIVER("0x4c: value is temp[0] %x temp[1] %x\n",
temp[0], temp[1]);
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x4c,
temp, sizeof(u32) * 2, REG_BLK_WRITE_SINGLE, 0, 0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0x4c: REG_BLK_WRITE_SINGLE %d len %zd buf idx %d\n",
rc, sizeof(u32) * 2, dma_write_cfg->dma_buf->index);
goto quit;
}
quit:
kfree(temp);
return rc;
}
static int __reg_dmav1_setup_demurav1_cfg1(struct sde_hw_dspp *ctx,
struct drm_msm_dem_cfg *dcfg,
struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
struct sde_hw_reg_dma_ops *dma_ops,
struct sde_hw_cp_cfg *hw_cfg)
{
u32 temp[2], i, shift, *cfg1_data = NULL, len = 0;
int rc;
u32 width = 0;
u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;
if (!dcfg->cfg1_en) {
DRM_DEBUG_DRIVER("dcfg->cfg1_en is disabled\n");
return 0;
}
len = ARRAY_SIZE(dcfg->cfg1_param0_c0);
cfg1_data = kcalloc(len, sizeof(u32), GFP_KERNEL);
if (!cfg1_data)
return -ENOMEM;
DRM_DEBUG_DRIVER("dcfg->cfg1_high_idx %d dcfg->cfg1_low_idx %d\n",
dcfg->cfg1_high_idx, dcfg->cfg1_low_idx);
if (dcfg->cfg1_high_idx >= ARRAY_SIZE(dcfg->cfg01_param0))
dcfg->cfg1_high_idx = ARRAY_SIZE(dcfg->cfg01_param0) - 1;
if (dcfg->cfg1_low_idx >= ARRAY_SIZE(dcfg->cfg01_param0))
dcfg->cfg1_low_idx = ARRAY_SIZE(dcfg->cfg01_param0) - 1;
temp[0] = dcfg->cfg01_param0[dcfg->cfg1_high_idx];
temp[1] = dcfg->cfg01_param0[dcfg->cfg1_low_idx];
if (temp[0] > temp[1])
shift = temp[0] - temp[1];
else
shift = 1;
i = (1 << 22) / shift;
DRM_DEBUG_DRIVER("0x14: value %x\n", i);
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x14,
&i, sizeof(i), REG_SINGLE_WRITE, 0, 0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0x14: REG_SINGLE_WRITE failed ret %d\n", rc);
goto quit;
}
width = hw_cfg->displayh;
DRM_DEBUG_DRIVER("width for LFC calculation is %d\n", width);
if (hw_cfg->displayh < hw_cfg->displayv) {
temp[0] = (8 * (1 << 21)) / width;
temp[1] = (16 * (1 << 21)) / hw_cfg->displayv;
} else {
temp[0] = (16 * (1 << 21)) / width;
temp[1] = (8 * (1 << 21)) / hw_cfg->displayv;
}
temp[0] = (dcfg->pentile) ? ((temp[0]) | BIT(31)) : temp[0];
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x54,
&temp[0], sizeof(temp[0]), REG_SINGLE_WRITE, 0, 0, 0);
DRM_DEBUG_DRIVER("0x54 value %x\n", temp[0]);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0x54: REG_SINGLE_WRITE ret %d\n", rc);
goto quit;
}
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x5c,
&temp[1], sizeof(temp[1]), REG_SINGLE_WRITE, 0, 0, 0);
DRM_DEBUG_DRIVER("0x5c value %x\n", temp[1]);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0x5c: REG_SINGLE_WRITE ret %d\n", rc);
goto quit;
}
cfg1_data[0] = (dcfg->cfg1_param0_c0[0] & REG_MASK(10)) |
((dcfg->cfg1_param0_c1[0] & REG_MASK(10)) << 10) |
((dcfg->cfg1_param0_c2[0] & REG_MASK(10)) << 20) | BIT(31);
DRM_DEBUG_DRIVER("0x64: value %x\n", cfg1_data[0]);
for (i = 1; i < len; i++) {
cfg1_data[i] = (dcfg->cfg1_param0_c0[i] & REG_MASK(10)) |
((dcfg->cfg1_param0_c1[i] & REG_MASK(10)) << 10) |
((dcfg->cfg1_param0_c2[i] & REG_MASK(10)) << 20);
DRM_DEBUG_DRIVER("0x64 index %d value %x\n", i,
cfg1_data[i]);
}
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x64,
cfg1_data, len * sizeof(u32), REG_BLK_WRITE_INC, 0,
0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("lut write failed ret %d\n", rc);
goto quit;
}
quit:
kfree(cfg1_data);
return rc;
}
static int __reg_dmav1_setup_demurav1_cfg3(struct sde_hw_dspp *ctx,
struct drm_msm_dem_cfg *dcfg,
struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
struct sde_hw_reg_dma_ops *dma_ops)
{
u32 temp[CFG3_PARAM01_LEN + 1];
int rc, i;
u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;
if (!dcfg->cfg3_en) {
DRM_DEBUG_DRIVER("dcfg->cfg3_en is disabled\n");
return 0;
}
temp[0] = dcfg->cfg3_param0_a[0] & REG_MASK(10);
temp[0] |= (dcfg->cfg3_param0_a[1] & REG_MASK(10)) << 16;
temp[1] = dcfg->cfg3_param0_a[2] & REG_MASK(10);
temp[1] |= (dcfg->cfg3_param0_a[3] & REG_MASK(10)) << 16;
temp[2] = dcfg->cfg3_param0_b[0] & REG_MASK(11);
temp[2] |= (dcfg->cfg3_param0_b[1] & REG_MASK(11)) << 16;
temp[3] = dcfg->cfg3_param0_b[2] & REG_MASK(11);
temp[3] |= (dcfg->cfg3_param0_b[3] & REG_MASK(11)) << 16;
temp[4] = (dcfg->cfg3_ab_adj) & REG_MASK(11);
for (i = 0; i < ARRAY_SIZE(temp); i++)
DRM_DEBUG_DRIVER("0xd0: index %i value %x\n", i, temp[i]);
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0xd0,
temp, ARRAY_SIZE(temp) * sizeof(u32), REG_BLK_WRITE_SINGLE, 0,
0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc)
DRM_ERROR("0xd0: REG_BLK_WRITE_SINGLE failed ret %d\n", rc);
return rc;
}
static int __reg_dmav1_setup_demurav1_cfg5(struct sde_hw_dspp *ctx,
struct drm_msm_dem_cfg *dcfg,
struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
struct sde_hw_reg_dma_ops *dma_ops)
{
u32 temp[CFG5_PARAM01_LEN];
int rc, i;
u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;
if (!dcfg->cfg5_en) {
DRM_DEBUG_DRIVER("dcfg->cfg5_en is disabled\n");
return 0;
}
temp[0] = dcfg->cfg5_param0[0] & REG_MASK(10);
temp[0] |= (dcfg->cfg5_param0[1] & REG_MASK(10)) << 16;
temp[1] = dcfg->cfg5_param0[2] & REG_MASK(10);
temp[1] |= (dcfg->cfg5_param0[3] & REG_MASK(10)) << 16;
temp[2] = dcfg->cfg5_param1[0] & REG_MASK(11);
temp[2] |= (dcfg->cfg5_param1[1] & REG_MASK(11)) << 16;
temp[3] = dcfg->cfg5_param1[2] & REG_MASK(11);
temp[3] |= (dcfg->cfg5_param1[3] & REG_MASK(11)) << 16;
for (i = 0; i < ARRAY_SIZE(temp); i++)
DRM_DEBUG_DRIVER("0xa0: index %i value %x\n", i, temp[i]);
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0xa0,
temp, ARRAY_SIZE(temp) * sizeof(u32), REG_BLK_WRITE_SINGLE, 0,
0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc)
DRM_ERROR("0xa0: REG_BLK_WRITE_SINGLE failed ret %d\n", rc);
return rc;
}
static int __reg_dmav1_setup_demurav1_en(struct sde_hw_dspp *ctx,
struct drm_msm_dem_cfg *dcfg,
struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
struct sde_hw_reg_dma_ops *dma_ops)
{
u32 en = 0, backl;
int rc;
u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;
backl = (1024 << 16) | 1024;
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x8,
&backl, sizeof(backl), REG_SINGLE_WRITE, 0, 0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc) {
DRM_ERROR("0x8: REG_SINGLE_WRITE failed ret %d\n", rc);
return rc;
}
en = (dcfg->src_id) ? BIT(31) : 0;
en |= (dcfg->cfg1_high_idx & REG_MASK(3)) << 24;
en |= (dcfg->cfg1_low_idx & REG_MASK(3)) << 20;
en |= (dcfg->c2_depth & REG_MASK(4)) << 16;
en |= (dcfg->c1_depth & REG_MASK(4)) << 12;
en |= (dcfg->c0_depth & REG_MASK(4)) << 8;
en |= (dcfg->cfg3_en) ? BIT(5) : 0;
en |= (dcfg->cfg4_en) ? BIT(4) : 0;
en |= (dcfg->cfg2_en) ? BIT(3) : 0;
en |= (dcfg->cfg0_en) ? BIT(2) : 0;
en |= (dcfg->cfg1_en) ? BIT(1) : 0;
DRM_DEBUG_DRIVER("demura en %x\n", en);
REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x4,
&en, sizeof(en), REG_SINGLE_WRITE, 0, 0, 0);
rc = dma_ops->setup_payload(dma_write_cfg);
if (rc)
DRM_ERROR("0x4: REG_SINGLE_WRITE failed ret %d\n", rc);
return rc;
}
void reg_dmav1_setup_demurav1(struct sde_hw_dspp *ctx, void *cfx)
{
struct drm_msm_dem_cfg *dcfg;
struct sde_hw_cp_cfg *hw_cfg = cfx;
int rc = 0;
struct sde_hw_reg_dma_ops *dma_ops;
struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
struct sde_reg_dma_kickoff_cfg kick_off;
rc = reg_dma_dspp_check(ctx, cfx, DEMURA_CFG);
if (rc)
return;
if (!hw_cfg->payload) {
reg_dma_demura_off(ctx, hw_cfg);
return;
}
if (hw_cfg->len != sizeof(struct drm_msm_dem_cfg)) {
DRM_ERROR("invalid sz of payload len %d exp %zd\n",
hw_cfg->len, sizeof(struct drm_msm_dem_cfg));
return;
}
dcfg = hw_cfg->payload;
dma_ops = sde_reg_dma_get_ops();
dma_ops->reset_reg_dma_buf(dspp_buf[DEMURA_CFG][ctx->idx]);
REG_DMA_INIT_OPS(dma_write_cfg, MDSS, DEMURA_CFG,
dspp_buf[DEMURA_CFG][ctx->idx]);
REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
rc = dma_ops->setup_payload(&dma_write_cfg);
if (rc) {
DRM_ERROR("write decode select failed ret %d\n", rc);
return;
}
rc = __reg_dmav1_setup_demurav1_cfg0(ctx, dcfg, &dma_write_cfg,
dma_ops, hw_cfg);
if (rc) {
DRM_ERROR("failed setup_demurav1_cfg0 rc %d", rc);
return;
}
rc = __reg_dmav1_setup_demurav1_cfg1(ctx, dcfg, &dma_write_cfg,
dma_ops, hw_cfg);
if (rc) {
DRM_ERROR("failed setup_demurav1_cfg1 rc %d", rc);
return;
}
rc = __reg_dmav1_setup_demurav1_cfg3(ctx, dcfg, &dma_write_cfg,
dma_ops);
if (rc) {
DRM_ERROR("failed setup_demurav1_cfg3 rc %d", rc);
return;
}
rc = __reg_dmav1_setup_demurav1_cfg5(ctx, dcfg, &dma_write_cfg,
dma_ops);
if (rc) {
DRM_ERROR("failed setup_demurav1_cfg5 rc %d", rc);
return;
}
rc = __reg_dmav1_setup_demurav1_en(ctx, dcfg, &dma_write_cfg,
dma_ops);
if (rc) {
DRM_ERROR("failed setup_demurav1_en rc %d", rc);
return;
}
REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
dspp_buf[DEMURA_CFG][ctx->idx],
REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
DRM_DEBUG("enable demura buffer size %d\n",
dspp_buf[DEMURA_CFG][ctx->idx]->index);
rc = dma_ops->kick_off(&kick_off);
if (rc)
DRM_ERROR("failed to kick off ret %d\n", rc);
}

@@ -295,4 +295,11 @@ void reg_dmav2_setup_vig_gamutv61(struct sde_hw_pipe *ctx, void *cfg);
*/
void reg_dmav1_setup_spr_init_cfgv1(struct sde_hw_dspp *ctx, void *cfg);

/**
* reg_dmav1_setup_demurav1() - function to set up the demurav1 configuration.
* @ctx: dspp ctx info
* @cfg: pointer to struct sde_hw_cp_cfg
*/
void reg_dmav1_setup_demurav1(struct sde_hw_dspp *ctx, void *cfg);
#endif /* _SDE_HW_REG_DMA_V1_COLOR_PROC_H */

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/

#ifndef _SDE_HW_UTIL_H
@@ -12,6 +12,7 @@
#include "sde_hw_catalog.h" #include "sde_hw_catalog.h"
#define REG_MASK(n) ((BIT(n)) - 1) #define REG_MASK(n) ((BIT(n)) - 1)
#define REG_MASK_SHIFT(n, shift) ((REG_MASK(n)) << (shift))
#define LP_DDR4_TYPE 0x7

struct sde_format_extended;

@@ -3565,8 +3565,11 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
psde->pipe_sblk->max_per_pipe_bw_high * 1000LL);
index = (master_plane_id == 0) ? 0 : 1;
if (catalog->has_demura &&
catalog->demura_supported[psde->pipe][index] != ~0x0) {
sde_kms_info_add_keyint(info, "demura_block", index);
sde_kms_info_add_keyint(info, "demura_pipe_id",
psde->pipe - SSPP_DMA0);
}
if ((is_master &&
(psde->features & BIT(SDE_SSPP_INVERSE_PMA))) ||

@@ -59,6 +59,7 @@ enum sde_reg_dma_read_sel {
* @LTM_ROI: LTM ROI
* @LTM_VLUT: LTM VLUT
* @RC_DATA: Rounded corner data
* @DEMURA_CFG: Demura feature
* @REG_DMA_FEATURES_MAX: invalid selection
*/
enum sde_reg_dma_features {
@@ -79,6 +80,7 @@ enum sde_reg_dma_features {
LTM_ROI,
LTM_VLUT,
RC_DATA,
DEMURA_CFG,
REG_DMA_FEATURES_MAX,
};