disp: msm: sde: Update LTM merge mode setting for kailua
Update LTM merge mode setting for kailua since merge control has its
own register.

Change-Id: Ieaacd1e12f410def18a0fce11a77c94832c416f5
Signed-off-by: Renchao Liu <quic_rencliu@quicinc.com>
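For context before the diff, a minimal stand-alone C sketch (not part of this patch) of the programming difference the commit describes: on LTM v1.2 (kailua) the merge enable is written to its own merge-control register at ltm base + 0x18, while older LTM versions packed it into BIT(16)/BIT(17) of the op-mode register at ltm base + 0x4. The `write_reg()` helper and the example base address are illustrative placeholders; the real driver uses SDE_REG_WRITE() and the reg-DMA path shown below.

```c
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Illustrative stand-in for a register write (real driver: SDE_REG_WRITE()). */
static void write_reg(uint32_t base, uint32_t off, uint32_t val)
{
	printf("write 0x%08x -> [0x%08x]\n", (unsigned)val, (unsigned)(base + off));
}

/* Pre-v1.2: merge enable is packed into the op-mode register (base + 0x4). */
static void ltm_set_merge_v1(uint32_t ltm_base, uint32_t op_mode, int merge_en)
{
	if (merge_en)
		op_mode |= BIT(16);
	else
		op_mode &= ~(BIT(16) | BIT(17));
	write_reg(ltm_base, 0x4, op_mode & 0x1FFFFFF);
}

/* v1.2 (kailua): merge enable has a dedicated merge-control register (base + 0x18). */
static void ltm_set_merge_v1_2(uint32_t ltm_base, uint32_t op_mode, int merge_en)
{
	uint32_t merge_mode = merge_en ? BIT(0) : 0x0;

	write_reg(ltm_base, 0x18, merge_mode & 0x3);	/* merge control register */
	write_reg(ltm_base, 0x4, op_mode & 0x1FFFFFF);	/* op-mode no longer carries merge bits */
}

int main(void)
{
	/* 0x55000 is a made-up LTM block base used only for the demo output. */
	ltm_set_merge_v1(0x55000, BIT(0), 1);
	ltm_set_merge_v1_2(0x55000, BIT(0), 1);
	return 0;
}
```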
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 */
#include <drm/msm_drm_pp.h>
@@ -316,8 +317,40 @@ void sde_setup_dspp_ltm_hist_bufferv1(struct sde_hw_dspp *ctx, u64 addr)
			(hs_addr & 0xFFFFFF00));
}

static void sde_setup_dspp_ltm_hist_ctrl_common(struct sde_hw_dspp *ctx,
		u64 addr, u32 op_mode,
		struct sde_ltm_phase_info *phase)
{
	u32 offset;

	if (ctx->idx >= DSPP_MAX) {
		DRM_ERROR("Invalid idx %d\n", ctx->idx);
		return;
	}

	if (phase->portrait_en)
		op_mode |= BIT(2);
	else
		op_mode &= ~BIT(2);

	offset = ctx->cap->sblk->ltm.base + 0x8;
	SDE_REG_WRITE(&ctx->hw, offset, (phase->init_h[ctx->idx] & 0x7FFFFFF));
	offset += 4;
	SDE_REG_WRITE(&ctx->hw, offset, (phase->init_v & 0xFFFFFF));
	offset += 4;
	SDE_REG_WRITE(&ctx->hw, offset, (phase->inc_h & 0xFFFFFF));
	offset += 4;
	SDE_REG_WRITE(&ctx->hw, offset, (phase->inc_v & 0xFFFFFF));

	op_mode |= BIT(0);
	sde_setup_dspp_ltm_hist_bufferv1(ctx, addr);

	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x4,
			(op_mode & 0x1FFFFFF));
}

void sde_setup_dspp_ltm_hist_ctrlv1(struct sde_hw_dspp *ctx, void *cfg,
		bool enable, u64 addr)
		bool enable, u64 addr)
{
	struct sde_hw_cp_cfg *hw_cfg = cfg;
	struct sde_ltm_phase_info phase;
@@ -347,38 +380,58 @@ void sde_setup_dspp_ltm_hist_ctrlv1(struct sde_hw_dspp *ctx, void *cfg,
		return;
	}

	if (ctx->idx >= DSPP_MAX) {
		DRM_ERROR("Invalid idx %d\n", ctx->idx);
	memset(&phase, 0, sizeof(phase));
	sde_ltm_get_phase_info(hw_cfg, &phase);
	if (phase.merge_en)
		op_mode |= BIT(16);
	else
		op_mode &= ~LTM_CONFIG_MERGE_MODE_ONLY;

	sde_setup_dspp_ltm_hist_ctrl_common(ctx, addr, op_mode, &phase);
}

void sde_setup_dspp_ltm_hist_ctrlv1_2(struct sde_hw_dspp *ctx, void *cfg,
		bool enable, u64 addr)
{
	struct sde_hw_cp_cfg *hw_cfg = cfg;
	struct sde_ltm_phase_info phase;
	u32 op_mode, offset;
	u32 merge_mode = 0;

	if (!ctx) {
		DRM_ERROR("invalid parameters ctx %pK\n", ctx);
		return;
	}

	if (enable && (!addr || !cfg)) {
		DRM_ERROR("invalid addr 0x%llx cfg %pK\n", addr, cfg);
		return;
	}

	offset = ctx->cap->sblk->ltm.base + 0x4;
	op_mode = SDE_REG_READ(&ctx->hw, offset);
	if (!enable) {
		if (op_mode & BIT(1))
			op_mode &= ~BIT(0);
		else
			op_mode = 0x0;

		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x4,
				(op_mode & 0x1FFFFFF));
		return;
	}

	memset(&phase, 0, sizeof(phase));
	sde_ltm_get_phase_info(hw_cfg, &phase);

	if (phase.portrait_en)
		op_mode |= BIT(2);
	else
		op_mode &= ~BIT(2);

	offset = ctx->cap->sblk->ltm.base + 0x18;
	if (phase.merge_en)
		op_mode |= BIT(16);
		merge_mode = BIT(0);
	else
		op_mode &= ~(BIT(16) | BIT(17));
		merge_mode = 0x0;
	SDE_REG_WRITE(&ctx->hw, offset, (merge_mode & 0x3));

	offset = ctx->cap->sblk->ltm.base + 0x8;
	SDE_REG_WRITE(&ctx->hw, offset, (phase.init_h[ctx->idx] & 0x7FFFFFF));
	offset += 4;
	SDE_REG_WRITE(&ctx->hw, offset, (phase.init_v & 0xFFFFFF));
	offset += 4;
	SDE_REG_WRITE(&ctx->hw, offset, (phase.inc_h & 0xFFFFFF));
	offset += 4;
	SDE_REG_WRITE(&ctx->hw, offset, (phase.inc_v & 0xFFFFFF));

	op_mode |= BIT(0);
	sde_setup_dspp_ltm_hist_bufferv1(ctx, addr);

	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x4,
			(op_mode & 0x1FFFFFF));
	sde_setup_dspp_ltm_hist_ctrl_common(ctx, addr, op_mode, &phase);
}

void sde_ltm_read_intr_status(struct sde_hw_dspp *ctx, u32 *status)
@@ -399,6 +452,17 @@ void sde_ltm_read_intr_status(struct sde_hw_dspp *ctx, u32 *status)
	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x58, clear);
}

void sde_ltm_clear_merge_modev1_2(struct sde_hw_dspp *ctx)
{
	if (!ctx) {
		DRM_ERROR("invalid parameters ctx %pK\n", ctx);
		return;
	}

	/* clear the merge_mode bit */
	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x18, 0x0);
}

void sde_ltm_clear_merge_mode(struct sde_hw_dspp *ctx)
{
	u32 clear;
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved.
 */
#ifndef _SDE_HW_COLOR_PROC_V4_H_
@@ -58,6 +59,17 @@ void sde_setup_dspp_ltm_threshv1(struct sde_hw_dspp *ctx, void *cfg);
 */
void sde_setup_dspp_ltm_hist_ctrlv1(struct sde_hw_dspp *ctx, void *cfg,
		bool enable, u64 addr);

/**
 * sde_setup_dspp_ltm_hist_ctrlv1_2 - Same as v1 except ltm merge mode setting.
 * @ctx: dspp ctx pointer
 * @cfg: pointer to sde_hw_cp_cfg
 * @enable: feature enable/disable value
 * @addr: aligned iova address
 */
void sde_setup_dspp_ltm_hist_ctrlv1_2(struct sde_hw_dspp *ctx, void *cfg,
		bool enable, u64 addr);

/**
 * sde_setup_dspp_ltm_hist_bufferv1 - Function for setting ltm hist buffer v1.
 * @ctx: dspp ctx pointer
@@ -78,6 +90,12 @@ void sde_ltm_read_intr_status(struct sde_hw_dspp *dspp, u32 *status);
 */
void sde_ltm_clear_merge_mode(struct sde_hw_dspp *dspp);

/**
 * sde_ltm_clear_merge_modev1_2 - api to clear ltm merge_mode
 * @dspp: pointer to dspp object
 */
void sde_ltm_clear_merge_modev1_2(struct sde_hw_dspp *dspp);


/**
 * sde_demura_backlight_cfg - api to set backlight for demura
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
 */

@@ -231,7 +231,8 @@ static void dspp_ltm(struct sde_hw_dspp *c)
	int ret = 0;

	if (c->cap->sblk->ltm.version == SDE_COLOR_PROCESS_VER(0x1, 0x0) ||
		c->cap->sblk->ltm.version == SDE_COLOR_PROCESS_VER(0x1, 0x1)) {
		c->cap->sblk->ltm.version == SDE_COLOR_PROCESS_VER(0x1, 0x1) ||
		c->cap->sblk->ltm.version == SDE_COLOR_PROCESS_VER(0x1, 0x2)) {
		ret = reg_dmav1_init_ltm_op_v6(SDE_LTM_INIT, c->idx);
		if (!ret)
			ret = reg_dmav1_init_ltm_op_v6(SDE_LTM_ROI, c->idx);
@@ -239,16 +240,29 @@ static void dspp_ltm(struct sde_hw_dspp *c)
			ret = reg_dmav1_init_ltm_op_v6(SDE_LTM_VLUT, c->idx);

		if (!ret) {
			if (c->cap->sblk->ltm.version ==
					SDE_COLOR_PROCESS_VER(0x1, 0x2)) {
				c->ops.setup_ltm_vlut =
					reg_dmav1_setup_ltm_vlutv1_2;
				c->ops.setup_ltm_hist_ctrl =
					sde_setup_dspp_ltm_hist_ctrlv1_2;
				c->ops.clear_ltm_merge_mode =
					sde_ltm_clear_merge_modev1_2;
			} else {
				c->ops.setup_ltm_vlut =
					reg_dmav1_setup_ltm_vlutv1;
				c->ops.setup_ltm_hist_ctrl =
					sde_setup_dspp_ltm_hist_ctrlv1;
				c->ops.clear_ltm_merge_mode =
					sde_ltm_clear_merge_mode;
			}

			c->ops.setup_ltm_init = reg_dmav1_setup_ltm_initv1;
			c->ops.setup_ltm_roi = reg_dmav1_setup_ltm_roiv1;
			c->ops.setup_ltm_vlut = reg_dmav1_setup_ltm_vlutv1;
			c->ops.setup_ltm_thresh = sde_setup_dspp_ltm_threshv1;
			c->ops.setup_ltm_hist_ctrl =
				sde_setup_dspp_ltm_hist_ctrlv1;
			c->ops.setup_ltm_hist_buffer =
				sde_setup_dspp_ltm_hist_bufferv1;
			c->ops.ltm_read_intr_status = sde_ltm_read_intr_status;
			c->ops.clear_ltm_merge_mode = sde_ltm_clear_merge_mode;
	} else {
		c->ops.setup_ltm_init = NULL;
		c->ops.setup_ltm_roi = NULL;
@@ -259,8 +273,10 @@ static void dspp_ltm(struct sde_hw_dspp *c)
		c->ops.ltm_read_intr_status = NULL;
		c->ops.clear_ltm_merge_mode = NULL;
	}
	if (!ret && c->cap->sblk->ltm.version ==
			SDE_COLOR_PROCESS_VER(0x1, 0x1))
	if (!ret && (c->cap->sblk->ltm.version ==
			SDE_COLOR_PROCESS_VER(0x1, 0x1) ||
			c->cap->sblk->ltm.version ==
			SDE_COLOR_PROCESS_VER(0x1, 0x2)))
		c->ltm_checksum_support = true;
	else
		c->ltm_checksum_support = false;
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 */

@@ -3774,7 +3774,7 @@ void reg_dmav1_setup_ltm_roiv1(struct sde_hw_dspp *ctx, void *cfg)
	}
}

static void ltm_vlutv1_disable(struct sde_hw_dspp *ctx)
static void ltm_vlutv1_disable(struct sde_hw_dspp *ctx, u32 clear)
{
	enum sde_ltm idx = 0;
	u32 opmode = 0, offset = 0;
@@ -3792,23 +3792,119 @@ static void ltm_vlutv1_disable(struct sde_hw_dspp *ctx)
		/* disable VLUT/INIT/ROI */
		opmode &= REG_DMA_LTM_VLUT_DISABLE_OP_MASK;
	else
		opmode &= LTM_CONFIG_MERGE_MODE_ONLY;
	opmode &= clear;
	SDE_REG_WRITE(&ctx->hw, offset, opmode);
}
static int reg_dmav1_setup_ltm_vlutv1_common(struct sde_hw_dspp *ctx, void *cfg,
		struct sde_hw_reg_dma_ops *dma_ops,
		struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
		u32 *opmode, enum sde_ltm *dspp_idx)
{
	struct drm_msm_ltm_data *payload = NULL;
	struct sde_hw_cp_cfg *hw_cfg = cfg;
	enum sde_ltm idx = 0;
	u32 offset, crs = 0, index = 0, len = 0, blk = 0;
	u32 i = 0, num_mixers = 0;
	int rc = 0;

	idx = (enum sde_ltm)ctx->idx;
	num_mixers = hw_cfg->num_of_mixers;
	rc = reg_dmav1_get_ltm_blk(hw_cfg, idx, &dspp_idx[0], &blk);
	if (rc) {
		if (rc != -EALREADY)
			DRM_ERROR("failed to get the blk info\n");
		return -EINVAL;
	}

	if (hw_cfg->len != sizeof(struct drm_msm_ltm_data)) {
		DRM_ERROR("invalid size of payload len %d exp %zd\n",
				hw_cfg->len, sizeof(struct drm_msm_ltm_data));
		return -EINVAL;
	}

	offset = ctx->cap->sblk->ltm.base + 0x5c;
	crs = SDE_REG_READ(&ctx->hw, offset);
	if (!(crs & BIT(3))) {
		DRM_ERROR("LTM VLUT buffer is not ready: crs = %d\n", crs);
		return -EINVAL;
	}

	dma_ops->reset_reg_dma_buf(ltm_buf[LTM_VLUT][idx]);

	REG_DMA_INIT_OPS(*dma_write_cfg, blk, LTM_VLUT, ltm_buf[LTM_VLUT][idx]);
	REG_DMA_SETUP_OPS(*dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
	rc = dma_ops->setup_payload(dma_write_cfg);
	if (rc) {
		DRM_ERROR("write decode select failed ret %d\n", rc);
		return -EINVAL;
	}

	/* write VLUT index */
	REG_DMA_SETUP_OPS(*dma_write_cfg, 0x38, &index, sizeof(u32),
			REG_SINGLE_WRITE, 0, 0, 0);
	rc = dma_ops->setup_payload(dma_write_cfg);
	if (rc) {
		DRM_ERROR("write VLUT index reg failed ret %d\n", rc);
		return -EINVAL;
	}

	payload = hw_cfg->payload;
	len = sizeof(u32) * LTM_DATA_SIZE_0 * LTM_DATA_SIZE_3;
	REG_DMA_SETUP_OPS(*dma_write_cfg, 0x3c, &payload->data[0][0],
			len, REG_BLK_WRITE_INC, 0, 0, 0);
	rc = dma_ops->setup_payload(dma_write_cfg);
	if (rc) {
		DRM_ERROR("write VLUT data failed rc %d\n", rc);
		return -EINVAL;
	}

	for (i = 0; i < num_mixers; i++) {
		/* broadcast feature is not supported with REG_SINGLE_MODIFY */
		/* reset decode select to unicast */
		dma_write_cfg->blk = ltm_mapping[dspp_idx[i]];
		REG_DMA_SETUP_OPS(*dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0,
				0, 0);
		rc = dma_ops->setup_payload(dma_write_cfg);
		if (rc) {
			DRM_ERROR("write decode select failed ret %d\n", rc);
			return -EINVAL;
		}

		/* set the UPDATE_REQ bit */
		crs = BIT(0);
		REG_DMA_SETUP_OPS(*dma_write_cfg, 0x5c, &crs, sizeof(u32),
				REG_SINGLE_MODIFY, 0, 0,
				REG_DMA_LTM_UPDATE_REQ_MASK);
		rc = dma_ops->setup_payload(dma_write_cfg);
		if (rc) {
			DRM_ERROR("write UPDATE_REQ failed ret %d\n", rc);
			return -EINVAL;
		}
		opmode[i] = BIT(1);
		if (ltm_vlut_ops_mask[dspp_idx[i]] & ltm_unsharp)
			opmode[i] |= BIT(4);
		if (ltm_vlut_ops_mask[dspp_idx[i]] & ltm_dither)
			opmode[i] |= BIT(6);
		if (ltm_vlut_ops_mask[dspp_idx[i]] & ltm_roi)
			opmode[i] |= BIT(24);
		ltm_vlut_ops_mask[dspp_idx[i]] |= ltm_vlut;
	}
	return 0;
}
void reg_dmav1_setup_ltm_vlutv1(struct sde_hw_dspp *ctx, void *cfg)
{
	struct drm_msm_ltm_data *payload = NULL;
	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
	struct sde_hw_cp_cfg *hw_cfg = cfg;
	struct sde_reg_dma_kickoff_cfg kick_off;
	struct sde_hw_reg_dma_ops *dma_ops;
	struct sde_ltm_phase_info phase;
	enum sde_ltm dspp_idx[LTM_MAX] = {0};
	enum sde_ltm idx = 0;
	u32 offset, crs = 0, index = 0, len = 0, blk = 0, opmode = 0;
	struct sde_reg_dma_kickoff_cfg kick_off;
	u32 *opmode;
	u32 i = 0, num_mixers = 0;
	int rc = 0;
	struct sde_hw_cp_cfg *hw_cfg = cfg;
	enum sde_ltm dspp_idx[LTM_MAX] = {0};
	enum sde_ltm idx = 0;

	rc = reg_dma_ltm_check(ctx, cfg, LTM_VLUT);
	if (rc)
@@ -3818,106 +3914,43 @@ void reg_dmav1_setup_ltm_vlutv1(struct sde_hw_dspp *ctx, void *cfg)
	if (!hw_cfg->payload) {
		DRM_DEBUG_DRIVER("Disable LTM vlut feature\n");
		LOG_FEATURE_OFF;
		ltm_vlutv1_disable(ctx);
		ltm_vlutv1_disable(ctx, LTM_CONFIG_MERGE_MODE_ONLY);
		return;
	}

	idx = (enum sde_ltm)ctx->idx;
	num_mixers = hw_cfg->num_of_mixers;
	rc = reg_dmav1_get_ltm_blk(hw_cfg, idx, &dspp_idx[0], &blk);
	if (rc) {
		if (rc != -EALREADY)
			DRM_ERROR("failed to get the blk info\n");
		return;
	}

	if (hw_cfg->len != sizeof(struct drm_msm_ltm_data)) {
		DRM_ERROR("invalid size of payload len %d exp %zd\n",
				hw_cfg->len, sizeof(struct drm_msm_ltm_data));
		return;
	}

	offset = ctx->cap->sblk->ltm.base + 0x5c;
	crs = SDE_REG_READ(&ctx->hw, offset);
	if (!(crs & BIT(3))) {
		DRM_ERROR("LTM VLUT buffer is not ready: crs = %d\n", crs);
		return;
	}

	opmode = kvzalloc((num_mixers * sizeof(u32)), GFP_KERNEL);
	dma_ops = sde_reg_dma_get_ops();
	dma_ops->reset_reg_dma_buf(ltm_buf[LTM_VLUT][idx]);

	REG_DMA_INIT_OPS(dma_write_cfg, blk, LTM_VLUT, ltm_buf[LTM_VLUT][idx]);
	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
	rc = dma_ops->setup_payload(&dma_write_cfg);
	if (rc) {
		DRM_ERROR("write decode select failed ret %d\n", rc);
		return;
	}

	/* write VLUT index */
	REG_DMA_SETUP_OPS(dma_write_cfg, 0x38, &index, sizeof(u32),
			REG_SINGLE_WRITE, 0, 0, 0);
	rc = dma_ops->setup_payload(&dma_write_cfg);
	if (rc) {
		DRM_ERROR("write VLUT index reg failed ret %d\n", rc);
		return;
	}

	payload = hw_cfg->payload;
	len = sizeof(u32) * LTM_DATA_SIZE_0 * LTM_DATA_SIZE_3;
	REG_DMA_SETUP_OPS(dma_write_cfg, 0x3c, &payload->data[0][0],
			len, REG_BLK_WRITE_INC, 0, 0, 0);
	rc = dma_ops->setup_payload(&dma_write_cfg);
	if (rc) {
		DRM_ERROR("write VLUT data failed rc %d\n", rc);
		return;
	}
	rc = reg_dmav1_setup_ltm_vlutv1_common(ctx, cfg, dma_ops,
			&dma_write_cfg, opmode, dspp_idx);
	if (rc)
		goto vlut_exit;

	sde_ltm_get_phase_info(hw_cfg, &phase);
	for (i = 0; i < num_mixers; i++) {
		/* broadcast feature is not supported with REG_SINGLE_MODIFY */
		/* reset decode select to unicast */
		dma_write_cfg.blk = ltm_mapping[dspp_idx[i]];
		REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0,
				0, 0);
		rc = dma_ops->setup_payload(&dma_write_cfg);
		if (rc) {
			DRM_ERROR("write decode select failed ret %d\n", rc);
			return;
			goto vlut_exit;
		}

		/* set the UPDATE_REQ bit */
		crs = BIT(0);
		REG_DMA_SETUP_OPS(dma_write_cfg, 0x5c, &crs, sizeof(u32),
				REG_SINGLE_MODIFY, 0, 0,
				REG_DMA_LTM_UPDATE_REQ_MASK);
		rc = dma_ops->setup_payload(&dma_write_cfg);
		if (rc) {
			DRM_ERROR("write UPDATE_REQ failed ret %d\n", rc);
			return;
		}

		opmode = BIT(1);
		if (ltm_vlut_ops_mask[dspp_idx[i]] & ltm_unsharp)
			opmode |= BIT(4);
		if (ltm_vlut_ops_mask[dspp_idx[i]] & ltm_dither)
			opmode |= BIT(6);
		if (ltm_vlut_ops_mask[dspp_idx[i]] & ltm_roi)
			opmode |= BIT(24);
		if (phase.merge_en)
			opmode |= BIT(16);
			opmode[i] |= BIT(16);
		else
			opmode &= ~(BIT(16) | BIT(17));
		ltm_vlut_ops_mask[dspp_idx[i]] |= ltm_vlut;
			opmode[i] &= ~LTM_CONFIG_MERGE_MODE_ONLY;

		REG_DMA_SETUP_OPS(dma_write_cfg, 0x4, &opmode, sizeof(u32),
		REG_DMA_SETUP_OPS(dma_write_cfg, 0x4, &opmode[i], sizeof(u32),
				REG_SINGLE_MODIFY, 0, 0,
				REG_DMA_LTM_VLUT_ENABLE_OP_MASK);
		rc = dma_ops->setup_payload(&dma_write_cfg);
		if (rc) {
			DRM_ERROR("write UPDATE_REQ failed ret %d\n", rc);
			return;
			DRM_ERROR("write opmode failed ret %d\n", rc);
			goto vlut_exit;
		}
	}

@@ -3926,10 +3959,91 @@ void reg_dmav1_setup_ltm_vlutv1(struct sde_hw_dspp *ctx, void *cfg)
			LTM_VLUT);
	LOG_FEATURE_ON;
	rc = dma_ops->kick_off(&kick_off);
	if (rc) {
	if (rc)
		DRM_ERROR("failed to kick off ret %d\n", rc);
vlut_exit:
	kvfree(opmode);
}

void reg_dmav1_setup_ltm_vlutv1_2(struct sde_hw_dspp *ctx, void *cfg)
{
	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
	struct sde_hw_reg_dma_ops *dma_ops;
	struct sde_ltm_phase_info phase;
	struct sde_reg_dma_kickoff_cfg kick_off;
	u32 merge_mode = 0;
	u32 *opmode;
	u32 i = 0, num_mixers = 0;
	int rc = 0;
	struct sde_hw_cp_cfg *hw_cfg = cfg;
	enum sde_ltm dspp_idx[LTM_MAX] = {0};
	enum sde_ltm idx = 0;

	rc = reg_dma_ltm_check(ctx, cfg, LTM_VLUT);
	if (rc)
		return;

	/* disable case */
	if (!hw_cfg->payload) {
		DRM_DEBUG_DRIVER("Disable LTM vlut feature\n");
		LOG_FEATURE_OFF;
		ltm_vlutv1_disable(ctx, 0x0);
		return;
	}

	idx = (enum sde_ltm)ctx->idx;
	num_mixers = hw_cfg->num_of_mixers;
	opmode = kvzalloc((num_mixers * sizeof(u32)), GFP_KERNEL);
	dma_ops = sde_reg_dma_get_ops();

	rc = reg_dmav1_setup_ltm_vlutv1_common(ctx, cfg, dma_ops,
			&dma_write_cfg, opmode, dspp_idx);
	if (rc)
		goto vlut_exit;

	sde_ltm_get_phase_info(hw_cfg, &phase);
	for (i = 0; i < num_mixers; i++) {
		dma_write_cfg.blk = ltm_mapping[dspp_idx[i]];
		REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0,
				0, 0);
		rc = dma_ops->setup_payload(&dma_write_cfg);
		if (rc) {
			DRM_ERROR("write decode select failed ret %d\n", rc);
			goto vlut_exit;
		}

		if (phase.merge_en)
			merge_mode = BIT(0);
		else
			merge_mode = 0x0;
		REG_DMA_SETUP_OPS(dma_write_cfg, 0x18, &merge_mode, sizeof(u32),
				REG_SINGLE_MODIFY, 0, 0,
				0xFFFFFFFC);
		rc = dma_ops->setup_payload(&dma_write_cfg);
		if (rc) {
			DRM_ERROR("write merge_ctrl failed ret %d\n", rc);
			goto vlut_exit;
		}

		REG_DMA_SETUP_OPS(dma_write_cfg, 0x4, &opmode[i], sizeof(u32),
				REG_SINGLE_MODIFY, 0, 0,
				REG_DMA_LTM_VLUT_ENABLE_OP_MASK);
		rc = dma_ops->setup_payload(&dma_write_cfg);
		if (rc) {
			DRM_ERROR("write opmode failed ret %d\n", rc);
			goto vlut_exit;
		}
	}

	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_VLUT][idx],
			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
			LTM_VLUT);
	LOG_FEATURE_ON;
	rc = dma_ops->kick_off(&kick_off);
	if (rc)
		DRM_ERROR("failed to kick off ret %d\n", rc);
vlut_exit:
	kvfree(opmode);
}

int reg_dmav2_init_dspp_op_v4(int feature, enum sde_dspp idx)
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */

@@ -251,6 +252,13 @@ void reg_dmav1_setup_ltm_roiv1(struct sde_hw_dspp *ctx, void *cfg);
 */
void reg_dmav1_setup_ltm_vlutv1(struct sde_hw_dspp *ctx, void *cfg);

/**
 * reg_dmav1_setup_ltm_vlutv1_2() - Same as v1 except ltm merge mode setting.
 * @ctx: dspp ctx info
 * @cfg: pointer to struct sde_hw_cp_cfg
 */
void reg_dmav1_setup_ltm_vlutv1_2(struct sde_hw_dspp *ctx, void *cfg);

/**
 * reg_dmav1_setup_rc_datav1() - RC DATA v1 implementation using reg dma v1.
 * @ctx: dspp ctx info