android_kernel_samsung_sm86…/msm/sde/sde_hw_ds.c
Narendra Muppalla daa511cb90 Merge remote-tracking branch 'quic/display-kernel.lnx.5.10' into display-kernel.lnx.5.15
* quic/display-kernel.lnx.5.10:
  disp: msm: sde: avoid error during fal10_veto override enablement
  disp: msm: update copyright description
  disp: msm: sde: configure dest_scaler op_mode for two independent displays
  disp: msm: dp: updated copyright set for 4nm target
  Revert "disp: msm: sde: consider max of actual and default prefill lines"
  disp: msm: sde: Reset backlight scale when HWC is stopped
  disp: msm: dp: avoid duplicate read of link status
  disp: msm: dsi: update vreg_ctrl settings for cape
  disp: msm: fail commit if drm_gem_obj was found attached to a sec CB
  disp: msm: dp: updated register values for 4nm target
  disp: msm: sde: update framedata event handling
  disp: msm: dsi: Add new phy compatible string for cape
  disp: msm: sde: software override for fal10 in cwb enable
  disp: msm: update cleanup during bind failure in msm_drm_component_init
  disp: msm: sde: dump user input_fence info on spec fence timeout
  disp: msm: sde: add null pointer check for encoder current master
  disp: msm: dsi: enable DMA start window scheduling for broadcast commands
  disp: msm: sde: avoid alignment checks for linear formats
  disp: msm: reset thread priority work on every new run
  disp: msm: sde: send power on event for cont. splash
  disp: msm: sde: always set CTL_x_UIDLE_ACTIVE register to "1"
  disp: msm: use vzalloc for large allocations
  disp: msm: sde: Add support to limit DSC size to 10k
  disp: msm: sde: add tx wait during DMS for sim panel
  disp: msm: dsi: add check for any queued DSI CMDs before clock force update
  disp: msm: sde: correct pp block allocation during dcwb dither programming
  disp: msm: sde: avoid setting of max vblank count
  disp: msm: sde: add cached lut flag in sde plane
  disp: msm: sde: avoid use after free in msm_lastclose
  disp: msm: sde: update TEAR_SYNC_WRCOUNT register before vsync counter
  disp: msm: dsi: Support uncompressed rgb101010 format
  disp: msm: sde: update idle_pc_enabled flag for all encoders
  disp: msm: sde: flush esd work before disabling the encoder
  disp: msm: sde: allow qsync update along with modeset
  disp: msm: dp: avoid dp sw reset on disconnect path
  disp: msm: sde: consider max of actual and default prefill lines
  disp: msm: ensure vbif debugbus not in use is disabled
  disp: msm: sde: update cached encoder mask if required
  disp: msm: sde: while timing engine enabling poll for active region
  disp: msm: enable cache flag for dumb buffer
  disp: msm: sde: disable ot limit for cwb
  disp: msm: sde: avoid race condition at vm release
  disp: msm: dsi: set qsync min fps list length to zero
  disp: msm: sde: reset mixers in crtc when ctl datapath switches
  disp: msm: sde: update vm state atomic check for non-primary usecases
  disp: msm: sde: reset CTL_UIDLE_ACTIVE register only if uidle is disabled

Change-Id: If480e7f33743eb4788549f853ba05e744ecb38d3
Signed-off-by: Narendra Muppalla <quic_nmuppall@quicinc.com>
2022-02-08 16:38:13 -08:00


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 */

#include "sde_hw_ds.h"
#include "sde_formats.h"
#include "sde_dbg.h"
#include "sde_kms.h"

/* Destination scaler TOP registers */
#define DEST_SCALER_OP_MODE		0x00
#define DEST_SCALER_HW_VERSION		0x10
#define DEST_SCALER_MERGE_CTRL		0x0C

#define DEST_SCALER_DUAL_PIPE		1
#define DEST_SCALER_QUAD_PIPE		3

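/*
 * Program the destination scaler TOP op_mode register: OR in the requested
 * mode bits on enable, clear the whole register when leaving dual mode,
 * otherwise clear only this DS instance's enable bit.
 */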
static void sde_hw_ds_setup_opmode(struct sde_hw_ds *hw_ds, u32 op_mode)
{
	struct sde_hw_blk_reg_map *hw = &hw_ds->hw;
	u32 op_mode_val;

	op_mode_val = SDE_REG_READ(hw, DEST_SCALER_OP_MODE);

	if (op_mode)
		op_mode_val |= op_mode;
	else if (!op_mode && (op_mode_val & SDE_DS_OP_MODE_DUAL))
		op_mode_val = 0;
	else
		op_mode_val &= ~BIT(hw_ds->idx - DS_0);

	SDE_REG_WRITE(hw, DEST_SCALER_OP_MODE, op_mode_val);
}

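/*
 * v1 op_mode programming for targets with a per-scaler MERGE_CTRL register:
 * enable dual-pipe merge when SDE_DS_OP_MODE_DUAL is requested.
 */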
static void sde_hw_ds_setup_opmode_v1(struct sde_hw_ds *hw_ds, u32 op_mode)
{
	struct sde_hw_blk_reg_map *hw = &hw_ds->hw;

	if (op_mode & SDE_DS_OP_MODE_DUAL) {
		op_mode = DEST_SCALER_DUAL_PIPE;
		SDE_REG_WRITE(hw, DEST_SCALER_MERGE_CTRL + hw_ds->scl->base,
				op_mode);
	}
}

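/*
 * Program the QSEED3 destination scaler: copy any configured LUTs into the
 * scaler config and apply it, enabling DE LPF blending when the block
 * supports it.
 */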
static void sde_hw_ds_setup_scaler3(struct sde_hw_ds *hw_ds,
			void *scaler_cfg, void *scaler_lut_cfg)
{
	struct sde_hw_scaler3_cfg *scl3_cfg = scaler_cfg;
	struct sde_hw_scaler3_lut_cfg *scl3_lut_cfg = scaler_lut_cfg;
	bool de_lpf_en = false;

	if (!hw_ds || !hw_ds->scl || !scl3_cfg || !scl3_lut_cfg)
		return;

	/*
	 * copy LUT values to scaler structure
	 */
	if (scl3_lut_cfg->is_configured) {
		scl3_cfg->dir_lut = scl3_lut_cfg->dir_lut;
		scl3_cfg->dir_len = scl3_lut_cfg->dir_len;
		scl3_cfg->cir_lut = scl3_lut_cfg->cir_lut;
		scl3_cfg->cir_len = scl3_lut_cfg->cir_len;
		scl3_cfg->sep_lut = scl3_lut_cfg->sep_lut;
		scl3_cfg->sep_len = scl3_lut_cfg->sep_len;
	}

	if (test_bit(SDE_DS_DE_LPF_BLEND, &hw_ds->scl->features))
		de_lpf_en = true;

	sde_hw_setup_scaler3(&hw_ds->hw, scl3_cfg, hw_ds->scl->version,
			hw_ds->scl->base,
			sde_get_sde_format(DRM_FORMAT_XBGR2101010), de_lpf_en);
}

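/* Populate the ops table based on the feature bits from the catalog */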
static void _setup_ds_ops(struct sde_hw_ds_ops *ops, unsigned long features)
{
	if (test_bit(SDE_DS_MERGE_CTRL, &features))
		ops->setup_opmode = sde_hw_ds_setup_opmode_v1;
	else
		ops->setup_opmode = sde_hw_ds_setup_opmode;

	if (test_bit(SDE_SSPP_SCALER_QSEED3, &features) ||
			test_bit(SDE_SSPP_SCALER_QSEED3LITE, &features))
		ops->setup_scaler = sde_hw_ds_setup_scaler3;
}

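/*
 * Look up the catalog entry for the given DS instance and fill in its
 * register block mapping.
 */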
static struct sde_ds_cfg *_ds_offset(enum sde_ds ds,
		struct sde_mdss_cfg *m,
		void __iomem *addr,
		struct sde_hw_blk_reg_map *b)
{
	int i;

	if (!m || !addr || !b)
		return ERR_PTR(-EINVAL);

	for (i = 0; i < m->ds_count; i++) {
		if ((ds == m->ds[i].id) &&
			(m->ds[i].top)) {
			b->base_off = addr;
			b->blk_off = m->ds[i].top->base;
			b->length = m->ds[i].top->len;
			b->hw_rev = m->hw_rev;
			b->log_mask = SDE_DBG_MASK_DS;
			return &m->ds[i];
		}
	}

	return ERR_PTR(-EINVAL);
}

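/*
 * Allocate and initialize a destination scaler block: map its registers,
 * set up the ops, pick up the QSEED HW revision from the catalog and
 * register the block with the debug dump infrastructure.
 */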
struct sde_hw_blk_reg_map *sde_hw_ds_init(enum sde_ds idx,
		void __iomem *addr,
		struct sde_mdss_cfg *m)
{
	struct sde_hw_ds *hw_ds;
	struct sde_ds_cfg *cfg;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	hw_ds = kzalloc(sizeof(*hw_ds), GFP_KERNEL);
	if (!hw_ds)
		return ERR_PTR(-ENOMEM);

	cfg = _ds_offset(idx, m, addr, &hw_ds->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		SDE_ERROR("failed to get ds cfg\n");
		kfree(hw_ds);
		return ERR_PTR(-EINVAL);
	}

	/* Assign ops */
	hw_ds->idx = idx;
	hw_ds->scl = cfg;
	_setup_ds_ops(&hw_ds->ops, hw_ds->scl->features);

	if (m->qseed_hw_rev)
		hw_ds->scl->version = m->qseed_hw_rev;

	if (cfg->len) {
		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
				hw_ds->hw.blk_off + cfg->base,
				hw_ds->hw.blk_off + cfg->base + cfg->len,
				hw_ds->hw.xin_id);
	}

	return &hw_ds->hw;
}

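/* Free the sde_hw_ds container backing the given register map */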
void sde_hw_ds_destroy(struct sde_hw_blk_reg_map *hw)
{
	if (hw)
		kfree(to_sde_hw_ds(hw));
}