disp: msm: sde: add support for display emulation on RUMI.

Add support for display emulation targets on RUMI.

This change does the following:
-parse dt node to enable display emulation mode.
-use sde_reg_read for poll timeout ops and debugfs dump.
-increase the kickoff timeout when emulation is enabled.
-bypass AXI halt operation when emulation is enabled.

Change-Id: Idc493964c0b8fc89f5d85fcc5755e0874a12d211
Signed-off-by: Amine Najahi <quic_anajahi@quicinc.com>
This commit is contained in:
Amine Najahi
2022-01-20 17:09:14 -05:00
committed by Alex Danila
parent 249b73b0c7
commit 11672b46fc
12 changed files with 69 additions and 126 deletions

View File

@@ -4729,6 +4729,7 @@ u32 sde_encoder_helper_get_kickoff_timeout_ms(struct drm_encoder *drm_enc)
{
struct drm_encoder *src_enc = drm_enc;
struct sde_encoder_virt *sde_enc;
struct sde_kms *sde_kms;
u32 fps;
if (!drm_enc) {
@@ -4736,12 +4737,19 @@ u32 sde_encoder_helper_get_kickoff_timeout_ms(struct drm_encoder *drm_enc)
return DEFAULT_KICKOFF_TIMEOUT_MS;
}
sde_kms = sde_encoder_get_kms(drm_enc);
if (!sde_kms)
return DEFAULT_KICKOFF_TIMEOUT_MS;
if (sde_encoder_in_clone_mode(drm_enc))
src_enc = sde_crtc_get_src_encoder_of_clone(drm_enc->crtc);
if (!src_enc)
return DEFAULT_KICKOFF_TIMEOUT_MS;
if (test_bit(SDE_FEATURE_EMULATED_ENV, sde_kms->catalog->features))
return MAX_KICKOFF_TIMEOUT_MS;
sde_enc = to_sde_encoder_virt(src_enc);
fps = sde_enc->mode_info.frame_rate;

View File

@@ -26,6 +26,9 @@
/* wait for at most 2 vsync for lowest refresh rate (24hz) */
#define DEFAULT_KICKOFF_TIMEOUT_MS 84
/* wait for at most 100 sec (100000 ms) on the emulated targets */
#define MAX_KICKOFF_TIMEOUT_MS 100000
#define MAX_TE_PROFILE_COUNT 5
/**
* enum sde_enc_split_role - Role this physical encoder will play in a

View File

@@ -223,6 +223,7 @@ enum sde_prop {
MAX_TRUSTED_VM_DISPLAYS,
TVM_INCLUDE_REG,
IPCC_PROTOCOL_ID,
SDE_EMULATED_ENV,
SDE_PROP_MAX,
};
@@ -622,6 +623,7 @@ static struct sde_prop_type sde_prop[] = {
PROP_TYPE_U32},
{TVM_INCLUDE_REG, "qcom,tvm-include-reg", false, PROP_TYPE_U32_ARRAY},
{IPCC_PROTOCOL_ID, "qcom,sde-ipcc-protocol-id", false, PROP_TYPE_U32},
{SDE_EMULATED_ENV, "qcom,sde-emulated-env", false, PROP_TYPE_BOOL},
};
static struct sde_prop_type sde_perf_prop[] = {
@@ -4029,6 +4031,9 @@ static void _sde_top_parse_dt_helper(struct sde_mdss_cfg *cfg,
i * 2 + 1);
}
}
if (PROP_VALUE_ACCESS(props->values, SDE_EMULATED_ENV, 0))
set_bit(SDE_FEATURE_EMULATED_ENV, cfg->features);
}
static int sde_top_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)

View File

@@ -724,6 +724,7 @@ enum {
* @SDE_FEATURE_CTL_DONE Support for CTL DONE irq
* @SDE_FEATURE_SYS_CACHE_NSE Support for no-self-evict feature
* @SDE_FEATURE_HW_FENCE_IPCC HW fence supports ipcc signaling in dpu
* @SDE_FEATURE_EMULATED_ENV Emulated environment supported
* @SDE_FEATURE_MAX: MAX features value
*/
enum sde_mdss_features {
@@ -765,6 +766,7 @@ enum sde_mdss_features {
SDE_FEATURE_CTL_DONE,
SDE_FEATURE_SYS_CACHE_NSE,
SDE_FEATURE_HW_FENCE_IPCC,
SDE_FEATURE_EMULATED_ENV,
SDE_FEATURE_MAX
};

View File

@@ -596,6 +596,7 @@ static int sde_hw_intf_collect_misr(struct sde_hw_intf *intf, bool nonblock,
{
struct sde_hw_blk_reg_map *c = &intf->hw;
u32 ctrl = 0;
int rc = 0;
if (!misr_value)
return -EINVAL;
@@ -603,12 +604,8 @@ static int sde_hw_intf_collect_misr(struct sde_hw_intf *intf, bool nonblock,
ctrl = SDE_REG_READ(c, INTF_MISR_CTRL);
if (!nonblock) {
if (ctrl & MISR_CTRL_ENABLE) {
int rc;
rc = readl_poll_timeout(c->base_off + c->blk_off +
INTF_MISR_CTRL, ctrl,
(ctrl & MISR_CTRL_STATUS) > 0, 500,
84000);
rc = read_poll_timeout(sde_reg_read, ctrl, (ctrl & MISR_CTRL_STATUS) > 0,
500, false, 84000, c, INTF_MISR_CTRL);
if (rc)
return rc;
} else {
@@ -617,7 +614,7 @@ static int sde_hw_intf_collect_misr(struct sde_hw_intf *intf, bool nonblock,
}
*misr_value = SDE_REG_READ(c, INTF_MISR_SIGNATURE);
return 0;
return rc;
}
static u32 sde_hw_intf_get_line_count(struct sde_hw_intf *intf)
@@ -744,16 +741,13 @@ static int sde_hw_intf_poll_timeout_wr_ptr(struct sde_hw_intf *intf,
{
struct sde_hw_blk_reg_map *c;
u32 val;
int rc;
if (!intf)
return -EINVAL;
c = &intf->hw;
rc = readl_poll_timeout(c->base_off + c->blk_off + INTF_TEAR_LINE_COUNT,
val, (val & 0xffff) >= 1, 10, timeout_us);
return rc;
return read_poll_timeout(sde_reg_read, val, (val & 0xffff) >= 1, 10, false, timeout_us,
c, INTF_TEAR_LINE_COUNT);
}
static int sde_hw_intf_enable_te(struct sde_hw_intf *intf, bool enable)

View File

@@ -14,6 +14,7 @@
#include "sde_hw_mdss.h"
#include "sde_dbg.h"
#include "sde_kms.h"
#include "sde_hw_util.h"
#define LM_OP_MODE 0x00
#define LM_OUT_SIZE 0x04
@@ -257,6 +258,7 @@ static int sde_hw_lm_collect_misr(struct sde_hw_mixer *ctx, bool nonblock,
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
u32 ctrl = 0;
int rc = 0;
if (!misr_value)
return -EINVAL;
@@ -264,12 +266,8 @@ static int sde_hw_lm_collect_misr(struct sde_hw_mixer *ctx, bool nonblock,
ctrl = SDE_REG_READ(c, LM_MISR_CTRL);
if (!nonblock) {
if (ctrl & MISR_CTRL_ENABLE) {
int rc;
rc = readl_poll_timeout(c->base_off + c->blk_off +
LM_MISR_CTRL, ctrl,
(ctrl & MISR_CTRL_STATUS) > 0, 500,
84000);
rc = read_poll_timeout(sde_reg_read, ctrl, (ctrl & MISR_CTRL_STATUS) > 0,
500, false, 84000, c, LM_MISR_CTRL);
if (rc)
return rc;
} else {
@@ -279,7 +277,7 @@ static int sde_hw_lm_collect_misr(struct sde_hw_mixer *ctx, bool nonblock,
*misr_value = SDE_REG_READ(c, LM_MISR_SIGNATURE);
return 0;
return rc;
}
static void sde_hw_clear_noise_layer(struct sde_hw_mixer *ctx)

View File

@@ -247,16 +247,13 @@ static int sde_hw_pp_poll_timeout_wr_ptr(struct sde_hw_pingpong *pp,
{
struct sde_hw_blk_reg_map *c;
u32 val;
int rc;
if (!pp)
return -EINVAL;
c = &pp->hw;
rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
val, (val & 0xffff) >= 1, 10, timeout_us);
return rc;
return read_poll_timeout(sde_reg_read, val, (val & 0xffff) >= 1,
10, false, timeout_us, c, PP_LINE_COUNT);
}
static void sde_hw_pp_dsc_enable(struct sde_hw_pingpong *pp)

View File

@@ -1307,10 +1307,9 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode, ctl->idx, kick_off.queue_select,
kick_off.dma_type, kick_off.op);
if (mode == REG_DMA_WAIT4_COMP) {
rc = readl_poll_timeout(hw.base_off + hw.blk_off +
reg_dma_intr_status_offset, val,
(val & ctl_trigger_done_mask[ctl->idx][q]),
10, 20000);
rc = read_poll_timeout(sde_reg_read, val,
(val & ctl_trigger_done_mask[ctl->idx][q]), 10, false, 20000,
&hw, reg_dma_intr_status_offset);
if (rc)
DRM_ERROR("poll wait failed %d val %x mask %x\n",
rc, val, ctl_trigger_done_mask[ctl->idx][q]);

View File

@@ -201,8 +201,8 @@ static int sde_hw_get_axi_halt_status(struct sde_hw_vbif *vbif)
struct sde_hw_blk_reg_map *c = &vbif->hw;
int ctrl = 0;
return readl_poll_timeout(c->base_off + c->blk_off +
VBIF_AXI_HALT_CTRL1, ctrl, ctrl & BIT(0), 100, 4000);
return read_poll_timeout(sde_reg_read, ctrl, (ctrl & BIT(0)),
100, false, 4000, c, VBIF_AXI_HALT_CTRL1);
}
static void sde_hw_set_qos_remap(struct sde_hw_vbif *vbif,

View File

@@ -195,71 +195,6 @@ static int _sde_vbif_wait_for_axi_halt(struct sde_hw_vbif *vbif)
return rc;
}
/*
 * sde_vbif_halt_plane_xin - halt the xin client of an unused plane.
 * If the plane's xin client is not already halted: force the client
 * clock on (so it is not gated while halting), issue the halt request,
 * wait for the halt to take effect, then re-open the client and restore
 * the clock state.
 * @sde_kms: SDE handler
 * @xin_id: xin id of the unused plane
 * @clk_ctrl: clk ctrl type for the unused plane
 * Returns: 0 on success (or when vbif operations are not permitted),
 * error code otherwise.
 */
int sde_vbif_halt_plane_xin(struct sde_kms *sde_kms, u32 xin_id, u32 clk_ctrl)
{
struct sde_hw_vbif *vbif = NULL;
struct sde_hw_mdp *mdp;
bool forced_on = false;
bool status;
int rc = 0;
if (!sde_kms) {
SDE_ERROR("invalid argument\n");
return -EINVAL;
}
/* best-effort: silently succeed when vbif access is not allowed */
if (!sde_kms_is_vbif_operation_allowed(sde_kms)) {
SDE_DEBUG("vbif operations not permitted\n");
return 0;
}
/* plane xin clients are serviced by the real-time (RT) vbif */
vbif = sde_kms->hw_vbif[VBIF_RT];
mdp = sde_kms->hw_mdp;
if (!vbif || !mdp || !vbif->ops.get_xin_halt_status ||
!vbif->ops.set_xin_halt ||
!_sde_vbif_setup_clk_supported(sde_kms, clk_ctrl)) {
SDE_ERROR("invalid vbif or mdp arguments\n");
return -EINVAL;
}
mutex_lock(&vbif->mutex);
SDE_EVT32_VERBOSE(vbif->idx, xin_id);
/*
 * If status is 0, then make sure client clock is not gated
 * while halting by forcing it ON only if it was not previously
 * forced on. If status is 1 then its already halted.
 */
status = vbif->ops.get_xin_halt_status(vbif, xin_id);
if (status) {
mutex_unlock(&vbif->mutex);
return 0;
}
forced_on = _sde_vbif_setup_clk_force_ctrl(sde_kms, clk_ctrl, true);
/* send halt request for unused plane's xin client */
vbif->ops.set_xin_halt(vbif, xin_id, true);
rc = _sde_vbif_wait_for_xin_halt(vbif, xin_id);
if (rc) {
/* log and fall through: the client is still re-opened below */
SDE_ERROR(
"wait failed for pipe halt:xin_id %u, clk_ctrl %u, rc %u\n",
xin_id, clk_ctrl, rc);
SDE_EVT32(xin_id, clk_ctrl, rc, SDE_EVTLOG_ERROR);
}
/* open xin client to enable transactions */
vbif->ops.set_xin_halt(vbif, xin_id, false);
/* restore clock gating only if we forced it on above */
if (forced_on)
_sde_vbif_setup_clk_force_ctrl(sde_kms, clk_ctrl, false);
mutex_unlock(&vbif->mutex);
return rc;
}
/**
* _sde_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
* @vbif: Pointer to hardware vbif driver
@@ -392,6 +327,9 @@ void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
!vbif->ops.set_xin_halt)
return;
if (test_bit(SDE_FEATURE_EMULATED_ENV, sde_kms->catalog->features))
return;
mutex_lock(&vbif->mutex);
SDE_EVT32_VERBOSE(vbif->idx, params->xin_id);
@@ -718,6 +656,9 @@ void sde_vbif_axi_halt_request(struct sde_kms *sde_kms)
return;
}
if (test_bit(SDE_FEATURE_EMULATED_ENV, sde_kms->catalog->features))
return;
for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
vbif = sde_kms->hw_vbif[i];
if (vbif && vbif->cap && vbif->ops.set_axi_halt) {

View File

@@ -135,20 +135,6 @@ void sde_vbif_init_memtypes(struct sde_kms *sde_kms);
*/
void sde_vbif_axi_halt_request(struct sde_kms *sde_kms);
/**
* sde_vbif_halt_plane_xin - halts the xin client for the unused plane
 * For an unused plane, check whether the vbif xin client for this
 * plane is idle. If not, first force the plane's clock on, then send
 * the halt request, wait briefly, and re-check that the vbif client
 * has gone idle.
* @sde_kms: SDE handler
* @xin_id: xin id of the unused plane
* @clk_ctrl: clk ctrl type for the unused plane
* Returns: 0 on success, error code otherwise
*/
int sde_vbif_halt_plane_xin(struct sde_kms *sde_kms, u32 xin_id,
u32 clk_ctrl);
/**
* sde_vbif_halt_xin_mask - halts/unhalts all the xin clients present in
* the mask.

View File

@@ -21,6 +21,7 @@
#include "sde_dbg.h"
#include "sde/sde_hw_catalog.h"
#include "sde/sde_kms.h"
#include "sde/sde_hw_util.h"
#define SDE_DBG_BASE_MAX 10
@@ -2296,6 +2297,7 @@ static ssize_t sde_dbg_reg_base_reg_write(struct file *file,
u32 data, cnt;
char buf[24];
int rc;
struct sde_hw_blk_reg_map c = {0};
if (!file)
return -EINVAL;
@@ -2347,7 +2349,9 @@ static ssize_t sde_dbg_reg_base_reg_write(struct file *file,
goto end;
}
writel_relaxed(data, dbg->base + off);
c.base_off = dbg->base;
SDE_REG_WRITE(&c, off, data);
pm_runtime_put_sync(sde_dbg_base.dev);
@@ -2394,9 +2398,9 @@ static ssize_t sde_dbg_reg_base_reg_read(struct file *file,
}
if (!dbg->buf) {
struct sde_hw_blk_reg_map c = {0};
char dump_buf[64];
char *ptr;
int cnt, tot;
u32 cur_offset = 0, tot = 0;
dbg->buf_len = sizeof(dump_buf) *
DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
@@ -2412,9 +2416,6 @@ static ssize_t sde_dbg_reg_base_reg_read(struct file *file,
goto end;
}
ptr = dbg->base + dbg->off;
tot = 0;
rc = pm_runtime_resume_and_get(sde_dbg_base.dev);
if (rc < 0) {
pr_err("failed to enable power resource %d\n", rc);
@@ -2422,22 +2423,31 @@ static ssize_t sde_dbg_reg_base_reg_read(struct file *file,
goto end;
}
for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
ROW_BYTES, GROUP_BYTES, dump_buf,
sizeof(dump_buf), false);
len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
"0x%08x: %s\n",
((int) (unsigned long) ptr) -
((int) (unsigned long) dbg->base),
dump_buf);
c.base_off = dbg->base;
c.blk_off = dbg->off;
ptr += ROW_BYTES;
tot += len;
while (cur_offset < dbg->cnt) {
u32 reg_val;
if (cur_offset == 0) {
tot += scnprintf(dbg->buf + tot, dbg->buf_len - tot,
"0x%08x:", ((int) dbg->off) - cur_offset);
} else if (!(cur_offset % ROW_BYTES)) { // Header
tot += scnprintf(dbg->buf + tot, dbg->buf_len - tot,
"\n0x%08x:", ((int) dbg->off) - cur_offset);
}
reg_val = SDE_REG_READ(&c, cur_offset);
cur_offset += sizeof(reg_val);
tot += scnprintf(dbg->buf + tot, dbg->buf_len - tot,
" %08x", reg_val);
if (tot >= dbg->buf_len)
break;
}
tot += scnprintf(dbg->buf + tot, dbg->buf_len - tot, "\n");
pm_runtime_put_sync(sde_dbg_base.dev);
dbg->buf_len = tot;