Merge "disp: msm: sde: add sys cache usage for static image"
commit dfc3e3ddc8 (merged via Gerrit code review)
@@ -171,6 +171,7 @@ enum msm_mdp_crtc_property {
	CRTC_PROP_CAPTURE_OUTPUT,

	CRTC_PROP_IDLE_PC_STATE,
+	CRTC_PROP_CACHE_STATE,

	/* total # of properties */
	CRTC_PROP_COUNT
@@ -1223,9 +1223,14 @@ int msm_gem_delayed_import(struct drm_gem_object *obj)
	if (msm_obj->flags & MSM_BO_SKIPSYNC)
		attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

+	/*
+	 * All SMMU mapping are generated with cache hint.
+	 * SSPP cache hint will control the LLCC access.
+	 */
	if (msm_obj->flags & MSM_BO_KEEPATTRS)
		attach->dma_map_attrs |=
-			DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+			(DMA_ATTR_IOMMU_USE_UPSTREAM_HINT |
+			DMA_ATTR_IOMMU_USE_LLC_NWA);

	/*
	 * dma_buf_map_attachment will call dma_map_sg for ion buffer
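The hunk above ORs both IOMMU hints into dma_map_attrs when the buffer was allocated with MSM_BO_KEEPATTRS. As a rough standalone illustration of the flag-composition pattern (not part of the patch; the attribute bits and flag names below are placeholders, not the real kernel definitions):

#include <stdint.h>
#include <stdio.h>

/* Placeholder attribute bits; the real DMA_ATTR_* values live in the kernel. */
#define ATTR_SKIP_CPU_SYNC      (1UL << 0)
#define ATTR_USE_UPSTREAM_HINT  (1UL << 1)
#define ATTR_USE_LLC_NWA        (1UL << 2)

#define BO_SKIPSYNC   (1U << 0)
#define BO_KEEPATTRS  (1U << 1)

/* Compose mapping attributes from buffer-object flags, mirroring the hunk above. */
static unsigned long build_map_attrs(unsigned int bo_flags)
{
    unsigned long attrs = 0;

    if (bo_flags & BO_SKIPSYNC)
        attrs |= ATTR_SKIP_CPU_SYNC;
    if (bo_flags & BO_KEEPATTRS)
        attrs |= (ATTR_USE_UPSTREAM_HINT | ATTR_USE_LLC_NWA);
    return attrs;
}

int main(void)
{
    printf("attrs=0x%lx\n", build_map_attrs(BO_KEEPATTRS));
    return 0;
}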
@@ -229,6 +229,9 @@ static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
		return -ENOMEM;
	}

+	if (flags & MSM_BO_KEEPATTRS)
+		attrs |= DMA_ATTR_IOMMU_USE_LLC_NWA;
+
	/*
	 * For import buffer type, dma_map_sg_attrs is called during
	 * dma_buf_map_attachment and is not required to call again
@@ -21,6 +21,7 @@
#include "sde_trace.h"
#include "sde_crtc.h"
#include "sde_encoder.h"
#include "sde_hw_catalog.h"
#include "sde_core_perf.h"

#define SDE_PERF_MODE_STRING_SIZE 128
@@ -293,19 +294,20 @@ static inline enum sde_crtc_client_type _get_sde_client_type(
/**
 * @_sde_core_perf_activate_llcc() - Activates/deactivates the system llcc
 * @kms - pointer to the kms
- * @uid - ID for which the llcc would be activated
+ * @type - llcc type to be activated
 * @activate - boolean to indicate if activate/deactivate the LLCC
 *
 * Function assumes that caller has already acquired the "sde_core_perf_lock",
 * which would protect from any race condition between CRTC's
 */
static int _sde_core_perf_activate_llcc(struct sde_kms *kms,
-		u32 uid, bool activate)
+		enum sde_sys_cache_type type, bool activate)
{
	struct llcc_slice_desc *slice;
	struct drm_device *drm_dev;
	struct device *dev;
	struct platform_device *pdev;
+	u32 llcc_id[SDE_SYS_CACHE_MAX] = {LLCC_ROTATOR, LLCC_DISP};
	int rc = 0;

	if (!kms || !kms->dev || !kms->dev->dev) {
@@ -319,51 +321,57 @@ static int _sde_core_perf_activate_llcc(struct sde_kms *kms,
	pdev = to_platform_device(dev);

	/* If LLCC is already in the requested state, skip */
-	SDE_EVT32(activate, kms->perf.llcc_active);
-	if ((activate && kms->perf.llcc_active) ||
-			(!activate && !kms->perf.llcc_active)) {
-		SDE_DEBUG("skip llcc request:%d state:%d\n",
-				activate, kms->perf.llcc_active);
+	SDE_EVT32(activate, type, kms->perf.llcc_active[type]);
+	if ((activate && kms->perf.llcc_active[type]) ||
+			(!activate && !kms->perf.llcc_active[type])) {
+		SDE_DEBUG("skip llcc type:%d request:%d state:%d\n",
+				type, activate, kms->perf.llcc_active[type]);
		goto exit;
	}

-	SDE_DEBUG("activate/deactivate the llcc request:%d state:%d\n",
-		activate, kms->perf.llcc_active);
+	SDE_DEBUG("%sactivate the llcc type:%d state:%d\n",
+		activate ? "" : "de",
+		type, kms->perf.llcc_active[type]);

-	slice = llcc_slice_getd(uid);
+	slice = llcc_slice_getd(llcc_id[type]);
	if (IS_ERR_OR_NULL(slice)) {
-		SDE_ERROR("failed to get llcc slice for uid:%d\n", uid);
+		SDE_ERROR("failed to get llcc slice for uid:%d\n",
+				llcc_id[type]);
		rc = -EINVAL;
		goto exit;
	}

	if (activate) {
		llcc_slice_activate(slice);
-		kms->perf.llcc_active = true;
+		kms->perf.llcc_active[type] = true;
	} else {
		llcc_slice_deactivate(slice);
-		kms->perf.llcc_active = false;
+		kms->perf.llcc_active[type] = false;
	}

exit:
	if (rc)
-		SDE_ERROR("error activating llcc:%d rc:%d\n",
-			activate, rc);
+		SDE_ERROR("error %sactivating llcc type:%d rc:%d\n",
+			activate ? "" : "de", type, rc);
	return rc;

}

-static void _sde_core_perf_crtc_update_llcc(struct sde_kms *kms,
-		struct drm_crtc *crtc)
+static void _sde_core_perf_crtc_set_llcc_cache_type(struct sde_kms *kms,
+		struct drm_crtc *crtc,
+		enum sde_sys_cache_type type)
{
	struct drm_crtc *tmp_crtc;
	struct sde_crtc *sde_crtc;
+	struct sde_sc_cfg *sc_cfg = kms->perf.catalog->sc_cfg;
+	struct sde_core_perf_params *cur_perf;
	enum sde_crtc_client_type curr_client_type
		= sde_crtc_get_client_type(crtc);
-	u32 total_llcc_active = 0;
+	u32 llcc_active = 0;

-	if (!kms->perf.catalog->sc_cfg.has_sys_cache) {
-		SDE_DEBUG("System Cache is not enabled!. Won't use\n");
+	if (!sc_cfg[type].has_sys_cache) {
+		SDE_DEBUG("System Cache %d is not enabled!. Won't use\n",
+				type);
		return;
	}
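For reference, the per-type bookkeeping added above boils down to a lookup-and-toggle pattern: map the cache type to an LLCC usage ID, skip when the slice is already in the requested state, otherwise flip it. The sketch below is a standalone model of that logic only; the llcc_* call and the LLCC_ROTATOR/LLCC_DISP values are stand-ins, not the real kernel API:

#include <stdbool.h>
#include <stdio.h>

enum sde_sys_cache_type { SDE_SYS_CACHE_ROT, SDE_SYS_CACHE_DISP, SDE_SYS_CACHE_MAX };

/* Stand-ins for the SoC LLCC usage IDs and slice API. */
enum { LLCC_ROTATOR = 4, LLCC_DISP = 16 };
static void llcc_slice_set(int usage_id, bool on)
{
    printf("llcc usage %d -> %s\n", usage_id, on ? "active" : "inactive");
}

static bool llcc_active[SDE_SYS_CACHE_MAX];

/* Activate/deactivate one cache type, skipping when already in that state. */
static void activate_llcc(enum sde_sys_cache_type type, bool activate)
{
    static const int llcc_id[SDE_SYS_CACHE_MAX] = { LLCC_ROTATOR, LLCC_DISP };

    if (llcc_active[type] == activate)
        return;                     /* nothing to do */
    llcc_slice_set(llcc_id[type], activate);
    llcc_active[type] = activate;
}

int main(void)
{
    activate_llcc(SDE_SYS_CACHE_DISP, true);
    activate_llcc(SDE_SYS_CACHE_DISP, true);   /* skipped */
    activate_llcc(SDE_SYS_CACHE_DISP, false);
    return 0;
}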
@@ -374,23 +382,72 @@ static void _sde_core_perf_crtc_update_llcc(struct sde_kms *kms,

		/* use current perf, which are the values voted */
		sde_crtc = to_sde_crtc(tmp_crtc);
-		total_llcc_active |=
-			sde_crtc->cur_perf.llcc_active;
+		cur_perf = &sde_crtc->cur_perf;
+		llcc_active |= cur_perf->llcc_active[type];

-		SDE_DEBUG("crtc=%d llcc:%u active:0x%x\n",
-			tmp_crtc->base.id,
-			sde_crtc->cur_perf.llcc_active,
-			total_llcc_active);
-
-		if (total_llcc_active)
-			break;
+		SDE_DEBUG("crtc=%d type:%d llcc:%u active:0x%x\n",
+			tmp_crtc->base.id, type,
+			cur_perf->llcc_active[type],
+			llcc_active);
		}
	}

-	_sde_core_perf_activate_llcc(kms, LLCC_ROTATOR,
-		total_llcc_active ? true : false);
+	_sde_core_perf_activate_llcc(kms, type,
+		llcc_active ? true : false);
}

+void sde_core_perf_crtc_update_llcc(struct drm_crtc *crtc)
+{
+	struct sde_kms *kms;
+	struct sde_crtc *sde_crtc;
+	struct sde_core_perf_params *old, *new;
+	int update_llcc[SDE_SYS_CACHE_MAX] = {0, 0};
+	int i;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	kms = _sde_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+
+	old = &sde_crtc->cur_perf;
+	new = &sde_crtc->new_perf;
+
+	mutex_lock(&sde_core_perf_lock);
+
+	if (_sde_core_perf_crtc_is_power_on(crtc)) {
+		for (i = 0; i < SDE_SYS_CACHE_MAX; i++) {
+			if (new->llcc_active[i] != old->llcc_active[i]) {
+				SDE_DEBUG("crtc=%d llcc=%d new=%d old=%d",
+					crtc->base.id, i,
+					new->llcc_active[i],
+					old->llcc_active[i]);
+
+				old->llcc_active[i] =
+					new->llcc_active[i];
+				update_llcc[i] = 1;
+			}
+		}
+	} else {
+		for (i = 0; i < SDE_SYS_CACHE_MAX; i++)
+			update_llcc[i] = 1;
+	}
+
+	for (i = 0; i < SDE_SYS_CACHE_MAX; i++) {
+		if (update_llcc[i])
+			_sde_core_perf_crtc_set_llcc_cache_type(kms, crtc, i);
+	}
+
+	mutex_unlock(&sde_core_perf_lock);
+};

static void _sde_core_uidle_setup_wd(struct sde_kms *kms,
	bool enable)
{
@@ -749,11 +806,98 @@ static u64 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms)
	return clk_rate;
}

+static void _sde_core_perf_crtc_update_check(struct drm_crtc *crtc,
+		int params_changed,
+		int *update_bus, int *update_clk)
+{
+	struct sde_kms *kms = _sde_crtc_get_kms(crtc);
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	struct sde_core_perf_params *old = &sde_crtc->cur_perf;
+	struct sde_core_perf_params *new = &sde_crtc->new_perf;
+	int i;
+
+	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		/*
+		 * cases for bus bandwidth update.
+		 * 1. new bandwidth vote - "ab or ib vote" is higher
+		 *    than current vote for update request.
+		 * 2. new bandwidth vote - "ab or ib vote" is lower
+		 *    than current vote at end of commit or stop.
+		 */
+
+		if ((params_changed &&
+				(new->bw_ctl[i] > old->bw_ctl[i])) ||
+				(!params_changed &&
+				(new->bw_ctl[i] < old->bw_ctl[i]))) {
+
+			SDE_DEBUG(
+				"crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+				crtc->base.id, params_changed,
+				new->bw_ctl[i], old->bw_ctl[i]);
+			old->bw_ctl[i] = new->bw_ctl[i];
+			*update_bus |= BIT(i);
+		}
+
+		if ((params_changed &&
+				(new->max_per_pipe_ib[i] >
+				old->max_per_pipe_ib[i])) ||
+				(!params_changed &&
+				(new->max_per_pipe_ib[i] <
+				old->max_per_pipe_ib[i]))) {
+
+			SDE_DEBUG(
+				"crtc=%d p=%d new_ib=%llu,old_ib=%llu\n",
+				crtc->base.id, params_changed,
+				new->max_per_pipe_ib[i],
+				old->max_per_pipe_ib[i]);
+			old->max_per_pipe_ib[i] =
+				new->max_per_pipe_ib[i];
+			*update_bus |= BIT(i);
+		}
+
+		/* display rsc override during solver mode */
+		if (kms->perf.bw_vote_mode == DISP_RSC_MODE &&
+				get_sde_rsc_current_state(SDE_RSC_INDEX) !=
+				SDE_RSC_CLK_STATE) {
+			/* update new bandwidth in all cases */
+			if (params_changed && ((new->bw_ctl[i] !=
+					old->bw_ctl[i]) ||
+					(new->max_per_pipe_ib[i] !=
+					old->max_per_pipe_ib[i]))) {
+				old->bw_ctl[i] = new->bw_ctl[i];
+				old->max_per_pipe_ib[i] =
+					new->max_per_pipe_ib[i];
+				*update_bus |= BIT(i);
+			/*
+			 * reduce bw vote is not required in solver
+			 * mode
+			 */
+			} else if (!params_changed) {
+				*update_bus &= ~BIT(i);
+			}
+		}
+	}
+
+	if (kms->perf.perf_tune.mode_changed &&
+			kms->perf.perf_tune.min_core_clk)
+		new->core_clk_rate = kms->perf.perf_tune.min_core_clk;
+
+	if ((params_changed &&
+			(new->core_clk_rate > old->core_clk_rate)) ||
+			(!params_changed && new->core_clk_rate &&
+			(new->core_clk_rate < old->core_clk_rate)) ||
+			kms->perf.perf_tune.mode_changed) {
+		old->core_clk_rate = new->core_clk_rate;
+		*update_clk = 1;
+		kms->perf.perf_tune.mode_changed = false;
+	}
+}
+
void sde_core_perf_crtc_update(struct drm_crtc *crtc,
		int params_changed, bool stop_req)
{
	struct sde_core_perf_params *new, *old;
-	int update_bus = 0, update_clk = 0, update_llcc = 0;
+	int update_bus = 0, update_clk = 0;
	u64 clk_rate = 0;
	struct sde_crtc *sde_crtc;
	struct sde_crtc_state *sde_cstate;
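The rule encoded above (vote up immediately when parameters change, vote down only at the end of a commit or on stop) can be restated in a few lines. This is an illustrative standalone summary, not driver code:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Returns true when the stored vote should be refreshed:
 *  - during an update request (params_changed) only if the new vote is higher,
 *  - at end of commit / stop (!params_changed) only if the new vote is lower.
 */
static bool should_refresh_vote(bool params_changed, uint64_t new_vote, uint64_t old_vote)
{
    return (params_changed && new_vote > old_vote) ||
           (!params_changed && new_vote < old_vote);
}

int main(void)
{
    assert(should_refresh_vote(true, 200, 100));   /* raise right away */
    assert(!should_refresh_vote(true, 50, 100));   /* never lower mid-frame */
    assert(should_refresh_vote(false, 50, 100));   /* lower after commit */
    return 0;
}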
@@ -793,104 +937,14 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
	new = &sde_crtc->new_perf;

	if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
-
-		/*
-		 * cases for the llcc update.
-		 * 1. llcc is transitioning: 'inactive->active' during kickoff,
-		 *    for current request.
-		 * 2. llcc is transitioning: 'active->inactive' at the end of the
-		 *    commit or during stop
-		 */
-
-		if ((params_changed &&
-				new->llcc_active && !old->llcc_active) ||
-				(!params_changed &&
-				!new->llcc_active && old->llcc_active)) {
-
-			SDE_DEBUG("crtc=%d p=%d new_llcc=%d, old_llcc=%d\n",
-				crtc->base.id, params_changed,
-				new->llcc_active, old->llcc_active);
-
-			old->llcc_active = new->llcc_active;
-			update_llcc = 1;
-		}
-
-		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
-			/*
-			 * cases for bus bandwidth update.
-			 * 1. new bandwidth vote - "ab or ib vote" is higher
-			 *    than current vote for update request.
-			 * 2. new bandwidth vote - "ab or ib vote" is lower
-			 *    than current vote at end of commit or stop.
-			 */
-
-			if ((params_changed &&
-					(new->bw_ctl[i] > old->bw_ctl[i])) ||
-					(!params_changed &&
-					(new->bw_ctl[i] < old->bw_ctl[i]))) {
-
-				SDE_DEBUG(
-					"crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
-					crtc->base.id, params_changed,
-					new->bw_ctl[i], old->bw_ctl[i]);
-				old->bw_ctl[i] = new->bw_ctl[i];
-				update_bus |= BIT(i);
-			}
-
-			if ((params_changed &&
-					(new->max_per_pipe_ib[i] >
-					old->max_per_pipe_ib[i])) ||
-					(!params_changed &&
-					(new->max_per_pipe_ib[i] <
-					old->max_per_pipe_ib[i]))) {
-
-				SDE_DEBUG(
-					"crtc=%d p=%d new_ib=%llu,old_ib=%llu\n",
-					crtc->base.id, params_changed,
-					new->max_per_pipe_ib[i],
-					old->max_per_pipe_ib[i]);
-				old->max_per_pipe_ib[i] =
-					new->max_per_pipe_ib[i];
-				update_bus |= BIT(i);
-			}
-
-			/* display rsc override during solver mode */
-			if (kms->perf.bw_vote_mode == DISP_RSC_MODE &&
-					get_sde_rsc_current_state(SDE_RSC_INDEX) !=
-					SDE_RSC_CLK_STATE) {
-				/* update new bandwidth in all cases */
-				if (params_changed && ((new->bw_ctl[i] !=
-						old->bw_ctl[i]) ||
-						(new->max_per_pipe_ib[i] !=
-						old->max_per_pipe_ib[i]))) {
-					old->bw_ctl[i] = new->bw_ctl[i];
-					old->max_per_pipe_ib[i] =
-						new->max_per_pipe_ib[i];
-					update_bus |= BIT(i);
-				/*
-				 * reduce bw vote is not required in solver
-				 * mode
-				 */
-				} else if (!params_changed) {
-					update_bus &= ~BIT(i);
-				}
-			}
-		}
-
-		if ((params_changed &&
-				(new->core_clk_rate > old->core_clk_rate)) ||
-				(!params_changed && new->core_clk_rate &&
-				(new->core_clk_rate < old->core_clk_rate))) {
-			old->core_clk_rate = new->core_clk_rate;
-			update_clk = 1;
-		}
+		_sde_core_perf_crtc_update_check(crtc, params_changed,
+				&update_bus, &update_clk);
	} else {
		SDE_DEBUG("crtc=%d disable\n", crtc->base.id);
		memset(old, 0, sizeof(*old));
		memset(new, 0, sizeof(*new));
		update_bus = ~0;
		update_clk = 1;
-		update_llcc = 1;
	}
	trace_sde_perf_crtc_update(crtc->base.id,
		new->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC],
@@ -902,9 +956,6 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
		new->core_clk_rate, stop_req,
		update_bus, update_clk, params_changed);

-	if (update_llcc)
-		_sde_core_perf_crtc_update_llcc(kms, crtc);
-
	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
		if (update_bus & BIT(i))
			_sde_core_perf_crtc_update_bus(kms, crtc, i);
@@ -1056,6 +1107,7 @@ static ssize_t _sde_core_perf_mode_write(struct file *file,
		DRM_INFO("normal performance mode\n");
	}
	perf->perf_tune.mode = perf_mode;
+	perf->perf_tune.mode_changed = true;

	return count;
}
@@ -39,7 +39,7 @@ struct sde_core_perf_params {
	u64 max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MAX];
	u64 bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MAX];
	u64 core_clk_rate;
-	bool llcc_active;
+	bool llcc_active[SDE_SYS_CACHE_MAX];
};

/**
@@ -47,11 +47,13 @@ struct sde_core_perf_params {
 * @mode: performance mode
 * @min_core_clk: minimum core clock
 * @min_bus_vote: minimum bus vote
+ * @mode_changed: indicate if clock tuning strategy changed
 */
struct sde_core_perf_tune {
	u32 mode;
	u64 min_core_clk;
	u64 min_bus_vote;
+	bool mode_changed;
};

/**
@@ -92,10 +94,16 @@ struct sde_core_perf {
	u32 bw_vote_mode;
	bool sde_rsc_available;
	bool bw_vote_mode_updated;
-	bool llcc_active;
+	bool llcc_active[SDE_SYS_CACHE_MAX];
	bool uidle_enabled;
};

/**
+ * sde_core_perf_crtc_update_llcc - update llcc performance for crtc
+ * @crtc: Pointer to crtc
+ */
+void sde_core_perf_crtc_update_llcc(struct drm_crtc *crtc);
+
+/**
 * sde_core_perf_crtc_check - validate performance of the given crtc state
 * @crtc: Pointer to crtc
@@ -3295,7 +3295,7 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
	struct msm_drm_thread *event_thread;
	struct sde_crtc_state *cstate;
	struct sde_kms *sde_kms;
-	int idle_time = 0;
+	int idle_time = 0, i;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		SDE_ERROR("invalid crtc\n");
@@ -3334,6 +3334,12 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
	event_thread = &priv->event_thread[crtc->index];
	idle_time = sde_crtc_get_property(cstate, CRTC_PROP_IDLE_TIMEOUT);

+	if (sde_crtc_get_property(cstate, CRTC_PROP_CACHE_STATE))
+		sde_crtc_static_img_control(crtc, CACHE_STATE_FRAME_WRITE,
+				false);
+	else
+		sde_crtc_static_img_control(crtc, CACHE_STATE_NORMAL, false);
+
	/*
	 * If no mixers has been allocated in sde_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
@@ -3354,13 +3360,18 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
	 * so during the perf update driver can activate/deactivate
	 * the cache accordingly.
	 */
-	sde_crtc->new_perf.llcc_active = false;
+	for (i = 0; i < SDE_SYS_CACHE_MAX; i++)
+		sde_crtc->new_perf.llcc_active[i] = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		sde_plane_restore(plane);

-		if (sde_plane_is_cache_required(plane))
-			sde_crtc->new_perf.llcc_active = true;
+		for (i = 0; i < SDE_SYS_CACHE_MAX; i++) {
+			if (sde_plane_is_cache_required(plane, i))
+				sde_crtc->new_perf.llcc_active[i] = true;
+		}
	}
	sde_core_perf_crtc_update_llcc(crtc);

	/* wait for acquire fences before anything else is done */
	_sde_crtc_wait_for_fences(crtc);
@@ -3969,6 +3980,22 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg)
	mutex_unlock(&sde_crtc->crtc_lock);
}

+static void _sde_crtc_reset(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
+
+	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
+	sde_crtc->num_mixers = 0;
+	sde_crtc->mixers_swapped = false;
+
+	/* disable clk & bw control until clk & bw properties are set */
+	cstate->bw_control = false;
+	cstate->bw_split_vote = false;
+
+	sde_crtc_static_img_control(crtc, CACHE_STATE_DISABLED, false);
+}
+
static void sde_crtc_disable(struct drm_crtc *crtc)
{
	struct sde_kms *sde_kms;
@@ -4092,14 +4119,7 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
			ktime_get());
	}

	_sde_crtc_clear_all_blend_stages(sde_crtc);
-	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
-	sde_crtc->num_mixers = 0;
-	sde_crtc->mixers_swapped = false;
-
-	/* disable clk & bw control until clk & bw properties are set */
-	cstate->bw_control = false;
-	cstate->bw_split_vote = false;
+	_sde_crtc_reset(crtc);

	mutex_unlock(&sde_crtc->crtc_lock);
}
@@ -4168,6 +4188,9 @@ static void sde_crtc_enable(struct drm_crtc *crtc,
			crtc->state->encoder_mask) {
		sde_encoder_register_frame_event_callback(encoder,
				sde_crtc_frame_event_cb, crtc);
+		sde_crtc_static_img_control(crtc, CACHE_STATE_NORMAL,
+				sde_encoder_check_curr_mode(encoder,
+				MSM_DISPLAY_VIDEO_MODE));
	}

	sde_crtc->enabled = true;
@@ -5124,6 +5147,11 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
		{IDLE_PC_DISABLE, "idle_pc_disable"},
	};

+	static const struct drm_prop_enum_list e_cache_state[] = {
+		{CACHE_STATE_DISABLED, "cache_state_disabled"},
+		{CACHE_STATE_ENABLED, "cache_state_enabled"},
+	};
+
	SDE_DEBUG("\n");

	if (!crtc || !catalog) {
@@ -5182,6 +5210,11 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
			ARRAY_SIZE(e_secure_level),
			CRTC_PROP_SECURITY_LEVEL);

+	msm_property_install_enum(&sde_crtc->property_info, "cache_state",
+			0x0, 0, e_cache_state,
+			ARRAY_SIZE(e_cache_state),
+			CRTC_PROP_CACHE_STATE);
+
	if (catalog->has_dim_layer) {
		msm_property_install_volatile_range(&sde_crtc->property_info,
			"dim_layer_v1", 0x0, 0, ~0, 0, CRTC_PROP_DIM_LAYER_V1);
@@ -6143,6 +6176,121 @@ static int _sde_crtc_init_events(struct sde_crtc *sde_crtc)
	return rc;
}

+void sde_crtc_static_img_control(struct drm_crtc *crtc,
+		enum sde_crtc_cache_state state,
+		bool is_vidmode)
+{
+	struct drm_plane *plane;
+	struct sde_crtc *sde_crtc;
+
+	if (!crtc || !crtc->dev)
+		return;
+
+	sde_crtc = to_sde_crtc(crtc);
+
+	switch (state) {
+	case CACHE_STATE_NORMAL:
+		if (sde_crtc->cache_state == CACHE_STATE_DISABLED
+				&& !is_vidmode)
+			return;
+
+		kthread_cancel_delayed_work_sync(
+				&sde_crtc->static_cache_read_work);
+		break;
+	case CACHE_STATE_PRE_CACHE:
+		if (sde_crtc->cache_state != CACHE_STATE_NORMAL)
+			return;
+		break;
+	case CACHE_STATE_FRAME_WRITE:
+		if (sde_crtc->cache_state != CACHE_STATE_PRE_CACHE)
+			return;
+		break;
+	case CACHE_STATE_FRAME_READ:
+		if (sde_crtc->cache_state != CACHE_STATE_FRAME_WRITE)
+			return;
+		break;
+	case CACHE_STATE_DISABLED:
+		break;
+	default:
+		return;
+	}
+
+	sde_crtc->cache_state = state;
+	drm_atomic_crtc_for_each_plane(plane, crtc)
+		sde_plane_static_img_control(plane, state);
+}
+
+/*
+ * __sde_crtc_static_cache_read_work - transition to cache read
+ */
+void __sde_crtc_static_cache_read_work(struct kthread_work *work)
+{
+	struct sde_crtc *sde_crtc = container_of(work, struct sde_crtc,
+			static_cache_read_work.work);
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	struct sde_crtc_mixer *mixer;
+	struct sde_hw_ctl *ctl;
+
+	if (!sde_crtc)
+		return;
+
+	crtc = &sde_crtc->base;
+	mixer = sde_crtc->mixers;
+	if (!mixer)
+		return;
+
+	ctl = mixer->hw_ctl;
+
+	if (sde_crtc->cache_state != CACHE_STATE_FRAME_WRITE ||
+			!ctl->ops.update_bitmask_ctl ||
+			!ctl->ops.trigger_flush)
+		return;
+
+	sde_crtc_static_img_control(crtc, CACHE_STATE_FRAME_READ, false);
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		if (!plane->state)
+			continue;
+
+		sde_plane_ctl_flush(plane, ctl, true);
+	}
+	ctl->ops.update_bitmask_ctl(ctl, true);
+	ctl->ops.trigger_flush(ctl);
+}
+
+void sde_crtc_static_cache_read_kickoff(struct drm_crtc *crtc)
+{
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	struct msm_drm_thread *disp_thread;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	u32 msecs_fps = 0;
+
+	if (!crtc)
+		return;
+
+	dev = crtc->dev;
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+
+	if (!dev || !dev->dev_private || !sde_crtc)
+		return;
+
+	priv = dev->dev_private;
+	disp_thread = &priv->disp_thread[crtc->index];
+
+	if (sde_crtc->cache_state != CACHE_STATE_FRAME_WRITE)
+		return;
+
+	msecs_fps = DIV_ROUND_UP((1 * 1000), sde_crtc_get_fps_mode(crtc));
+
+	/* Kickoff transition to read state after next vblank */
+	kthread_queue_delayed_work(&disp_thread->worker,
+			&sde_crtc->static_cache_read_work,
+			msecs_to_jiffies(msecs_fps));
+}
+
/*
 * __sde_crtc_idle_notify_work - signal idle timeout to user space
 */
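The kickoff above defers the write-to-read transition by roughly one frame time, computed as DIV_ROUND_UP(1000, fps) milliseconds; sde_crtc_get_fps_mode() is assumed to return the current refresh rate. A standalone check of that arithmetic:

#include <stdio.h>

/* Same rounding-up division the kernel's DIV_ROUND_UP macro performs. */
static unsigned int div_round_up(unsigned int n, unsigned int d)
{
    return (n + d - 1) / d;
}

int main(void)
{
    unsigned int fps[] = { 30, 60, 90, 120 };

    for (unsigned int i = 0; i < sizeof(fps) / sizeof(fps[0]); i++)
        printf("%3u fps -> delay %u ms\n", fps[i], div_round_up(1000, fps[i]));
    /* 30 -> 34, 60 -> 17, 90 -> 12, 120 -> 9 */
    return 0;
}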
@@ -6164,6 +6312,8 @@ static void __sde_crtc_idle_notify_work(struct kthread_work *work)
				&event, (u8 *)&ret);

		SDE_DEBUG("crtc[%d]: idle timeout notified\n", crtc->base.id);
+
+		sde_crtc_static_img_control(crtc, CACHE_STATE_PRE_CACHE, false);
	}
}
@@ -6252,11 +6402,15 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
	sde_cp_crtc_init(crtc);
	sde_cp_crtc_install_properties(crtc);

-	sde_crtc->cur_perf.llcc_active = false;
-	sde_crtc->new_perf.llcc_active = false;
+	for (i = 0; i < SDE_SYS_CACHE_MAX; i++) {
+		sde_crtc->cur_perf.llcc_active[i] = false;
+		sde_crtc->new_perf.llcc_active[i] = false;
+	}

	kthread_init_delayed_work(&sde_crtc->idle_notify_work,
					__sde_crtc_idle_notify_work);
+	kthread_init_delayed_work(&sde_crtc->static_cache_read_work,
+					__sde_crtc_static_cache_read_work);

	SDE_DEBUG("crtc=%d new_llcc=%d, old_llcc=%d\n",
		crtc->base.id,
@@ -70,6 +70,24 @@ enum sde_crtc_idle_pc_state {
	IDLE_PC_DISABLE,
};

+/**
+ * enum sde_crtc_cache_state: states of disp system cache
+ * CACHE_STATE_DISABLED: sys cache has been disabled
+ * CACHE_STATE_ENABLED: sys cache has been enabled
+ * CACHE_STATE_NORMAL: sys cache is normal state
+ * CACHE_STATE_PRE_CACHE: frame cache is being prepared
+ * CACHE_STATE_FRAME_WRITE: sys cache is being written to
+ * CACHE_STATE_FRAME_READ: sys cache is being read
+ */
+enum sde_crtc_cache_state {
+	CACHE_STATE_DISABLED,
+	CACHE_STATE_ENABLED,
+	CACHE_STATE_NORMAL,
+	CACHE_STATE_PRE_CACHE,
+	CACHE_STATE_FRAME_WRITE,
+	CACHE_STATE_FRAME_READ
+};
+
/**
 * @connectors : Currently associated drm connectors for retire event
 * @num_connectors: Number of associated drm connectors for retire event
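Combined with sde_crtc_static_img_control() earlier in this change, these states form a strictly ordered machine: NORMAL -> PRE_CACHE (on idle notify) -> FRAME_WRITE (cache_state property set at flush) -> FRAME_READ (delayed work after commit), with DISABLED reachable from anywhere. A compact standalone model of the allowed transitions, for illustration only:

#include <stdbool.h>
#include <stdio.h>

enum cache_state { DISABLED, ENABLED, NORMAL, PRE_CACHE, FRAME_WRITE, FRAME_READ };

/* Mirrors the guard conditions in sde_crtc_static_img_control(). */
static bool transition_ok(enum cache_state cur, enum cache_state next, bool is_vidmode)
{
    switch (next) {
    case NORMAL:      return cur != DISABLED || is_vidmode;
    case PRE_CACHE:   return cur == NORMAL;
    case FRAME_WRITE: return cur == PRE_CACHE;
    case FRAME_READ:  return cur == FRAME_WRITE;
    case DISABLED:    return true;
    default:          return false;
    }
}

int main(void)
{
    printf("%d\n", transition_ok(NORMAL, PRE_CACHE, false));   /* 1 */
    printf("%d\n", transition_ok(NORMAL, FRAME_READ, false));  /* 0: must write first */
    return 0;
}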
@@ -256,6 +274,8 @@ struct sde_crtc_misr_info {
 * @needs_hw_reset : Initiate a hw ctl reset
 * @src_bpp : source bpp used to calculate compression ratio
 * @target_bpp : target bpp used to calculate compression ratio
+ * @static_cache_read_work: delayed worker to transition cache state to read
+ * @cache_state : Current static image cache state
 */
struct sde_crtc {
	struct drm_crtc base;
@@ -340,6 +360,9 @@ struct sde_crtc {

	int src_bpp;
	int target_bpp;

+	struct kthread_delayed_work static_cache_read_work;
+	enum sde_crtc_cache_state cache_state;
};

enum sde_crtc_dirty_flags {
@@ -858,4 +881,19 @@ static inline void sde_crtc_set_bpp(struct sde_crtc *sde_crtc, int src_bpp,
	sde_crtc->target_bpp = target_bpp;
}

+/**
+ * sde_crtc_static_img_control - transition static img cache state
+ * @crtc: Pointer to drm crtc structure
+ * @state: cache state to transition to
+ * @is_vidmode: if encoder is video mode
+ */
+void sde_crtc_static_img_control(struct drm_crtc *crtc,
+		enum sde_crtc_cache_state state, bool is_vidmode);
+
+/**
+ * sde_crtc_static_cache_read_kickoff - kickoff cache read work
+ * @crtc: Pointer to drm crtc structure
+ */
+void sde_crtc_static_cache_read_kickoff(struct drm_crtc *crtc);
+
#endif /* _SDE_CRTC_H_ */
@@ -468,6 +468,11 @@ enum {
	UIDLE_PROP_MAX,
};

+enum {
+	CACHE_CONTROLLER,
+	CACHE_CONTROLLER_PROP_MAX,
+};
+
enum {
	REG_DMA_OFF,
	REG_DMA_ID,
@@ -848,6 +853,10 @@ static struct sde_prop_type uidle_prop[] = {
	{UIDLE_LEN, "qcom,sde-uidle-size", false, PROP_TYPE_U32},
};

+static struct sde_prop_type cache_prop[] = {
+	{CACHE_CONTROLLER, "qcom,llcc-v2", false, PROP_TYPE_NODE},
+};
+
static struct sde_prop_type reg_dma_prop[REG_DMA_PROP_MAX] = {
	[REG_DMA_OFF] = {REG_DMA_OFF, "qcom,sde-reg-dma-off", false,
		PROP_TYPE_U32_ARRAY},
@@ -1392,6 +1401,7 @@ static int _sde_sspp_setup_vigs(struct device_node *np,
	int i;
	struct sde_dt_props *props;
	struct device_node *snp = NULL;
+	struct sde_sc_cfg *sc_cfg = sde_cfg->sc_cfg;
	int vig_count = 0;
	const char *type;
@@ -1474,15 +1484,19 @@ static int _sde_sspp_setup_vigs(struct device_node *np,
				MAX_DOWNSCALE_RATIO_INROT_NRT_DEFAULT;
		}

-		if (sde_cfg->sc_cfg.has_sys_cache) {
+		if (sc_cfg[SDE_SYS_CACHE_ROT].has_sys_cache) {
			set_bit(SDE_PERF_SSPP_SYS_CACHE, &sspp->perf_features);
-			sblk->llcc_scid = sde_cfg->sc_cfg.llcc_scid;
+			sblk->llcc_scid =
+				sc_cfg[SDE_SYS_CACHE_ROT].llcc_scid;
			sblk->llcc_slice_size =
-				sde_cfg->sc_cfg.llcc_slice_size;
+				sc_cfg[SDE_SYS_CACHE_ROT].llcc_slice_size;
		}

		if (sde_cfg->inline_disable_const_clr)
			set_bit(SDE_SSPP_INLINE_CONST_CLR, &sspp->features);
+
+		if (sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache)
+			set_bit(SDE_PERF_SSPP_SYS_CACHE, &sspp->perf_features);
	}

	sde_put_dt_props(props);
@@ -2355,62 +2369,6 @@ end:
	return rc;
}

-static int sde_rot_parse_dt(struct device_node *np,
-		struct sde_mdss_cfg *sde_cfg)
-{
-	struct platform_device *pdev;
-	struct of_phandle_args phargs;
-	struct llcc_slice_desc *slice;
-	int rc = 0;
-
-	rc = of_parse_phandle_with_args(np,
-		"qcom,sde-inline-rotator", "#list-cells",
-		0, &phargs);
-
-	if (rc) {
-		/*
-		 * This is not a fatal error, system cache can be disabled
-		 * in device tree
-		 */
-		SDE_DEBUG("sys cache will be disabled rc:%d\n", rc);
-		rc = 0;
-		goto exit;
-	}
-
-	if (!phargs.np || !phargs.args_count) {
-		SDE_ERROR("wrong phandle args %d %d\n",
-			!phargs.np, !phargs.args_count);
-		rc = -EINVAL;
-		goto exit;
-	}
-
-	pdev = of_find_device_by_node(phargs.np);
-	if (!pdev) {
-		SDE_ERROR("invalid sde rotator node\n");
-		goto exit;
-	}
-
-	slice = llcc_slice_getd(LLCC_ROTATOR);
-	if (IS_ERR_OR_NULL(slice)) {
-		SDE_ERROR("failed to get rotator slice!\n");
-		rc = -EINVAL;
-		goto cleanup;
-	}
-
-	sde_cfg->sc_cfg.llcc_scid = llcc_get_slice_id(slice);
-	sde_cfg->sc_cfg.llcc_slice_size = llcc_get_slice_size(slice);
-	llcc_slice_putd(slice);
-
-	sde_cfg->sc_cfg.has_sys_cache = true;
-
-	SDE_DEBUG("rotator llcc scid:%d slice_size:%zukb\n",
-		sde_cfg->sc_cfg.llcc_scid, sde_cfg->sc_cfg.llcc_slice_size);
-cleanup:
-	of_node_put(phargs.np);
-exit:
-	return rc;
-}
-
static int sde_dspp_top_parse_dt(struct device_node *np,
		struct sde_mdss_cfg *sde_cfg)
{
@@ -3230,6 +3188,100 @@ end:
	return 0;
}

+static int sde_cache_parse_dt(struct device_node *np,
+		struct sde_mdss_cfg *sde_cfg)
+{
+	struct llcc_slice_desc *slice;
+	struct platform_device *pdev;
+	struct of_phandle_args phargs;
+	struct sde_sc_cfg *sc_cfg = sde_cfg->sc_cfg;
+	struct sde_dt_props *props;
+	int rc = 0;
+	u32 off_count;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	props = sde_get_dt_props(np, CACHE_CONTROLLER_PROP_MAX, cache_prop,
+			ARRAY_SIZE(cache_prop), &off_count);
+	if (IS_ERR_OR_NULL(props))
+		return PTR_ERR(props);
+
+	if (!props->exists[CACHE_CONTROLLER]) {
+		SDE_DEBUG("cache controller missing, will disable img cache:%d",
+				props->exists[CACHE_CONTROLLER]);
+		rc = 0;
+		goto end;
+	}
+
+	slice = llcc_slice_getd(LLCC_DISP);
+	if (IS_ERR_OR_NULL(slice)) {
+		SDE_ERROR("failed to get system cache %ld\n",
+				PTR_ERR(slice));
+	} else {
+		sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache = true;
+		sc_cfg[SDE_SYS_CACHE_DISP].llcc_scid = llcc_get_slice_id(slice);
+		sc_cfg[SDE_SYS_CACHE_DISP].llcc_slice_size =
+				llcc_get_slice_size(slice);
+		SDE_DEBUG("img cache scid:%d slice_size:%zu kb\n",
+				sc_cfg[SDE_SYS_CACHE_DISP].llcc_scid,
+				sc_cfg[SDE_SYS_CACHE_DISP].llcc_slice_size);
+		llcc_slice_putd(slice);
+	}
+
+	/* Read inline rot node */
+	rc = of_parse_phandle_with_args(np,
+			"qcom,sde-inline-rotator", "#list-cells", 0, &phargs);
+	if (rc) {
+		/*
+		 * This is not a fatal error, system cache can be disabled
+		 * in device tree
+		 */
+		SDE_DEBUG("sys cache will be disabled rc:%d\n", rc);
+		rc = 0;
+		goto end;
+	}
+
+	if (!phargs.np || !phargs.args_count) {
+		SDE_ERROR("wrong phandle args %d %d\n",
+				!phargs.np, !phargs.args_count);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	pdev = of_find_device_by_node(phargs.np);
+	if (!pdev) {
+		SDE_ERROR("invalid sde rotator node\n");
+		goto end;
+	}
+
+	slice = llcc_slice_getd(LLCC_ROTATOR);
+	if (IS_ERR_OR_NULL(slice)) {
+		SDE_ERROR("failed to get rotator slice!\n");
+		rc = -EINVAL;
+		goto cleanup;
+	}
+
+	sc_cfg[SDE_SYS_CACHE_ROT].llcc_scid = llcc_get_slice_id(slice);
+	sc_cfg[SDE_SYS_CACHE_ROT].llcc_slice_size =
+			llcc_get_slice_size(slice);
+	llcc_slice_putd(slice);
+
+	sc_cfg[SDE_SYS_CACHE_ROT].has_sys_cache = true;
+
+	SDE_DEBUG("rotator llcc scid:%d slice_size:%zukb\n",
+			sc_cfg[SDE_SYS_CACHE_ROT].llcc_scid,
+			sc_cfg[SDE_SYS_CACHE_ROT].llcc_slice_size);
+
+cleanup:
+	of_node_put(phargs.np);
+end:
+	sde_put_dt_props(props);
+	return rc;
+}
+
static int _sde_vbif_populate_ot_parsing(struct sde_vbif_cfg *vbif,
	struct sde_prop_value *prop_value, int *prop_count)
{
@@ -4952,10 +5004,6 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
	if (rc)
		goto end;

-	rc = sde_rot_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
	/* uidle must be done before sspp and ctl,
	 * so if something goes wrong, we won't
	 * enable it in ctl and sspp.
@@ -4964,6 +5012,10 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
	if (rc)
		goto end;

+	rc = sde_cache_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
	rc = sde_ctl_parse_dt(np, sde_cfg);
	if (rc)
		goto end;
@@ -149,6 +149,18 @@ enum {
#define SSPP_SYS_CACHE_OP_TYPE BIT(3)
#define SSPP_SYS_CACHE_NO_ALLOC BIT(4)

+/**
+ * sde_sys_cache_type: Types of system cache supported
+ * SDE_SYS_CACHE_ROT: Rotator system cache
+ * SDE_SYS_CACHE_DISP: Static img system cache
+ */
+enum sde_sys_cache_type {
+	SDE_SYS_CACHE_ROT,
+	SDE_SYS_CACHE_DISP,
+	SDE_SYS_CACHE_MAX,
+	SDE_SYS_CACHE_NONE
+};
+
/**
 * All INTRs relevant for a specific target should be enabled via
 * _add_to_irq_offset_list()
@@ -1493,7 +1505,7 @@ struct sde_mdss_cfg {
	bool has_demura;
	u32 demura_supported[SSPP_MAX][2];

-	struct sde_sc_cfg sc_cfg;
+	struct sde_sc_cfg sc_cfg[SDE_SYS_CACHE_MAX];

	bool sui_misr_supported;
	u32 sui_block_xin_mask;
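With sc_cfg now an array indexed by enum sde_sys_cache_type, each consumer (rotator vs. static-image path) picks its own slice configuration. A minimal standalone illustration of that indexing; the field names mirror sde_sc_cfg, and the values are made up:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum sde_sys_cache_type { SDE_SYS_CACHE_ROT, SDE_SYS_CACHE_DISP, SDE_SYS_CACHE_MAX };

struct sc_cfg {
    bool has_sys_cache;
    int llcc_scid;          /* slice id handed to the SSPP */
    size_t llcc_slice_size; /* in kB */
};

int main(void)
{
    /* One entry per cache user; only DISP is present on this made-up target. */
    struct sc_cfg cfg[SDE_SYS_CACHE_MAX] = {
        [SDE_SYS_CACHE_ROT]  = { .has_sys_cache = false },
        [SDE_SYS_CACHE_DISP] = { .has_sys_cache = true, .llcc_scid = 9,
                                 .llcc_slice_size = 4096 },
    };

    if (cfg[SDE_SYS_CACHE_DISP].has_sys_cache)
        printf("disp scid=%d size=%zukb\n",
               cfg[SDE_SYS_CACHE_DISP].llcc_scid,
               cfg[SDE_SYS_CACHE_DISP].llcc_slice_size);
    return 0;
}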
@@ -52,7 +52,7 @@

#define CTL_MIXER_BORDER_OUT BIT(24)
#define CTL_FLUSH_MASK_ROT BIT(27)
-#define CTL_FLUSH_MASK_CTL BIT(17)
+#define CTL_FLUSH_CTL 17

#define CTL_NUM_EXT 4
#define CTL_SSPP_MAX_RECTS 2
@@ -410,6 +410,16 @@ static inline void sde_hw_ctl_uidle_enable(struct sde_hw_ctl *ctx, bool enable)
	SDE_REG_WRITE(&ctx->hw, CTL_UIDLE_ACTIVE, val);
}

+static inline int sde_hw_ctl_update_bitmask_ctl(struct sde_hw_ctl *ctx,
+		bool enable)
+{
+	if (!ctx)
+		return -EINVAL;
+
+	UPDATE_MASK(ctx->flush.pending_flush_mask, CTL_FLUSH_CTL, enable);
+	return 0;
+}
+
static inline int sde_hw_ctl_update_bitmask_sspp(struct sde_hw_ctl *ctx,
		enum sde_sspp sspp,
		bool enable)
@@ -439,7 +449,7 @@ static inline int sde_hw_ctl_update_bitmask_mixer(struct sde_hw_ctl *ctx,
	}

	UPDATE_MASK(ctx->flush.pending_flush_mask, mixer_tbl[lm], enable);
-	ctx->flush.pending_flush_mask |= CTL_FLUSH_MASK_CTL;
+	sde_hw_ctl_update_bitmask_ctl(ctx, true);

	return 0;
}
@@ -1366,6 +1376,7 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
	ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = sde_hw_ctl_setup_blendstage;
	ops->get_staged_sspp = sde_hw_ctl_get_staged_sspp;
+	ops->update_bitmask_ctl = sde_hw_ctl_update_bitmask_ctl;
	ops->update_bitmask_sspp = sde_hw_ctl_update_bitmask_sspp;
	ops->update_bitmask_mixer = sde_hw_ctl_update_bitmask_mixer;
	ops->reg_dma_flush = sde_hw_reg_dma_flush;
@@ -319,6 +319,13 @@ struct sde_hw_ctl_ops {
	 */
	int (*wait_reset_status)(struct sde_hw_ctl *ctx);

+	/**
+	 * update_bitmask_ctl: updates mask corresponding to ctl
+	 * @enable : true to enable, false to disable
+	 */
+	int (*update_bitmask_ctl)(struct sde_hw_ctl *ctx,
+		bool enable);
+
	/**
	 * update_bitmask_sspp: updates mask corresponding to sspp
	 * @blk : blk id
@@ -328,7 +335,7 @@ struct sde_hw_ctl_ops {
		enum sde_sspp blk, bool enable);

	/**
-	 * update_bitmask_sspp: updates mask corresponding to sspp
+	 * update_bitmask_mixer: updates mask corresponding to mixer
	 * @blk : blk id
	 * @enable : true to enable, 0 to disable
	 */
@@ -336,7 +343,7 @@ struct sde_hw_ctl_ops {
		enum sde_lm blk, bool enable);

	/**
-	 * update_bitmask_sspp: updates mask corresponding to sspp
+	 * update_bitmask_dspp: updates mask corresponding to dspp
	 * @blk : blk id
	 * @enable : true to enable, 0 to disable
	 */
@@ -344,7 +351,7 @@ struct sde_hw_ctl_ops {
		enum sde_dspp blk, bool enable);

	/**
-	 * update_bitmask_sspp: updates mask corresponding to sspp
+	 * update_bitmask_dspp_pavlut: updates mask corresponding to dspp pav
	 * @blk : blk id
	 * @enable : true to enable, 0 to disable
	 */
|
@@ -237,6 +237,7 @@ enum {
|
||||
*/
|
||||
enum {
|
||||
SDE_PIPE_SC_RD_OP_TYPE_CACHEABLE,
|
||||
SDE_PIPE_SC_RD_OP_TYPE_RESERVED,
|
||||
SDE_PIPE_SC_RD_OP_TYPE_INVALIDATE,
|
||||
SDE_PIPE_SC_RD_OP_TYPE_EVICTION,
|
||||
};
|
||||
@@ -249,6 +250,7 @@ enum {
 * @rd_noallocate: system cache read no allocate attribute
 * @rd_op_type: system cache read operation type
 * @flags: dirty flags to change the configuration
+ * @type: sys cache type
 */
struct sde_hw_pipe_sc_cfg {
	u32 op_mode;
@@ -257,6 +259,7 @@ struct sde_hw_pipe_sc_cfg {
	bool rd_noallocate;
	u32 rd_op_type;
	u32 flags;
+	enum sde_sys_cache_type type;
};

/**
@@ -1107,6 +1107,8 @@ static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
		sde_crtc_complete_flip(crtc, NULL);
	}

+	sde_crtc_static_cache_read_kickoff(crtc);
+
	SDE_ATRACE_END("sde_ksm_wait_for_commit_done");
}
@@ -2753,8 +2753,10 @@ void sde_plane_set_error(struct drm_plane *plane, bool error)
}

static void _sde_plane_sspp_setup_sys_cache(struct sde_plane *psde,
-	struct sde_plane_state *pstate, const struct sde_format *fmt)
+	struct sde_plane_state *pstate, bool is_tp10)
{
+	struct sde_sc_cfg *sc_cfg = psde->catalog->sc_cfg;
+
	if (!psde->pipe_hw->ops.setup_sys_cache ||
		!(psde->perf_features & BIT(SDE_PERF_SSPP_SYS_CACHE)))
		return;
@@ -2762,24 +2764,66 @@ static void _sde_plane_sspp_setup_sys_cache(struct sde_plane *psde,
	SDE_DEBUG("features:0x%x rotation:0x%x\n",
		psde->features, pstate->rotation);

-	if ((pstate->rotation & DRM_MODE_ROTATE_90) &&
-		sde_format_is_tp10_ubwc(fmt)) {
-		pstate->sc_cfg.rd_en = true;
-		pstate->sc_cfg.rd_scid =
-			psde->pipe_sblk->llcc_scid;
-		pstate->sc_cfg.flags = SSPP_SYS_CACHE_EN_FLAG |
-			SSPP_SYS_CACHE_SCID;
-	} else {
-
+	pstate->sc_cfg.rd_en = false;
+	pstate->sc_cfg.rd_scid = 0x0;
+	pstate->sc_cfg.flags = SSPP_SYS_CACHE_EN_FLAG |
+		SSPP_SYS_CACHE_SCID;
+	pstate->sc_cfg.type = SDE_SYS_CACHE_NONE;
+
+	if (pstate->rotation & DRM_MODE_ROTATE_90) {
+		if (is_tp10 && sc_cfg[SDE_SYS_CACHE_ROT].has_sys_cache) {
+			pstate->sc_cfg.rd_en = true;
+			pstate->sc_cfg.rd_scid =
+				sc_cfg[SDE_SYS_CACHE_ROT].llcc_scid;
+			pstate->sc_cfg.flags = SSPP_SYS_CACHE_EN_FLAG |
+				SSPP_SYS_CACHE_SCID;
+			pstate->sc_cfg.type = SDE_SYS_CACHE_ROT;
+		}
+	} else if (pstate->static_cache_state == CACHE_STATE_FRAME_WRITE &&
+			sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache) {
+		pstate->sc_cfg.rd_en = true;
+		pstate->sc_cfg.rd_scid =
+			sc_cfg[SDE_SYS_CACHE_DISP].llcc_scid;
+		pstate->sc_cfg.rd_noallocate = false;
+		pstate->sc_cfg.flags = SSPP_SYS_CACHE_EN_FLAG |
+			SSPP_SYS_CACHE_SCID | SSPP_SYS_CACHE_NO_ALLOC;
+		pstate->sc_cfg.type = SDE_SYS_CACHE_DISP;
+	} else if (pstate->static_cache_state == CACHE_STATE_FRAME_READ &&
+			sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache) {
+		pstate->sc_cfg.rd_en = true;
+		pstate->sc_cfg.rd_scid =
+			sc_cfg[SDE_SYS_CACHE_DISP].llcc_scid;
+		pstate->sc_cfg.rd_noallocate = true;
+		pstate->sc_cfg.flags = SSPP_SYS_CACHE_EN_FLAG |
+			SSPP_SYS_CACHE_SCID | SSPP_SYS_CACHE_NO_ALLOC;
+		pstate->sc_cfg.type = SDE_SYS_CACHE_DISP;
	}

	psde->pipe_hw->ops.setup_sys_cache(
		psde->pipe_hw, &pstate->sc_cfg);
}

+void sde_plane_static_img_control(struct drm_plane *plane,
+		enum sde_crtc_cache_state state)
+{
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+
+	if (!plane || !plane->state) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+
+	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(plane->state);
+
+	pstate->static_cache_state = state;
+
+	if (state == CACHE_STATE_FRAME_READ)
+		_sde_plane_sspp_setup_sys_cache(psde, pstate, false);
+}
+
static void _sde_plane_map_prop_to_dirty_bits(void)
{
	plane_prop_array[PLANE_PROP_SCALER_V1] =
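The two DISP branches above differ only in rd_noallocate: while the frame is being written into the cache (FRAME_WRITE) reads may still allocate lines, and once the CRTC only replays the cached frame (FRAME_READ) the read is switched to no-allocate so it does not disturb what was already cached. A hedged standalone restatement of that selection, using simplified names:

#include <stdbool.h>
#include <stdio.h>

enum cache_state { NORMAL, FRAME_WRITE, FRAME_READ };

struct sc_read_cfg {
    bool rd_en;
    bool rd_noallocate;
};

/* Chooses the read-path cache config for the static-image slice. */
static struct sc_read_cfg pick_disp_cfg(enum cache_state state)
{
    struct sc_read_cfg cfg = { .rd_en = false, .rd_noallocate = false };

    if (state == FRAME_WRITE) {
        cfg.rd_en = true;           /* populate the slice during this frame */
        cfg.rd_noallocate = false;
    } else if (state == FRAME_READ) {
        cfg.rd_en = true;           /* serve from the slice, don't refill it */
        cfg.rd_noallocate = true;
    }
    return cfg;
}

int main(void)
{
    printf("read phase noallocate=%d\n", pick_disp_cfg(FRAME_READ).rd_noallocate);
    return 0;
}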
@@ -3052,7 +3096,8 @@ static void _sde_plane_update_format_and_rects(struct sde_plane *psde,
			pstate->multirect_index);
	}

-	_sde_plane_sspp_setup_sys_cache(psde, pstate, fmt);
+	_sde_plane_sspp_setup_sys_cache(psde, pstate,
+			sde_format_is_tp10_ubwc(fmt));

	/* update csc */
	if (SDE_FORMAT_IS_YUV(fmt))
@@ -3347,7 +3392,8 @@ void sde_plane_restore(struct drm_plane *plane)
	sde_plane_atomic_update(plane, plane->state);
}

-bool sde_plane_is_cache_required(struct drm_plane *plane)
+bool sde_plane_is_cache_required(struct drm_plane *plane,
+		enum sde_sys_cache_type type)
{
	struct sde_plane_state *pstate;
@@ -3359,7 +3405,7 @@ bool sde_plane_is_cache_required(struct drm_plane *plane)
	pstate = to_sde_plane_state(plane->state);

	/* check if llcc is required for the plane */
-	if (pstate->sc_cfg.rd_en)
+	if (pstate->sc_cfg.rd_en && (pstate->sc_cfg.type == type))
		return true;
	else
		return false;
@@ -25,6 +25,7 @@
#include "sde_kms.h"
#include "sde_hw_mdss.h"
#include "sde_hw_sspp.h"
+#include "sde_crtc.h"

/* dirty bits for update function */
#define SDE_PLANE_DIRTY_RECTS 0x1
@@ -81,6 +82,10 @@ enum sde_plane_sclcheck_state {
 * @scaler3_cfg: configuration data for scaler3
 * @pixel_ext: configuration data for pixel extensions
 * @scaler_check_state: indicates status of user provided pixel extension data
 * @pre_down: pre down scale configuration
+ * @sc_cfg: system cache configuration
+ * @rotation: rotation cache state
+ * @static_cache_state: plane cache state for static image
+ * @cdp_cfg: CDP configuration
 */
struct sde_plane_state {
@@ -108,6 +113,7 @@ struct sde_plane_state {
	/* @sc_cfg: system_cache configuration */
	struct sde_hw_pipe_sc_cfg sc_cfg;
	uint32_t rotation;
+	uint32_t static_cache_state;

	struct sde_hw_pipe_cdp_cfg cdp_cfg;
};
@@ -305,8 +311,18 @@ void sde_plane_setup_src_split_order(struct drm_plane *plane,
/* sde_plane_is_cache_required - indicates if the system cache is
 * required for the plane.
 * @plane: Pointer to DRM plane object
+ * @type: sys cache type
 * Returns: true if sys cache is required, otherwise false.
 */
-bool sde_plane_is_cache_required(struct drm_plane *plane);
+bool sde_plane_is_cache_required(struct drm_plane *plane,
+		enum sde_sys_cache_type type);
+
+/**
+ * sde_plane_static_img_control - Switch the static image state
+ * @plane: Pointer to drm plane structure
+ * @state: state to set
+ */
+void sde_plane_static_img_control(struct drm_plane *plane,
+		enum sde_crtc_cache_state state);

#endif /* _SDE_PLANE_H_ */