Add 'qcom/opensource/display-drivers/' from commit '5ff96e683134b356ebe6c37069b4197034502ef9'

git-subtree-dir: qcom/opensource/display-drivers
git-subtree-mainline: 2d61911ef6
git-subtree-split: 5ff96e6831
Change-Id:
repo: https://git.codelinaro.org/clo/la/platform/vendor/opensource/display-drivers
tag: DISPLAY.LA.4.0.r2-07600-lanai.0
This commit is contained in:
David Wronek
2024-10-06 16:44:29 +02:00
commit 1841c0f616
337 changed files with 239264 additions and 0 deletions

View File

@@ -0,0 +1,965 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2012, 2015-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/major.h>
#include <linux/debugfs.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>
#define CREATE_TRACE_POINTS
#include "sde_rotator_base.h"
#include "sde_rotator_util.h"
#include "sde_rotator_trace.h"
#include "sde_rotator_debug.h"
#include "sde_rotator_dev.h"
#include "sde_rotator_vbif.h"
static const struct sde_rot_bus_data sde_rot_reg_bus_table[] = {
{0, 0},
{0, 76800},
{0, 150000},
{0, 300000},
};
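/*
 * fudge_factor - scale @val by the ratio @numer/@denom using 64-bit math.
 * apply_fudge_factor()/apply_inverse_fudge_factor() wrap this for a
 * struct sde_mult_factor in the normal and inverted direction.
 */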
static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
{
u64 result = (val * (u64)numer);
do_div(result, denom);
return result;
}
static inline u64 apply_fudge_factor(u64 val,
struct sde_mult_factor *factor)
{
return fudge_factor(val, factor->numer, factor->denom);
}
static inline u64 apply_inverse_fudge_factor(u64 val,
struct sde_mult_factor *factor)
{
return fudge_factor(val, factor->denom, factor->numer);
}
static inline bool validate_comp_ratio(struct sde_mult_factor *factor)
{
return factor->numer && factor->denom;
}
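/*
 * sde_get_rot_reg_bus_value - look up the register bus ab/ib vote for the
 * given use-case index. The caller is expected to pass a valid index below
 * VOTE_INDEX_MAX; no bounds checking is done here.
 */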
const struct sde_rot_bus_data *sde_get_rot_reg_bus_value(u32 usecase_ndx)
{
return &sde_rot_reg_bus_table[usecase_ndx];
}
u32 sde_apply_comp_ratio_factor(u32 quota,
struct sde_mdp_format_params *fmt,
struct sde_mult_factor *factor)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
if (!mdata || !test_bit(SDE_QOS_OVERHEAD_FACTOR,
mdata->sde_qos_map))
return quota;
/* apply compression ratio, only for compressed formats */
if (sde_mdp_is_ubwc_format(fmt) &&
validate_comp_ratio(factor))
quota = apply_inverse_fudge_factor(quota, factor);
return quota;
}
#define RES_1080p (1088*1920)
#define RES_UHD (3840*2160)
#define RES_WQXGA (2560*1600)
#define XIN_HALT_TIMEOUT_US 0x4000
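/*
 * sde_mdp_wait_for_xin_halt - poll MMSS_VBIF_XIN_HALT_CTRL1 until the given
 * xin port reports halted, or time out after XIN_HALT_TIMEOUT_US.
 * Returns 0 on success or -ETIMEDOUT if the client did not halt.
 */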
static int sde_mdp_wait_for_xin_halt(u32 xin_id)
{
void __iomem *vbif_base;
u32 status;
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
u32 idle_mask = BIT(xin_id);
int rc;
vbif_base = mdata->vbif_nrt_io.base;
rc = readl_poll_timeout(vbif_base + MMSS_VBIF_XIN_HALT_CTRL1,
status, (status & idle_mask),
1000, XIN_HALT_TIMEOUT_US);
if (rc == -ETIMEDOUT) {
SDEROT_ERR("VBIF client %d not halting. TIMEDOUT.\n",
xin_id);
} else {
SDEROT_DBG("VBIF client %d is halted\n", xin_id);
}
return rc;
}
/**
* force_on_xin_clk() - enable/disable the force-on for the pipe clock
* @bit_off: offset of the bit to enable/disable the force-on.
* @clk_ctl_reg_off: register offset for the clock control.
* @enable: boolean to indicate if the force-on of the clock needs to be
* enabled or disabled.
*
* This function returns:
* true - if the clock is forced-on by this function
* false - if the clock was already forced on
* It is the caller's responsibility to check whether this function forced
* the clock on; if so, the caller must remove the force-on again once done,
* otherwise the force-on must be left in place.
* Clocks must be on when calling this function.
*/
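/*
 * Typical usage (illustrative; this mirrors sde_mdp_halt_vbif_xin() and
 * sde_mdp_set_ot_limit() below):
 *
 *	forced_on = force_on_xin_clk(bit_off, clk_ctl_reg_off, true);
 *	... program halt / OT registers ...
 *	if (forced_on)
 *		force_on_xin_clk(bit_off, clk_ctl_reg_off, false);
 */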
static bool force_on_xin_clk(u32 bit_off, u32 clk_ctl_reg_off, bool enable)
{
u32 val;
u32 force_on_mask;
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
bool clk_forced_on = false;
force_on_mask = BIT(bit_off);
val = readl_relaxed(mdata->mdp_base + clk_ctl_reg_off);
clk_forced_on = !(force_on_mask & val);
if (enable)
val |= force_on_mask;
else
val &= ~force_on_mask;
writel_relaxed(val, mdata->mdp_base + clk_ctl_reg_off);
return clk_forced_on;
}
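/*
 * vbif_lock()/vbif_unlock() - serialize VBIF access against the parent MDP
 * driver via mdp_vbif_lock(). Both are no-ops when no parent mdp device was
 * found in the device tree.
 */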
void vbif_lock(struct platform_device *parent_pdev)
{
if (!parent_pdev)
return;
mdp_vbif_lock(parent_pdev, true);
}
void vbif_unlock(struct platform_device *parent_pdev)
{
if (!parent_pdev)
return;
mdp_vbif_lock(parent_pdev, false);
}
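/*
 * sde_mdp_halt_vbif_xin - force the xin port clock on, request a halt on the
 * given VBIF xin port, wait for the halt to take effect, then release the
 * halt request and the clock force-on. On timeout, the corresponding bit is
 * recorded in params->xin_timeout for the caller.
 */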
void sde_mdp_halt_vbif_xin(struct sde_mdp_vbif_halt_params *params)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
u32 reg_val;
bool forced_on;
int rc = 0;
if (!mdata || !params || !params->reg_off_mdp_clk_ctrl) {
SDEROT_ERR("null input parameter\n");
return;
}
if (!mdata->parent_pdev &&
params->xin_id > MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1) {
SDEROT_ERR("xin_id:%d exceed max limit\n", params->xin_id);
return;
}
forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
params->reg_off_mdp_clk_ctrl, true);
vbif_lock(mdata->parent_pdev);
SDEROT_EVTLOG(forced_on, params->xin_id);
reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
reg_val | BIT(params->xin_id));
/* this is a polling operation */
rc = sde_mdp_wait_for_xin_halt(params->xin_id);
if (rc == -ETIMEDOUT)
params->xin_timeout = BIT(params->xin_id);
reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
reg_val & ~BIT(params->xin_id));
vbif_unlock(mdata->parent_pdev);
if (forced_on)
force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
params->reg_off_mdp_clk_ctrl, false);
}
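/*
 * sde_mdp_get_ot_limit - derive the outstanding-transaction (OT) limit for a
 * read or write client from resolution, frame rate and pixel format. Starts
 * from the default rd/wr limit read from the device tree and overrides it,
 * based on the YUV/resolution/fps buckets below, on targets that have
 * SDE_QOS_OTLIM set.
 */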
u32 sde_mdp_get_ot_limit(u32 width, u32 height, u32 pixfmt, u32 fps, u32 is_rd)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
struct sde_mdp_format_params *fmt;
u32 ot_lim;
u32 is_yuv;
u64 res;
ot_lim = (is_rd) ? mdata->default_ot_rd_limit :
mdata->default_ot_wr_limit;
/*
* If default ot is not set from dt,
* then do not configure it.
*/
if (ot_lim == 0)
goto exit;
/* Modify the limits if the target and the use case requires it */
if (false == test_bit(SDE_QOS_OTLIM, mdata->sde_qos_map))
goto exit;
width = min_t(u32, width, SDE_ROT_MAX_IMG_WIDTH);
height = min_t(u32, height, SDE_ROT_MAX_IMG_HEIGHT);
res = width * height;
res = res * fps;
fmt = sde_get_format_params(pixfmt);
if (!fmt) {
SDEROT_WARN("invalid format %8.8x\n", pixfmt);
goto exit;
}
is_yuv = sde_mdp_is_yuv_format(fmt);
SDEROT_DBG("w:%d h:%d fps:%d pixfmt:%8.8x yuv:%d res:%llu rd:%d\n",
width, height, fps, pixfmt, is_yuv, res, is_rd);
/*
* If (total_source_pixels <= 62208000 && YUV) -> RD/WROT=2 //1080p30
* If (total_source_pixels <= 124416000 && YUV) -> RD/WROT=4 //1080p60
* If (total_source_pixels <= 2160p && YUV && FPS <= 30) -> RD/WROT = 32
*/
if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
SDE_MDP_HW_REV_540)) {
if (is_yuv) {
if (res <= (RES_1080p * 30))
ot_lim = 2;
else if (res <= (RES_1080p * 60))
ot_lim = 4;
else if (res <= (RES_WQXGA * 60))
ot_lim = 4;
else if (res <= (RES_UHD * 30))
ot_lim = 8;
} else if (fmt->bpp == 4 && res <= (RES_WQXGA * 60)) {
ot_lim = 16;
}
} else if (IS_SDE_MAJOR_SAME(mdata->mdss_version,
SDE_MDP_HW_REV_600) ||
IS_SDE_MAJOR_SAME(mdata->mdss_version,
SDE_MDP_HW_REV_870) || is_yuv) {
if (res <= (RES_1080p * 30))
ot_lim = 2;
else if (res <= (RES_1080p * 60))
ot_lim = 4;
}
exit:
SDEROT_DBG("ot_lim=%d\n", ot_lim);
return ot_lim;
}
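/*
 * get_ot_limit - compute the OT limit for the request described by @params
 * and compare it with the value currently programmed in the VBIF limit
 * register; returns 0 when no reprogramming is needed.
 */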
static u32 get_ot_limit(u32 reg_off, u32 bit_off,
struct sde_mdp_set_ot_params *params)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
u32 ot_lim;
u32 val;
ot_lim = sde_mdp_get_ot_limit(
params->width, params->height,
params->fmt, params->fps,
params->reg_off_vbif_lim_conf == MMSS_VBIF_RD_LIM_CONF);
/*
* If default ot is not set from dt,
* then do not configure it.
*/
if (ot_lim == 0)
goto exit;
val = SDE_VBIF_READ(mdata, reg_off);
val &= (0xFF << bit_off);
val = val >> bit_off;
SDEROT_EVTLOG(val, ot_lim);
if (val == ot_lim)
ot_lim = 0;
exit:
SDEROT_DBG("ot_lim=%d\n", ot_lim);
SDEROT_EVTLOG(params->width, params->height, params->fmt, params->fps,
ot_lim);
return ot_lim;
}
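/*
 * sde_mdp_set_ot_limit - program the per-xin OT limit in the VBIF. The xin
 * port is halted around the register update, with the port clock forced on
 * for the duration, and the whole sequence is serialized via vbif_lock().
 */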
void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
u32 ot_lim;
u32 reg_off_vbif_lim_conf = ((params->xin_id / mdata->npriority_lvl)
* mdata->npriority_lvl)
+ params->reg_off_vbif_lim_conf;
u32 bit_off_vbif_lim_conf = (params->xin_id % mdata->npriority_lvl) * 8;
u32 reg_val;
u32 sts;
bool forced_on;
vbif_lock(mdata->parent_pdev);
ot_lim = get_ot_limit(
reg_off_vbif_lim_conf,
bit_off_vbif_lim_conf,
params) & 0xFF;
if (ot_lim == 0)
goto exit;
if (params->rotsts_base && params->rotsts_busy_mask) {
sts = readl_relaxed(params->rotsts_base);
if (sts & params->rotsts_busy_mask) {
SDEROT_ERR(
"Rotator still busy, should not modify VBIF\n");
SDEROT_EVTLOG_TOUT_HANDLER(
"rot", "vbif_dbg_bus", "panic");
}
}
trace_rot_perf_set_ot(params->num, params->xin_id, ot_lim);
forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
params->reg_off_mdp_clk_ctrl, true);
reg_val = SDE_VBIF_READ(mdata, reg_off_vbif_lim_conf);
reg_val &= ~(0xFF << bit_off_vbif_lim_conf);
reg_val |= (ot_lim) << bit_off_vbif_lim_conf;
SDE_VBIF_WRITE(mdata, reg_off_vbif_lim_conf, reg_val);
reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
reg_val | BIT(params->xin_id));
/* this is a polling operation */
sde_mdp_wait_for_xin_halt(params->xin_id);
reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
reg_val & ~BIT(params->xin_id));
if (forced_on)
force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
params->reg_off_mdp_clk_ctrl, false);
SDEROT_EVTLOG(params->num, params->xin_id, ot_lim);
exit:
vbif_unlock(mdata->parent_pdev);
return;
}
/*
* sde_mdp_set_vbif_memtype - set memtype output for the given xin port
* @mdata: pointer to global rotator data
* @xin_id: xin identifier
* @memtype: memtype output configuration
* return: none
*/
static void sde_mdp_set_vbif_memtype(struct sde_rot_data_type *mdata,
u32 xin_id, u32 memtype)
{
u32 reg_off;
u32 bit_off;
u32 reg_val;
/*
* Assume 4 bits per bit field, 8 fields per 32-bit register.
*/
if (xin_id >= 8)
return;
reg_off = MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0;
bit_off = (xin_id & 0x7) * 4;
reg_val = SDE_VBIF_READ(mdata, reg_off);
reg_val &= ~(0x7 << bit_off);
reg_val |= (memtype & 0x7) << bit_off;
SDE_VBIF_WRITE(mdata, reg_off, reg_val);
}
/*
* sde_mdp_init_vbif - initialize static vbif configuration
* return: 0 if success; error code otherwise
*/
int sde_mdp_init_vbif(void)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
int i;
if (!mdata)
return -EINVAL;
if (mdata->vbif_memtype_count && mdata->vbif_memtype) {
for (i = 0; i < mdata->vbif_memtype_count; i++)
sde_mdp_set_vbif_memtype(mdata, i,
mdata->vbif_memtype[i]);
SDEROT_DBG("amemtype=0x%x\n", SDE_VBIF_READ(mdata,
MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0));
}
return 0;
}
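/*
 * sde_reg_bus_vote_client_create - allocate a register bus vote client and
 * add it to the global client list with an initial VOTE_INDEX_DISABLE vote.
 * Returns the new client or an ERR_PTR on failure.
 */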
struct reg_bus_client *sde_reg_bus_vote_client_create(char *client_name)
{
struct reg_bus_client *client;
struct sde_rot_data_type *sde_res = sde_rot_get_mdata();
static u32 id;
if (client_name == NULL) {
SDEROT_ERR("client name is null\n");
return ERR_PTR(-EINVAL);
}
client = kzalloc(sizeof(struct reg_bus_client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
mutex_lock(&sde_res->reg_bus_lock);
strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
client->usecase_ndx = VOTE_INDEX_DISABLE;
client->id = id;
SDEROT_DBG("bus vote client %s created:%pK id :%d\n", client_name,
client, id);
id++;
list_add(&client->list, &sde_res->reg_bus_clist);
mutex_unlock(&sde_res->reg_bus_lock);
return client;
}
void sde_reg_bus_vote_client_destroy(struct reg_bus_client *client)
{
struct sde_rot_data_type *sde_res = sde_rot_get_mdata();
if (!client) {
SDEROT_ERR("reg bus vote: invalid client handle\n");
} else {
SDEROT_DBG("bus vote client %s destroyed:%pK id:%u\n",
client->name, client, client->id);
mutex_lock(&sde_res->reg_bus_lock);
list_del_init(&client->list);
mutex_unlock(&sde_res->reg_bus_lock);
kfree(client);
}
}
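/*
 * sde_update_reg_bus_vote - record the use-case vote of @bus_client and,
 * whenever the aggregate (maximum across all clients) changes, apply it to
 * the register bus via icc_set_bw().
 */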
int sde_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
{
int ret = 0;
bool changed = false;
u32 max_usecase_ndx = VOTE_INDEX_DISABLE;
const struct sde_rot_bus_data *reg_bus_value = NULL;
struct reg_bus_client *client, *temp_client;
struct sde_rot_data_type *sde_res = sde_rot_get_mdata();
if (!sde_res || !sde_res->reg_bus_hdl || !bus_client)
return 0;
mutex_lock(&sde_res->reg_bus_lock);
bus_client->usecase_ndx = usecase_ndx;
list_for_each_entry_safe(client, temp_client, &sde_res->reg_bus_clist,
list) {
if (client->usecase_ndx < VOTE_INDEX_MAX &&
client->usecase_ndx > max_usecase_ndx)
max_usecase_ndx = client->usecase_ndx;
}
if (sde_res->reg_bus_usecase_ndx != max_usecase_ndx)
changed = true;
SDEROT_DBG(
"%pS: changed=%d current idx=%d request client %s id:%u idx:%d\n",
__builtin_return_address(0), changed, max_usecase_ndx,
bus_client->name, bus_client->id, usecase_ndx);
if (changed) {
reg_bus_value = sde_get_rot_reg_bus_value(max_usecase_ndx);
ret = icc_set_bw(sde_res->reg_bus_hdl, reg_bus_value->ab,
reg_bus_value->ib);
}
if (ret) {
pr_err("rotator: reg_bus_hdl set failed ab=%llu, ib=%llu\n",
reg_bus_value->ab, reg_bus_value->ib);
if (sde_res->reg_bus_usecase_ndx == VOTE_INDEX_DISABLE)
pr_err("rotator: reg_bus_hdl was disabled\n");
} else {
sde_res->reg_bus_usecase_ndx = max_usecase_ndx;
}
mutex_unlock(&sde_res->reg_bus_lock);
return ret;
}
static int sde_mdp_parse_dt_handler(struct platform_device *pdev,
char *prop_name, u32 *offsets, int len)
{
int rc;
rc = of_property_read_u32_array(pdev->dev.of_node, prop_name,
offsets, len);
if (rc) {
SDEROT_DBG("Error from prop %s : u32 array read\n", prop_name);
return -EINVAL;
}
return 0;
}
static int sde_mdp_parse_dt_prop_len(struct platform_device *pdev,
char *prop_name)
{
int len = 0;
of_find_property(pdev->dev.of_node, prop_name, &len);
if (len < 1) {
SDEROT_INFO("prop %s : doesn't exist in device tree\n",
prop_name);
return 0;
}
len = len/sizeof(u32);
return len;
}
static void sde_mdp_parse_vbif_memtype(struct platform_device *pdev,
struct sde_rot_data_type *mdata)
{
int rc;
mdata->vbif_memtype_count = sde_mdp_parse_dt_prop_len(pdev,
"qcom,mdss-rot-vbif-memtype");
mdata->vbif_memtype = kcalloc(mdata->vbif_memtype_count,
sizeof(u32), GFP_KERNEL);
if (!mdata->vbif_memtype || !mdata->vbif_memtype_count) {
mdata->vbif_memtype_count = 0;
return;
}
rc = sde_mdp_parse_dt_handler(pdev,
"qcom,mdss-rot-vbif-memtype", mdata->vbif_memtype,
mdata->vbif_memtype_count);
if (rc) {
SDEROT_DBG("vbif memtype not found\n");
kfree(mdata->vbif_memtype);
mdata->vbif_memtype = NULL;
mdata->vbif_memtype_count = 0;
return;
}
}
static void sde_mdp_parse_vbif_qos(struct platform_device *pdev,
struct sde_rot_data_type *mdata)
{
int rc;
mdata->vbif_rt_qos = NULL;
mdata->npriority_lvl = sde_mdp_parse_dt_prop_len(pdev,
"qcom,mdss-rot-vbif-qos-setting");
mdata->vbif_nrt_qos = kcalloc(mdata->npriority_lvl,
sizeof(u32), GFP_KERNEL);
if (!mdata->vbif_nrt_qos || !mdata->npriority_lvl) {
mdata->npriority_lvl = 0;
return;
}
rc = sde_mdp_parse_dt_handler(pdev,
"qcom,mdss-rot-vbif-qos-setting", mdata->vbif_nrt_qos,
mdata->npriority_lvl);
if (rc) {
SDEROT_DBG("vbif setting not found\n");
kfree(mdata->vbif_nrt_qos);
mdata->vbif_nrt_qos = NULL;
mdata->npriority_lvl = 0;
return;
}
}
static void sde_mdp_parse_vbif_xin_id(struct platform_device *pdev,
struct sde_rot_data_type *mdata)
{
mdata->vbif_xin_id[XIN_SSPP] = XIN_SSPP;
mdata->vbif_xin_id[XIN_WRITEBACK] = XIN_WRITEBACK;
sde_mdp_parse_dt_handler(pdev, "qcom,mdss-rot-xin-id",
mdata->vbif_xin_id, MAX_XIN);
}
static void sde_mdp_parse_cdp_setting(struct platform_device *pdev,
struct sde_rot_data_type *mdata)
{
int rc;
u32 len, data[SDE_ROT_OP_MAX] = {0};
len = sde_mdp_parse_dt_prop_len(pdev,
"qcom,mdss-rot-cdp-setting");
if (len == SDE_ROT_OP_MAX) {
rc = sde_mdp_parse_dt_handler(pdev,
"qcom,mdss-rot-cdp-setting", data, len);
if (rc) {
SDEROT_ERR("invalid CDP setting\n");
goto end;
}
set_bit(SDE_QOS_CDP, mdata->sde_qos_map);
mdata->enable_cdp[SDE_ROT_RD] = data[SDE_ROT_RD];
mdata->enable_cdp[SDE_ROT_WR] = data[SDE_ROT_WR];
return;
}
end:
clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
}
static void sde_mdp_parse_rot_lut_setting(struct platform_device *pdev,
struct sde_rot_data_type *mdata)
{
int rc;
u32 len, data[4];
len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-qos-lut");
if (len == 4) {
rc = sde_mdp_parse_dt_handler(pdev,
"qcom,mdss-rot-qos-lut", data, len);
if (!rc) {
mdata->lut_cfg[SDE_ROT_RD].creq_lut_0 = data[0];
mdata->lut_cfg[SDE_ROT_RD].creq_lut_1 = data[1];
mdata->lut_cfg[SDE_ROT_WR].creq_lut_0 = data[2];
mdata->lut_cfg[SDE_ROT_WR].creq_lut_1 = data[3];
set_bit(SDE_QOS_LUT, mdata->sde_qos_map);
} else {
SDEROT_DBG("qos lut setting not found\n");
}
}
len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-danger-lut");
if (len == SDE_ROT_OP_MAX) {
rc = sde_mdp_parse_dt_handler(pdev,
"qcom,mdss-rot-danger-lut", data, len);
if (!rc) {
mdata->lut_cfg[SDE_ROT_RD].danger_lut
= data[SDE_ROT_RD];
mdata->lut_cfg[SDE_ROT_WR].danger_lut
= data[SDE_ROT_WR];
set_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map);
} else {
SDEROT_DBG("danger lut setting not found\n");
}
}
len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-safe-lut");
if (len == SDE_ROT_OP_MAX) {
rc = sde_mdp_parse_dt_handler(pdev,
"qcom,mdss-rot-safe-lut", data, len);
if (!rc) {
mdata->lut_cfg[SDE_ROT_RD].safe_lut = data[SDE_ROT_RD];
mdata->lut_cfg[SDE_ROT_WR].safe_lut = data[SDE_ROT_WR];
set_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map);
} else {
SDEROT_DBG("safe lut setting not found\n");
}
}
}
static void sde_mdp_parse_inline_rot_lut_setting(struct platform_device *pdev,
struct sde_rot_data_type *mdata)
{
int rc;
u32 len, data[4];
len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-inline-rot-qos-lut");
if (len == 4) {
rc = sde_mdp_parse_dt_handler(pdev,
"qcom,mdss-inline-rot-qos-lut", data, len);
if (!rc) {
mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0 = data[0];
mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1 = data[1];
mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0 = data[2];
mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1 = data[3];
set_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map);
} else {
SDEROT_DBG("inline qos lut setting not found\n");
}
}
len = sde_mdp_parse_dt_prop_len(pdev,
"qcom,mdss-inline-rot-danger-lut");
if (len == SDE_ROT_OP_MAX) {
rc = sde_mdp_parse_dt_handler(pdev,
"qcom,mdss-inline-rot-danger-lut", data, len);
if (!rc) {
mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut
= data[SDE_ROT_RD];
mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut
= data[SDE_ROT_WR];
set_bit(SDE_INLINE_QOS_DANGER_LUT,
mdata->sde_inline_qos_map);
} else {
SDEROT_DBG("inline danger lut setting not found\n");
}
}
len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-inline-rot-safe-lut");
if (len == SDE_ROT_OP_MAX) {
rc = sde_mdp_parse_dt_handler(pdev,
"qcom,mdss-inline-rot-safe-lut", data, len);
if (!rc) {
mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut
= data[SDE_ROT_RD];
mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut
= data[SDE_ROT_WR];
set_bit(SDE_INLINE_QOS_SAFE_LUT,
mdata->sde_inline_qos_map);
} else {
SDEROT_DBG("inline safe lut setting not found\n");
}
}
}
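/*
 * sde_mdp_parse_rt_rotator - resolve the optional qcom,mdss-rot-parent
 * phandle and cache the parent MDP platform device, which is later used
 * for VBIF lock arbitration.
 */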
static void sde_mdp_parse_rt_rotator(struct device_node *np)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
struct platform_device *pdev;
struct of_phandle_args phargs;
int rc = 0;
rc = of_parse_phandle_with_args(np,
"qcom,mdss-rot-parent", "#list-cells", 0, &phargs);
if (rc)
return;
if (!phargs.np || !phargs.args_count) {
SDEROT_ERR("invalid args\n");
return;
}
pdev = of_find_device_by_node(phargs.np);
if (pdev) {
mdata->parent_pdev = pdev;
} else {
mdata->parent_pdev = NULL;
SDEROT_ERR("Parent mdp node not available\n");
}
of_node_put(phargs.np);
}
static int sde_mdp_parse_dt_misc(struct platform_device *pdev,
struct sde_rot_data_type *mdata)
{
int rc;
u32 data;
rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size",
&data);
mdata->rot_block_size = (!rc ? data : 128);
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,mdss-default-ot-rd-limit", &data);
mdata->default_ot_rd_limit = (!rc ? data : 0);
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,mdss-default-ot-wr-limit", &data);
mdata->default_ot_wr_limit = (!rc ? data : 0);
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,mdss-highest-bank-bit", &(mdata->highest_bank_bit));
if (rc)
SDEROT_DBG(
"Could not read optional property: highest bank bit\n");
sde_mdp_parse_cdp_setting(pdev, mdata);
sde_mdp_parse_vbif_qos(pdev, mdata);
sde_mdp_parse_vbif_xin_id(pdev, mdata);
sde_mdp_parse_vbif_memtype(pdev, mdata);
sde_mdp_parse_rot_lut_setting(pdev, mdata);
sde_mdp_parse_inline_rot_lut_setting(pdev, mdata);
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,mdss-rot-qos-cpu-mask", &data);
mdata->rot_pm_qos_cpu_mask = (!rc ? data : 0);
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,mdss-rot-qos-cpu-dma-latency", &data);
mdata->rot_pm_qos_cpu_dma_latency = (!rc ? data : 0);
mdata->mdp_base = mdata->sde_io.base + SDE_MDP_OFFSET;
return 0;
}
static void sde_mdp_destroy_dt_misc(struct platform_device *pdev,
struct sde_rot_data_type *mdata)
{
kfree(mdata->vbif_memtype);
mdata->vbif_memtype = NULL;
kfree(mdata->vbif_rt_qos);
mdata->vbif_rt_qos = NULL;
kfree(mdata->vbif_nrt_qos);
mdata->vbif_nrt_qos = NULL;
}
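/*
 * sde_mdp_bus_scale_register - acquire the "qcom,sde-reg-bus" interconnect
 * path. A missing DT node is treated as non-fatal; a parse error is
 * reported as -EINVAL.
 */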
static int sde_mdp_bus_scale_register(struct sde_rot_data_type *mdata)
{
int rc = 0;
mdata->reg_bus_hdl = of_icc_get(&mdata->pdev->dev, "qcom,sde-reg-bus");
if (mdata->reg_bus_hdl == NULL) {
pr_err("rotator: reg bus dt node missing\n");
return 0;
} else if (IS_ERR(mdata->reg_bus_hdl)) {
SDEROT_ERR("reg bus handle parsing failed\n");
mdata->reg_bus_hdl = NULL;
rc = -EINVAL;
} else {
SDEROT_DBG("rotator reg_bus_hdl parsing success\n");
}
return rc;
}
static void sde_mdp_bus_scale_unregister(struct sde_rot_data_type *mdata)
{
SDEROT_DBG("unregister reg_bus_hdl\n");
if (mdata->reg_bus_hdl) {
icc_put(mdata->reg_bus_hdl);
mdata->reg_bus_hdl = NULL;
}
}
static struct sde_rot_data_type *sde_rot_res;
struct sde_rot_data_type *sde_rot_get_mdata(void)
{
return sde_rot_res;
}
/*
* sde_rotator_base_init - initialize base rotator data/resource
*/
int sde_rotator_base_init(struct sde_rot_data_type **pmdata,
struct platform_device *pdev,
const void *drvdata)
{
int rc;
struct sde_rot_data_type *mdata;
/* already initialized (e.g. due to probe deferral), return early */
if (sde_rot_res) {
SDEROT_ERR("Rotator data already initialized, skip init\n");
return 0;
}
mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
if (mdata == NULL)
return -ENOMEM;
mdata->pdev = pdev;
sde_rot_res = mdata;
mutex_init(&mdata->reg_bus_lock);
INIT_LIST_HEAD(&mdata->reg_bus_clist);
rc = sde_rot_ioremap_byname(pdev, &mdata->sde_io, "mdp_phys");
if (rc) {
SDEROT_ERR("unable to map SDE base\n");
goto probe_done;
}
SDEROT_DBG("SDE ROT HW Base addr=0x%x len=0x%x\n",
(int) (unsigned long) mdata->sde_io.base,
mdata->sde_io.len);
rc = sde_rot_ioremap_byname(pdev, &mdata->vbif_nrt_io, "rot_vbif_phys");
if (rc) {
SDEROT_ERR("unable to map SDE ROT VBIF base\n");
goto probe_done;
}
SDEROT_DBG("SDE ROT VBIF HW Base addr=%pK len=0x%x\n",
mdata->vbif_nrt_io.base, mdata->vbif_nrt_io.len);
sde_mdp_parse_rt_rotator(pdev->dev.of_node);
rc = sde_mdp_parse_dt_misc(pdev, mdata);
if (rc) {
SDEROT_ERR("Error in device tree : misc\n");
goto probe_done;
}
rc = sde_mdp_bus_scale_register(mdata);
if (rc) {
SDEROT_ERR("unable to register bus scaling\n");
goto probe_done;
}
rc = sde_smmu_init(&pdev->dev);
if (rc) {
SDEROT_ERR("sde smmu init failed %d\n", rc);
goto probe_done;
}
*pmdata = mdata;
return 0;
probe_done:
return rc;
}
/*
* sde_rotator_base_destroy - clean up base rotator data/resource
*/
void sde_rotator_base_destroy(struct sde_rot_data_type *mdata)
{
struct platform_device *pdev;
if (!mdata || !mdata->pdev)
return;
pdev = mdata->pdev;
sde_rot_res = NULL;
sde_mdp_bus_scale_unregister(mdata);
sde_mdp_destroy_dt_misc(pdev, mdata);
sde_rot_iounmap(&mdata->vbif_nrt_io);
sde_rot_iounmap(&mdata->sde_io);
devm_kfree(&pdev->dev, mdata);
}

View File

@@ -0,0 +1,361 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_ROTATOR_BASE_H__
#define __SDE_ROTATOR_BASE_H__
#include <linux/types.h>
#include <linux/file.h>
#include <linux/kref.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/interconnect.h>
#include "sde_rotator_hwio.h"
#include "sde_rotator_io_util.h"
#include "sde_rotator_smmu.h"
#include "sde_rotator_formats.h"
#include <linux/pm_qos.h>
/* HW Revisions for different targets */
#define SDE_GET_MAJOR_REV(rev) ((rev) >> 28)
#define SDE_GET_MAJOR_MINOR(rev) ((rev) >> 16)
#define IS_SDE_MAJOR_SAME(rev1, rev2) \
(SDE_GET_MAJOR_REV((rev1)) == SDE_GET_MAJOR_REV((rev2)))
#define IS_SDE_MAJOR_MINOR_SAME(rev1, rev2) \
(SDE_GET_MAJOR_MINOR(rev1) == SDE_GET_MAJOR_MINOR(rev2))
#define SDE_MDP_REV(major, minor, step) \
((u32)(((major) & 0x000F) << 28) | \
(((minor) & 0x0FFF) << 16) | \
((step) & 0xFFFF))
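/*
 * e.g. SDE_MDP_REV(5, 4, 0) == 0x50040000: major in bits [31:28],
 * minor in bits [27:16], step in bits [15:0].
 */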
#define SDE_MDP_HW_REV_107 SDE_MDP_REV(1, 0, 7) /* 8996 v1.0 */
#define SDE_MDP_HW_REV_300 SDE_MDP_REV(3, 0, 0) /* 8998 v1.0 */
#define SDE_MDP_HW_REV_301 SDE_MDP_REV(3, 0, 1) /* 8998 v1.1 */
#define SDE_MDP_HW_REV_400 SDE_MDP_REV(4, 0, 0) /* sdm845 v1.0 */
#define SDE_MDP_HW_REV_410 SDE_MDP_REV(4, 1, 0) /* sdm670 v1.0 */
#define SDE_MDP_HW_REV_500 SDE_MDP_REV(5, 0, 0) /* sm8150 v1.0 */
#define SDE_MDP_HW_REV_520 SDE_MDP_REV(5, 2, 0) /* sdmmagpie v1.0 */
#define SDE_MDP_HW_REV_530 SDE_MDP_REV(5, 3, 0) /* sm6150 v1.0 */
#define SDE_MDP_HW_REV_540 SDE_MDP_REV(5, 4, 0) /* sdmtrinket v1.0 */
#define SDE_MDP_HW_REV_600 SDE_MDP_REV(6, 0, 0) /* msmnile+ v1.0 */
#define SDE_MDP_HW_REV_630 SDE_MDP_REV(6, 3, 0) /* bengal v1.0 */
#define SDE_MDP_HW_REV_660 SDE_MDP_REV(6, 6, 0) /* holi */
#define SDE_MDP_HW_REV_690 SDE_MDP_REV(6, 9, 0) /* blair */
#define SDE_MDP_HW_REV_870 SDE_MDP_REV(8, 7, 0) /* pitti */
#define SDE_MDP_VBIF_4_LEVEL_REMAPPER 4
#define SDE_MDP_VBIF_8_LEVEL_REMAPPER 8
/* XIN mapping */
#define XIN_SSPP 0
#define XIN_WRITEBACK 1
#define MAX_XIN 2
struct sde_mult_factor {
uint32_t numer;
uint32_t denom;
};
struct sde_mdp_set_ot_params {
u32 xin_id;
u32 num;
u32 width;
u32 height;
u32 fps;
u32 fmt;
u32 reg_off_vbif_lim_conf;
u32 reg_off_mdp_clk_ctrl;
u32 bit_off_mdp_clk_ctrl;
char __iomem *rotsts_base;
u32 rotsts_busy_mask;
};
/*
* struct sde_mdp_vbif_halt_params: parameters for issuing a halt request to vbif
* @xin_id: xin port number of vbif
* @reg_off_mdp_clk_ctrl: reg offset for vbif clock control
* @bit_off_mdp_clk_ctrl: bit offset for vbif clock control
* @xin_timeout: bit position indicates timeout on corresponding xin id
*/
struct sde_mdp_vbif_halt_params {
u32 xin_id;
u32 reg_off_mdp_clk_ctrl;
u32 bit_off_mdp_clk_ctrl;
u32 xin_timeout;
};
enum sde_bus_vote_type {
VOTE_INDEX_DISABLE,
VOTE_INDEX_76_MHZ,
VOTE_INDEX_150_MHZ,
VOTE_INDEX_300_MHZ,
VOTE_INDEX_MAX,
};
#define MAX_CLIENT_NAME_LEN 64
enum sde_qos_settings {
SDE_QOS_PER_PIPE_IB,
SDE_QOS_OVERHEAD_FACTOR,
SDE_QOS_CDP,
SDE_QOS_OTLIM,
SDE_QOS_PER_PIPE_LUT,
SDE_QOS_SIMPLIFIED_PREFILL,
SDE_QOS_VBLANK_PANIC_CTRL,
SDE_QOS_LUT,
SDE_QOS_DANGER_LUT,
SDE_QOS_SAFE_LUT,
SDE_QOS_MAX,
};
enum sde_inline_qos_settings {
SDE_INLINE_QOS_LUT,
SDE_INLINE_QOS_DANGER_LUT,
SDE_INLINE_QOS_SAFE_LUT,
SDE_INLINE_QOS_MAX,
};
/**
* enum sde_rot_type: SDE rotator HW version
* @SDE_ROT_TYPE_V1_0: V1.0 HW version
* @SDE_ROT_TYPE_V1_1: V1.1 HW version
*/
enum sde_rot_type {
SDE_ROT_TYPE_V1_0 = 0x10000000,
SDE_ROT_TYPE_V1_1 = 0x10010000,
SDE_ROT_TYPE_MAX,
};
/**
* enum sde_caps_settings: SDE rotator capability definition
* @SDE_CAPS_R1_WB: MDSS V1.x WB block
* @SDE_CAPS_R3_WB: MDSS V3.x WB block
* @SDE_CAPS_R3_1P5_DOWNSCALE: 1.5x downscale rotator support
* @SDE_CAPS_SBUF_1: stream buffer support for inline rotation
* @SDE_CAPS_UBWC_2: universal bandwidth compression version 2
* @SDE_CAPS_PARTIALWR: partial write override
* @SDE_CAPS_HW_TIMESTAMP: rotator has hw timestamp support
* @SDE_CAPS_UBWC_3: universal bandwidth compression version 3
* @SDE_CAPS_UBWC_4: universal bandwidth compression version 4
*/
enum sde_caps_settings {
SDE_CAPS_R1_WB,
SDE_CAPS_R3_WB,
SDE_CAPS_R3_1P5_DOWNSCALE,
SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
SDE_CAPS_SBUF_1,
SDE_CAPS_UBWC_2,
SDE_CAPS_PARTIALWR,
SDE_CAPS_HW_TIMESTAMP,
SDE_CAPS_UBWC_3,
SDE_CAPS_UBWC_4,
SDE_CAPS_MAX,
};
enum sde_bus_clients {
SDE_ROT_RT,
SDE_ROT_NRT,
SDE_MAX_BUS_CLIENTS
};
enum sde_rot_op {
SDE_ROT_RD,
SDE_ROT_WR,
SDE_ROT_OP_MAX
};
enum sde_rot_regdump_access {
SDE_ROT_REGDUMP_READ,
SDE_ROT_REGDUMP_WRITE,
SDE_ROT_REGDUMP_VBIF,
SDE_ROT_REGDUMP_MAX
};
struct reg_bus_client {
char name[MAX_CLIENT_NAME_LEN];
short usecase_ndx;
u32 id;
struct list_head list;
};
struct sde_smmu_client {
struct device *dev;
struct iommu_domain *rot_domain;
struct sde_module_power mp;
struct reg_bus_client *reg_bus_clt;
bool domain_attached;
int domain;
u32 sid;
};
/*
* struct sde_rot_bus_data: struct for bus setting
* @ab: average bandwidth in kilobytes per second
* @ib: peak bandwidth in kilobytes per second
*/
struct sde_rot_bus_data {
uint64_t ab; /* Arbitrated bandwidth */
uint64_t ib; /* Instantaneous bandwidth */
};
/*
* struct sde_rot_debug_bus: rotator debugbus header structure
* @wr_addr: write address for debugbus controller
* @block_id: rotator debugbus block id
* @test_id: rotator debugbus test id
*/
struct sde_rot_debug_bus {
u32 wr_addr;
u32 block_id;
u32 test_id;
};
struct sde_rot_vbif_debug_bus {
u32 disable_bus_addr;
u32 block_bus_addr;
u32 bit_offset;
u32 block_cnt;
u32 test_pnt_cnt;
};
struct sde_rot_regdump {
char *name;
u32 offset;
u32 len;
enum sde_rot_regdump_access access;
u32 value;
};
struct sde_rot_lut_cfg {
u32 creq_lut_0;
u32 creq_lut_1;
u32 danger_lut;
u32 safe_lut;
};
struct sde_rot_data_type {
u32 mdss_version;
struct platform_device *pdev;
struct platform_device *parent_pdev;
struct sde_io_data sde_io;
struct sde_io_data vbif_nrt_io;
char __iomem *mdp_base;
struct sde_smmu_client sde_smmu[SDE_IOMMU_MAX_DOMAIN];
/* bitmap to track qos applicable settings */
DECLARE_BITMAP(sde_qos_map, SDE_QOS_MAX);
DECLARE_BITMAP(sde_inline_qos_map, SDE_QOS_MAX);
/* bitmap to track capability settings */
DECLARE_BITMAP(sde_caps_map, SDE_CAPS_MAX);
u32 default_ot_rd_limit;
u32 default_ot_wr_limit;
u32 highest_bank_bit;
u32 rot_block_size;
/* register bus (AHB) */
struct icc_path *reg_bus_hdl;
u32 reg_bus_usecase_ndx;
struct list_head reg_bus_clist;
struct mutex reg_bus_lock;
u32 *vbif_rt_qos;
u32 *vbif_nrt_qos;
u32 npriority_lvl;
u32 vbif_xin_id[MAX_XIN];
struct pm_qos_request pm_qos_rot_cpu_req;
u32 rot_pm_qos_cpu_count;
u32 rot_pm_qos_cpu_mask;
u32 rot_pm_qos_cpu_dma_latency;
u32 vbif_memtype_count;
u32 *vbif_memtype;
int iommu_attached;
int iommu_ref_cnt;
struct sde_rot_vbif_debug_bus *nrt_vbif_dbg_bus;
u32 nrt_vbif_dbg_bus_size;
struct sde_rot_debug_bus *rot_dbg_bus;
u32 rot_dbg_bus_size;
struct sde_rot_regdump *regdump;
u32 regdump_size;
void *sde_rot_hw;
int sec_cam_en;
u32 enable_cdp[SDE_ROT_OP_MAX];
struct sde_rot_lut_cfg lut_cfg[SDE_ROT_OP_MAX];
struct sde_rot_lut_cfg inline_lut_cfg[SDE_ROT_OP_MAX];
bool clk_always_on;
};
int sde_rotator_base_init(struct sde_rot_data_type **pmdata,
struct platform_device *pdev,
const void *drvdata);
void sde_rotator_base_destroy(struct sde_rot_data_type *data);
#if IS_ENABLED(CONFIG_MSM_SDE_ROTATOR)
struct sde_rot_data_type *sde_rot_get_mdata(void);
#else
static inline struct sde_rot_data_type *sde_rot_get_mdata(void)
{
return NULL;
}
#endif /* CONFIG_MSM_SDE_ROTATOR */
struct reg_bus_client *sde_reg_bus_vote_client_create(char *client_name);
void sde_reg_bus_vote_client_destroy(struct reg_bus_client *client);
int sde_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx);
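/*
 * Illustrative reg-bus voting sequence (client name is arbitrary):
 *
 *	clt = sde_reg_bus_vote_client_create("rot_client");
 *	sde_update_reg_bus_vote(clt, VOTE_INDEX_76_MHZ);
 *	...
 *	sde_update_reg_bus_vote(clt, VOTE_INDEX_DISABLE);
 *	sde_reg_bus_vote_client_destroy(clt);
 */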
u32 sde_apply_comp_ratio_factor(u32 quota,
struct sde_mdp_format_params *fmt,
struct sde_mult_factor *factor);
u32 sde_mdp_get_ot_limit(u32 width, u32 height, u32 pixfmt, u32 fps, u32 is_rd);
void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params);
void vbif_lock(struct platform_device *parent_pdev);
void vbif_unlock(struct platform_device *parent_pdev);
void sde_mdp_halt_vbif_xin(struct sde_mdp_vbif_halt_params *params);
int sde_mdp_init_vbif(void);
const struct sde_rot_bus_data *sde_get_rot_reg_bus_value(u32 usecase_ndx);
#define SDE_VBIF_WRITE(mdata, offset, value) \
(sde_reg_w(&mdata->vbif_nrt_io, offset, value, 0))
#define SDE_VBIF_READ(mdata, offset) \
(sde_reg_r(&mdata->vbif_nrt_io, offset, 0))
#define SDE_REG_WRITE(mdata, offset, value) \
sde_reg_w(&mdata->sde_io, offset, value, 0)
#define SDE_REG_READ(mdata, offset) \
sde_reg_r(&mdata->sde_io, offset, 0)
#define ATRACE_END(name) trace_rot_mark_write(current->tgid, name, 0)
#define ATRACE_BEGIN(name) trace_rot_mark_write(current->tgid, name, 1)
#define ATRACE_INT(name, value) \
trace_rot_trace_counter(current->tgid, name, value)
#endif /* __SDE_ROTATOR_BASE_H__ */

File diff suppressed because it is too large

ファイルの表示

@@ -0,0 +1,868 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef SDE_ROTATOR_CORE_H
#define SDE_ROTATOR_CORE_H
#include <linux/list.h>
#include <linux/file.h>
#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/pm_runtime.h>
#include <linux/kthread.h>
#include "sde_rotator_base.h"
#include "sde_rotator_util.h"
#include "sde_rotator_sync.h"
/**********************************************************************
* Rotation request flag
**********************************************************************/
/* no rotation flag */
#define SDE_ROTATION_NOP 0x01
/* left/right flip */
#define SDE_ROTATION_FLIP_LR 0x02
/* up/down flip */
#define SDE_ROTATION_FLIP_UD 0x04
/* rotate 90 degree */
#define SDE_ROTATION_90 0x08
/* rotate 180 degree */
#define SDE_ROTATION_180 (SDE_ROTATION_FLIP_LR | SDE_ROTATION_FLIP_UD)
/* rotate 270 degree */
#define SDE_ROTATION_270 (SDE_ROTATION_90 | SDE_ROTATION_180)
/* format is interlaced */
#define SDE_ROTATION_DEINTERLACE 0x10
/* secure data */
#define SDE_ROTATION_SECURE 0x80
/* verify input configuration only */
#define SDE_ROTATION_VERIFY_INPUT_ONLY 0x10000
/* use client provided dma buf instead of ion fd */
#define SDE_ROTATION_EXT_DMA_BUF 0x20000
/* secure camera operation */
#define SDE_ROTATION_SECURE_CAMERA 0x40000
/* use client mapped i/o virtual address */
#define SDE_ROTATION_EXT_IOVA 0x80000
/* use client provided clock/bandwidth parameters */
#define SDE_ROTATION_EXT_PERF 0x100000
#define SDE_ROTATION_BUS_PATH_MAX 0x2
/*
* The AMC bucket denotes constraints that are applied to hardware when
* icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
* when the execution environment transitions between active and low power mode.
*/
#define QCOM_ICC_BUCKET_AMC 0
#define QCOM_ICC_BUCKET_WAKE 1
#define QCOM_ICC_BUCKET_SLEEP 2
#define QCOM_ICC_NUM_BUCKETS 3
#define QCOM_ICC_TAG_AMC BIT(QCOM_ICC_BUCKET_AMC)
#define QCOM_ICC_TAG_WAKE BIT(QCOM_ICC_BUCKET_WAKE)
#define QCOM_ICC_TAG_SLEEP BIT(QCOM_ICC_BUCKET_SLEEP)
#define QCOM_ICC_TAG_ACTIVE_ONLY (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
#define QCOM_ICC_TAG_ALWAYS (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
QCOM_ICC_TAG_SLEEP)
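/*
 * Illustrative use of these tags (assumed caller pattern, not taken from
 * this header): a path that should only be voted while the subsystem is
 * active would be tagged before the bandwidth request, e.g.
 *
 *	icc_set_tag(path, QCOM_ICC_TAG_ACTIVE_ONLY);
 *	icc_set_bw(path, ab, ib);
 */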
/**********************************************************************
* configuration structures
**********************************************************************/
/*
* struct sde_rotation_buf_info - input/output buffer configuration
* @width: width of buffer region to be processed
* @height: height of buffer region to be processed
* @format: pixel format of buffer
* @comp_ratio: compression ratio for the session
* @sbuf: true if buffer is streaming buffer
*/
struct sde_rotation_buf_info {
uint32_t width;
uint32_t height;
uint32_t format;
struct sde_mult_factor comp_ratio;
bool sbuf;
};
/*
* struct sde_rotation_config - rotation configuration for given session
* @session_id: identifier of the given session
* @input: input buffer information
* @output: output buffer information
* @frame_rate: session frame rate in fps
* @clk_rate: requested rotator clock rate if SDE_ROTATION_EXT_PERF is set
* @data_bw: requested data bus bandwidth if SDE_ROTATION_EXT_PERF is set
* @flags: configuration flags, e.g. rotation angle, flip, etc...
*/
struct sde_rotation_config {
uint32_t session_id;
struct sde_rotation_buf_info input;
struct sde_rotation_buf_info output;
uint32_t frame_rate;
uint64_t clk_rate;
uint64_t data_bw;
uint32_t flags;
};
enum sde_rotator_ts {
SDE_ROTATOR_TS_SRCQB, /* enqueue source buffer */
SDE_ROTATOR_TS_DSTQB, /* enqueue destination buffer */
SDE_ROTATOR_TS_FENCE, /* wait for source buffer fence */
SDE_ROTATOR_TS_QUEUE, /* wait for h/w resource */
SDE_ROTATOR_TS_COMMIT, /* prepare h/w command */
SDE_ROTATOR_TS_START, /* wait for h/w kickoff rdy (inline) */
SDE_ROTATOR_TS_FLUSH, /* initiate h/w processing */
SDE_ROTATOR_TS_DONE, /* receive h/w completion */
SDE_ROTATOR_TS_RETIRE, /* signal destination buffer fence */
SDE_ROTATOR_TS_SRCDQB, /* dequeue source buffer */
SDE_ROTATOR_TS_DSTDQB, /* dequeue destination buffer */
SDE_ROTATOR_TS_MAX
};
enum sde_rotator_clk_type {
SDE_ROTATOR_CLK_MDSS_AHB,
SDE_ROTATOR_CLK_MDSS_AXI,
SDE_ROTATOR_CLK_MDSS_ROT_SUB,
SDE_ROTATOR_CLK_MDSS_ROT,
SDE_ROTATOR_CLK_MNOC_AHB,
SDE_ROTATOR_CLK_GCC_AHB,
SDE_ROTATOR_CLK_GCC_AXI,
SDE_ROTATOR_CLK_MAX
};
enum sde_rotator_trigger {
SDE_ROTATOR_TRIGGER_IMMEDIATE,
SDE_ROTATOR_TRIGGER_VIDEO,
SDE_ROTATOR_TRIGGER_COMMAND,
};
enum sde_rotator_mode {
SDE_ROTATOR_MODE_OFFLINE,
SDE_ROTATOR_MODE_SBUF,
SDE_ROTATOR_MODE_MAX,
};
struct sde_rotation_item {
/* rotation request flag */
uint32_t flags;
/* rotation trigger mode */
uint32_t trigger;
/* prefill bandwidth in Bps */
uint64_t prefill_bw;
/* Source crop rectangle */
struct sde_rect src_rect;
/* Destination rectangle */
struct sde_rect dst_rect;
/* Input buffer for the request */
struct sde_layer_buffer input;
/* The output buffer for the request */
struct sde_layer_buffer output;
/*
* DMA pipe selection for this request by client:
* 0: DMA pipe 0
* 1: DMA pipe 1
* or SDE_ROTATION_HW_ANY if client wants
* driver to allocate any that is available
*
* OR
*
* Reserved
*/
uint32_t pipe_idx;
/*
* Write-back block selection for this request by client:
* 0: Write-back block 0
* 1: Write-back block 1
* or SDE_ROTATION_HW_ANY if client wants
* driver to allocate any that is available
*
* OR
*
* Priority selection for this request by client:
* 0: Highest
* 1..n: Limited by the lowest available priority
*/
uint32_t wb_idx;
/*
* Sequence ID of this request within the session
*/
uint32_t sequence_id;
/* Which session ID is this request scheduled on */
uint32_t session_id;
/* Time stamp for profiling purposes */
ktime_t *ts;
/* Completion structure for inline rotation */
struct completion inline_start;
};
/*
* Defining characteristics of the rotation work; the corresponding fmt
* and ROI checks are done when the session is opened
*/
#define SDE_ROT_DEFINING_FLAG_BITS SDE_ROTATION_90
struct sde_rot_entry;
struct sde_rot_perf;
struct sde_rot_clk {
struct clk *clk;
char clk_name[32];
unsigned long rate;
};
struct sde_rot_hw_resource {
u32 wb_id;
u32 pending_count;
atomic_t num_active;
int max_active;
wait_queue_head_t wait_queue;
};
struct sde_rot_queue {
struct kthread_worker rot_kw;
struct task_struct *rot_thread;
struct sde_rot_timeline *timeline;
struct sde_rot_hw_resource *hw;
};
struct sde_rot_queue_v1 {
struct kthread_worker *rot_kw;
struct task_struct *rot_thread;
struct sde_rot_timeline *timeline;
struct sde_rot_hw_resource *hw;
};
/*
* struct sde_rot_entry_container - rotation request
* @list: list of active requests managed by rotator manager
* @flags: reserved
* @count: size of rotation entries
* @pending_count: count of entries pending completion
* @failed_count: count of entries failed completion
* @finished: true if client is finished with the request
* @retireq: workqueue to post completion notification
* @retire_work: work for completion notification
* @entries: array of rotation entries
*/
struct sde_rot_entry_container {
struct list_head list;
u32 flags;
u32 count;
atomic_t pending_count;
atomic_t failed_count;
struct kthread_worker *retire_kw;
struct kthread_work *retire_work;
bool finished;
struct sde_rot_entry *entries;
};
struct sde_rot_mgr;
struct sde_rot_file_private;
/*
* struct sde_rot_entry - rotation entry
* @item: rotation item
* @commit_work: work descriptor for commit handler
* @done_work: work descriptor for done handler
* @commitq: pointer to commit handler rotator queue
* @fenceq: pointer to fence signaling rotator queue
* @doneq: pointer to done handler rotator queue
* @request: pointer to containing request
* @src_buf: descriptor of source buffer
* @dst_buf: descriptor of destination buffer
* @input_fence: pointer to input fence for when input content is available
* @output_fence: pointer to output fence for when output content is available
* @output_signaled: true if output fence of this entry has been signaled
* @dnsc_factor_w: calculated width downscale factor for this entry
* @dnsc_factor_h: calculated height downscale factor for this entry
* @perf: pointer to performance configuration associated with this entry
* @work_assigned: true if this item is assigned to h/w queue/unit
* @private: pointer to controlling session context
*/
struct sde_rot_entry {
struct sde_rotation_item item;
struct kthread_work commit_work;
struct kthread_work done_work;
struct sde_rot_queue *commitq;
struct sde_rot_queue_v1 *fenceq;
struct sde_rot_queue *doneq;
struct sde_rot_entry_container *request;
struct sde_mdp_data src_buf;
struct sde_mdp_data dst_buf;
struct sde_rot_sync_fence *input_fence;
struct sde_rot_sync_fence *output_fence;
bool output_signaled;
u32 dnsc_factor_w;
u32 dnsc_factor_h;
struct sde_rot_perf *perf;
bool work_assigned; /* Used when cleaning up work_distribution */
struct sde_rot_file_private *private;
};
/*
* struct sde_rot_trace_entry - structure used to pass info to trace
*/
struct sde_rot_trace_entry {
u32 wb_idx;
u32 flags;
u32 input_format;
u32 input_width;
u32 input_height;
u32 src_x;
u32 src_y;
u32 src_w;
u32 src_h;
u32 output_format;
u32 output_width;
u32 output_height;
u32 dst_x;
u32 dst_y;
u32 dst_w;
u32 dst_h;
};
/*
* struct sde_rot_perf - rotator session performance configuration
* @list: list of performance configuration under one session
* @config: current rotation configuration
* @clk_rate: current clock rate in Hz
* @bw: current bandwidth in byte per second
* @work_dis_lock: serialization lock for updating work distribution (not used)
* @work_distribution: work distribution among multiple hardware queue/unit
* @last_wb_idx: last queue/unit index, used to account for pre-distributed work
* @rdot_limit: read OT limit of this session
* @wrot_limit: write OT limit of this session
*/
struct sde_rot_perf {
struct list_head list;
struct sde_rotation_config config;
unsigned long clk_rate;
u64 bw;
struct mutex work_dis_lock;
u32 *work_distribution;
int last_wb_idx; /* last known wb index, used when above count is 0 */
u32 rdot_limit;
u32 wrot_limit;
};
/*
* struct sde_rot_file_private - rotator manager per session context
* @list: list of all session context
* @req_list: list of rotation request for this session
* @perf_list: list of performance configuration for this session (only one)
* @mgr: pointer to the controlling rotator manager
* @fenceq: pointer to rotator queue to signal when entry is done
*/
struct sde_rot_file_private {
struct list_head list;
struct list_head req_list;
struct list_head perf_list;
struct sde_rot_mgr *mgr;
struct sde_rot_queue_v1 *fenceq;
};
/**
* struct sde_rot_bus_data_type: power handle struct for data bus
* @data_paths_cnt: number of rt data path ports
* @curr_quota_val: save the current bus value
* @curr_bw_uc_idx: current reg bus value index
* @bus_active_only: AMC support, can set the bus path WAKE/SLEEP
*/
struct sde_rot_bus_data_type {
struct icc_path *data_bus_hdl[SDE_ROTATION_BUS_PATH_MAX];
u32 data_paths_cnt;
u64 curr_quota_val;
u32 curr_bw_uc_idx;
bool bus_active_only;
};
/*
* struct sde_rot_mgr - core rotator manager
* @lock: serialization lock to rotator manager functions
* @device_suspended: 0 if device is not suspended; non-zero suspended
* @pdev: pointer to controlling platform device
* @device: pointer to controlling device
* @queue_count: number of hardware queue/unit available
* @commitq: array of rotator commit queue corresponding to hardware queue
* @doneq: array of rotator done queue corresponding to hardware queue
* @file_list: list of all sessions managed by rotator manager
* @pending_close_bw_vote: bandwidth of closed sessions with pending work
* @minimum_bw_vote: minimum bandwidth required for current use case
* @enable_bw_vote: minimum bandwidth required for power enable
* @data_bus: data bus configuration state
* @reg_bus: register bus configuration state
* @module_power: power/clock configuration state
* @regulator_enable: true if foot switch is enabled; false otherwise
* @res_ref_cnt: reference count of how many times resource is requested
* @rot_enable_clk_cnt: reference count of how many times clock is requested
* @rot_clk: array of rotator and periphery clocks
* @num_rot_clk: size of the rotator clock array
* @rdot_limit: current read OT limit
* @wrot_limit: current write OT limit
* @hwacquire_timeout: maximum wait time for hardware availability in msec
* @pixel_per_clk: rotator hardware performance in pixels per clock
* @fudge_factor: fudge factor for clock calculation
* @overhead: software overhead for offline rotation in msec
* @min_rot_clk: minimum rotator clock rate
* @max_rot_clk: maximum allowed rotator clock rate
* @sbuf_ctx: pointer to sbuf session context
* @ops_xxx: function pointers of rotator HAL layer
* @hw_data: private handle of rotator HAL layer
*/
struct sde_rot_mgr {
struct mutex lock;
atomic_t device_suspended;
struct platform_device *pdev;
struct device *device;
struct kthread_work thread_priority_work;
/*
* Managing rotation queues, depends on
* how many hw pipes available on the system
*/
int queue_count;
struct sde_rot_queue *commitq;
struct sde_rot_queue *doneq;
/*
* managing all the open file sessions to bw calculations,
* and resource clean up during suspend
*/
struct list_head file_list;
u64 pending_close_bw_vote;
u64 minimum_bw_vote;
u64 enable_bw_vote;
struct sde_rot_bus_data_type data_bus;
struct sde_rot_bus_data_type reg_bus;
/* Module power is only used for regulator management */
struct sde_module_power module_power;
bool regulator_enable;
int res_ref_cnt;
int rot_enable_clk_cnt;
struct sde_rot_clk *rot_clk;
int num_rot_clk;
u32 rdot_limit;
u32 wrot_limit;
u32 hwacquire_timeout;
struct sde_mult_factor pixel_per_clk;
struct sde_mult_factor fudge_factor;
struct sde_mult_factor overhead;
unsigned long min_rot_clk;
unsigned long max_rot_clk;
struct sde_rot_file_private *sbuf_ctx;
int (*ops_config_hw)(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry);
int (*ops_cancel_hw)(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry);
int (*ops_abort_hw)(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry);
int (*ops_kickoff_entry)(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry);
int (*ops_wait_for_entry)(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry);
struct sde_rot_hw_resource *(*ops_hw_alloc)(struct sde_rot_mgr *mgr,
u32 pipe_id, u32 wb_id);
void (*ops_hw_free)(struct sde_rot_mgr *mgr,
struct sde_rot_hw_resource *hw);
int (*ops_hw_init)(struct sde_rot_mgr *mgr);
void (*ops_hw_pre_pmevent)(struct sde_rot_mgr *mgr, bool pmon);
void (*ops_hw_post_pmevent)(struct sde_rot_mgr *mgr, bool pmon);
void (*ops_hw_destroy)(struct sde_rot_mgr *mgr);
ssize_t (*ops_hw_show_caps)(struct sde_rot_mgr *mgr,
struct device_attribute *attr, char *buf, ssize_t len);
ssize_t (*ops_hw_show_state)(struct sde_rot_mgr *mgr,
struct device_attribute *attr, char *buf, ssize_t len);
int (*ops_hw_create_debugfs)(struct sde_rot_mgr *mgr,
struct dentry *debugfs_root);
int (*ops_hw_validate_entry)(struct sde_rot_mgr *mgr,
struct sde_rot_entry *entry);
u32 (*ops_hw_get_pixfmt)(struct sde_rot_mgr *mgr, int index,
bool input, u32 mode);
int (*ops_hw_is_valid_pixfmt)(struct sde_rot_mgr *mgr, u32 pixfmt,
bool input, u32 mode);
int (*ops_hw_get_downscale_caps)(struct sde_rot_mgr *mgr, char *caps,
int len);
int (*ops_hw_get_maxlinewidth)(struct sde_rot_mgr *mgr);
void (*ops_hw_dump_status)(struct sde_rot_mgr *mgr);
void *hw_data;
};
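/*
 * Thin inline wrappers around the rotator HAL ops above; each one degrades
 * gracefully (returns false/0 or a conservative default) when the
 * corresponding op is not implemented.
 */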
static inline int sde_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr,
u32 pixfmt, bool input, u32 mode)
{
if (mgr && mgr->ops_hw_is_valid_pixfmt)
return mgr->ops_hw_is_valid_pixfmt(mgr, pixfmt, input, mode);
return false;
}
static inline u32 sde_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
int index, bool input, u32 mode)
{
if (mgr && mgr->ops_hw_get_pixfmt)
return mgr->ops_hw_get_pixfmt(mgr, index, input, mode);
return 0;
}
static inline int sde_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
char *caps, int len)
{
if (mgr && mgr->ops_hw_get_downscale_caps)
return mgr->ops_hw_get_downscale_caps(mgr, caps, len);
return 0;
}
static inline int sde_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
{
if (mgr && mgr->ops_hw_get_maxlinewidth)
return mgr->ops_hw_get_maxlinewidth(mgr);
return 2048;
}
static inline int __compare_session_item_rect(
struct sde_rotation_buf_info *s_rect,
struct sde_rect *i_rect, uint32_t i_fmt, bool src)
{
if ((s_rect->width != i_rect->w) || (s_rect->height != i_rect->h) ||
(s_rect->format != i_fmt)) {
SDEROT_DBG(
"%s: session{%u,%u}f:%u mismatch from item{%u,%u}f:%u\n",
(src ? "src":"dst"), s_rect->width, s_rect->height,
s_rect->format, i_rect->w, i_rect->h, i_fmt);
return -EINVAL;
}
return 0;
}
/*
* Compare all important flag bits associated with rotation between session
* config and item request. Format and ROI validation is done during open
* session and is based on certain defining bits. If these defining bits
* differ in the item request, the rotation item may not be a valid
* configuration.
*/
static inline int __compare_session_rotations(uint32_t cfg_flag,
uint32_t item_flag)
{
cfg_flag &= SDE_ROT_DEFINING_FLAG_BITS;
item_flag &= SDE_ROT_DEFINING_FLAG_BITS;
if (cfg_flag != item_flag) {
SDEROT_DBG(
"Rotation degree request different from open session\n");
return -EINVAL;
}
return 0;
}
/*
* sde_rotator_core_init - initialize rotator manager for the given platform
* device
* @pmgr: Pointer to pointer of the newly initialized rotator manager
* @pdev: Pointer to platform device
* return: 0 if success; error code otherwise
*/
int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
struct platform_device *pdev);
/*
* sde_rotator_core_destroy - destroy given rotator manager
* @mgr: Pointer to rotator manager
* return: none
*/
void sde_rotator_core_destroy(struct sde_rot_mgr *mgr);
/*
* sde_rotator_core_dump - perform register dump
* @mgr: Pointer to rotator manager
*/
void sde_rotator_core_dump(struct sde_rot_mgr *mgr);
/*
* sde_rotator_session_open - open a new rotator per file session
* @mgr: Pointer to rotator manager
* @pprivate: Pointer to pointer of the newly initialized per file session
* @session_id: identifier of the newly created session
* @queue: Pointer to fence queue of the new session
* return: 0 if success; error code otherwise
*/
int sde_rotator_session_open(struct sde_rot_mgr *mgr,
struct sde_rot_file_private **pprivate, int session_id,
struct sde_rot_queue_v1 *queue);
/*
* sde_rotator_session_close - close the given rotator per file session
* @mgr: Pointer to rotator manager
* @private: Pointer to per file session
* @session_id: identifier of the session
* return: none
*/
void sde_rotator_session_close(struct sde_rot_mgr *mgr,
struct sde_rot_file_private *private, int session_id);
/*
* sde_rotator_session_config - configure the given rotator per file session
* @mgr: Pointer to rotator manager
* @private: Pointer to per file session
* @config: Pointer to rotator configuration
* return: 0 if success; error code otherwise
*/
int sde_rotator_session_config(struct sde_rot_mgr *mgr,
struct sde_rot_file_private *private,
struct sde_rotation_config *config);
/*
* sde_rotator_session_validate - validate session configuration
* @mgr: Pointer to rotator manager
* @private: Pointer to per file session
* @config: Pointer to rotator configuration
* return: 0 if success; error code otherwise
*/
int sde_rotator_session_validate(struct sde_rot_mgr *mgr,
struct sde_rot_file_private *private,
struct sde_rotation_config *config);
/*
* sde_rotator_req_init - allocate a new request and initialize it with the
* given array of rotation items
* @rot_dev: Pointer to rotator device
* @private: Pointer to rotator manager per file context
* @items: Pointer to array of rotation item
* @count: size of rotation item array
* @flags: rotation request flags
* return: Pointer to new rotation request if success; ERR_PTR otherwise
*/
struct sde_rot_entry_container *sde_rotator_req_init(
struct sde_rot_mgr *rot_dev,
struct sde_rot_file_private *private,
struct sde_rotation_item *items,
u32 count, u32 flags);
/*
* sde_rotator_req_reset_start - reset inline h/w 'start' indicator
* For inline rotations, the time of rotation start is not controlled
* by the rotator driver. This function resets an internal 'start'
* indicator that allows the rotator to delay its timeout handling
* until the inline rotation has actually started.
* @mgr: Pointer to rotator manager
* @req: Pointer to rotation request
*/
void sde_rotator_req_reset_start(struct sde_rot_mgr *mgr,
struct sde_rot_entry_container *req);
/*
* sde_rotator_req_set_start - set inline h/w 'start' indicator
* @mgr: Pointer to rotator manager
* @req: Pointer to rotation request
*/
void sde_rotator_req_set_start(struct sde_rot_mgr *mgr,
struct sde_rot_entry_container *req);
/*
* sde_rotator_req_wait_start - wait for inline h/w 'start' indicator
* @mgr: Pointer to rotator manager
* @req: Pointer to rotation request
* return: Zero on success
*/
int sde_rotator_req_wait_start(struct sde_rot_mgr *mgr,
struct sde_rot_entry_container *req);
/*
* sde_rotator_req_finish - notify manager that client is finished with the
* given request and manager can release the request as required
* @mgr: Pointer to rotator manager
* @private: Pointer to rotator manager per file context
* @req: Pointer to rotation request
* return: none
*/
void sde_rotator_req_finish(struct sde_rot_mgr *mgr,
struct sde_rot_file_private *private,
struct sde_rot_entry_container *req);
/*
* sde_rotator_abort_inline_request - abort inline rotation request after start
* This function allows inline rotation requests to be aborted after
* sde_rotator_req_set_start has already been issued.
* @mgr: Pointer to rotator manager
* @private: Pointer to rotator manager per file context
* @req: Pointer to rotation request
* return: none
*/
void sde_rotator_abort_inline_request(struct sde_rot_mgr *mgr,
struct sde_rot_file_private *private,
struct sde_rot_entry_container *req);
/*
* sde_rotator_handle_request_common - add the given request to rotator
* manager and clean up completed requests
* @rot_dev: Pointer to rotator device
* @private: Pointer to rotator manager per file context
* @req: Pointer to rotation request
* return: 0 if success; error code otherwise
*/
int sde_rotator_handle_request_common(struct sde_rot_mgr *rot_dev,
struct sde_rot_file_private *ctx,
struct sde_rot_entry_container *req);
/*
* sde_rotator_queue_request - queue/schedule the given request for h/w commit
* @rot_dev: Pointer to rotator device
* @private: Pointer to rotator manager per file context
* @req: Pointer to rotation request
* return: none
*/
void sde_rotator_queue_request(struct sde_rot_mgr *rot_dev,
struct sde_rot_file_private *ctx,
struct sde_rot_entry_container *req);
/*
* sde_rotator_verify_config_all - verify given rotation configuration
* @rot_dev: Pointer to rotator device
* @config: Pointer to rotator configuration
* return: 0 if success; error code otherwise
*/
int sde_rotator_verify_config_all(struct sde_rot_mgr *rot_dev,
struct sde_rotation_config *config);
/*
* sde_rotator_verify_config_input - verify rotation input configuration
* @rot_dev: Pointer to rotator device
* @config: Pointer to rotator configuration
* return: 0 if success; error code otherwise
*/
int sde_rotator_verify_config_input(struct sde_rot_mgr *rot_dev,
struct sde_rotation_config *config);
/*
* sde_rotator_verify_config_output - verify rotation output configuration
* @rot_dev: Pointer to rotator device
* @config: Pointer to rotator configuration
* return: 0 if success; error code otherwise
*/
int sde_rotator_verify_config_output(struct sde_rot_mgr *rot_dev,
struct sde_rotation_config *config);
/*
* sde_rotator_validate_request - validates given rotation request with
* previous rotator configuration
* @rot_dev: Pointer to rotator device
 * @ctx: Pointer to rotator manager per file context
* @req: Pointer to rotation request
* return: 0 if success; error code otherwise
*/
int sde_rotator_validate_request(struct sde_rot_mgr *rot_dev,
struct sde_rot_file_private *ctx,
struct sde_rot_entry_container *req);
/*
* sde_rotator_clk_ctrl - enable/disable rotator clock with reference counting
* @mgr: Pointer to rotator manager
* @enable: true to enable clock; false to disable clock
* return: 0 if success; error code otherwise
*/
int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable);
/*
 * sde_rotator_resource_ctrl_enabled - check if resource control is enabled
 * @mgr: Pointer to rotator manager
 * Return: true if enabled; false otherwise
 */
static inline int sde_rotator_resource_ctrl_enabled(struct sde_rot_mgr *mgr)
{
return mgr->regulator_enable;
}
/*
* sde_rotator_cancel_all_requests - cancel all outstanding requests
* @mgr: Pointer to rotator manager
* @private: Pointer to rotator manager per file context
*/
void sde_rotator_cancel_all_requests(struct sde_rot_mgr *mgr,
struct sde_rot_file_private *private);
/*
* sde_rot_mgr_lock - serialization lock prior to rotator manager calls
* @mgr: Pointer to rotator manager
*/
static inline void sde_rot_mgr_lock(struct sde_rot_mgr *mgr)
{
mutex_lock(&mgr->lock);
}
/*
 * sde_rot_mgr_unlock - serialization unlock after rotator manager calls
* @mgr: Pointer to rotator manager
*/
static inline void sde_rot_mgr_unlock(struct sde_rot_mgr *mgr)
{
mutex_unlock(&mgr->lock);
}
/*
* sde_rot_mgr_pd_enabled - return true if power domain is enabled
* @mgr: Pointer to rotator manager
*/
static inline bool sde_rot_mgr_pd_enabled(struct sde_rot_mgr *mgr)
{
return mgr && mgr->device && mgr->device->pm_domain;
}
#if defined(CONFIG_PM)
int sde_rotator_runtime_resume(struct device *dev);
int sde_rotator_runtime_suspend(struct device *dev);
int sde_rotator_runtime_idle(struct device *dev);
#endif
#if defined(CONFIG_PM_SLEEP)
int sde_rotator_pm_suspend(struct device *dev);
int sde_rotator_pm_resume(struct device *dev);
#endif
#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
int sde_rotator_suspend(struct platform_device *dev, pm_message_t state);
int sde_rotator_resume(struct platform_device *dev);
#else
#define sde_rotator_suspend NULL
#define sde_rotator_resume NULL
#endif
#endif /* __SDE_ROTATOR_CORE_H__ */
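For reference, a minimal caller sketch of the manager entry points declared above. This is a hypothetical helper, not part of the driver: it assumes mgr, private and req were obtained through the normal open/config path, and simply serializes validate, add and queue with the manager lock.

static int example_submit_request(struct sde_rot_mgr *mgr,
		struct sde_rot_file_private *private,
		struct sde_rot_entry_container *req)
{
	int ret;

	sde_rot_mgr_lock(mgr);
	ret = sde_rotator_validate_request(mgr, private, req);
	if (!ret)
		ret = sde_rotator_handle_request_common(mgr, private, req);
	if (!ret)
		sde_rotator_queue_request(mgr, private, req);
	sde_rot_mgr_unlock(mgr);

	return ret;
}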

(File diff omitted: too large to display.)


@@ -0,0 +1,84 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_ROTATOR_DEBUG_H__
#define __SDE_ROTATOR_DEBUG_H__
#include <linux/types.h>
#include <linux/dcache.h>
#define SDE_ROT_DATA_LIMITER (-1)
#define SDE_ROT_EVTLOG_TOUT_DATA_LIMITER (NULL)
#define SDE_ROT_EVTLOG_PANIC 0xdead
#define SDE_ROT_EVTLOG_FATAL 0xbad
#define SDE_ROT_EVTLOG_ERROR 0xebad
enum sde_rot_dbg_reg_dump_flag {
SDE_ROT_DBG_DUMP_IN_LOG = BIT(0),
SDE_ROT_DBG_DUMP_IN_MEM = BIT(1),
};
enum sde_rot_dbg_evtlog_flag {
SDE_ROT_EVTLOG_DEFAULT = BIT(0),
SDE_ROT_EVTLOG_IOMMU = BIT(1),
SDE_ROT_EVTLOG_DBG = BIT(6),
SDE_ROT_EVTLOG_ALL = BIT(7)
};
#define SDEROT_EVTLOG(...) sde_rot_evtlog(__func__, __LINE__, \
SDE_ROT_EVTLOG_DEFAULT, ##__VA_ARGS__, SDE_ROT_DATA_LIMITER)
#define SDEROT_EVTLOG_TOUT_HANDLER(...) \
sde_rot_evtlog_tout_handler(false, __func__, ##__VA_ARGS__, \
SDE_ROT_EVTLOG_TOUT_DATA_LIMITER)
#if defined(CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG) && \
defined(CONFIG_DEBUG_FS)
void sde_rot_evtlog(const char *name, int line, int flag, ...);
void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...);
#else
static inline
void sde_rot_evtlog(const char *name, int line, int flag, ...)
{
}
static inline
void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...)
{
}
#endif
struct sde_rotator_device;
struct sde_rotator_debug_base {
char name[80];
void __iomem *base;
size_t off;
size_t cnt;
size_t max_offset;
char *buf;
size_t buf_len;
struct sde_rot_mgr *mgr;
struct mutex buflock;
};
#if defined(CONFIG_DEBUG_FS)
struct dentry *sde_rotator_create_debugfs(
struct sde_rotator_device *rot_dev);
void sde_rotator_destroy_debugfs(struct dentry *debugfs);
#else
static inline
struct dentry *sde_rotator_create_debugfs(
struct sde_rotator_device *rot_dev)
{
return NULL;
}
static inline
void sde_rotator_destroy_debugfs(struct dentry *debugfs)
{
}
#endif
#endif /* __SDE_ROTATOR_DEBUG_H__ */
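Usage sketch for the event-log macros above, illustrative only. SDEROT_EVTLOG() appends SDE_ROT_DATA_LIMITER itself, so callers pass only their integer values; the dump-block names passed to SDEROT_EVTLOG_TOUT_HANDLER() below are placeholders.

static void example_evtlog(u32 session_id, int err)
{
	/* records __func__, __LINE__ and the two values below */
	SDEROT_EVTLOG(session_id, err);

	if (err)
		/* placeholder dump names, illustration only */
		SDEROT_EVTLOG_TOUT_HANDLER("rot", "panic");
}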

(File diff omitted: too large to display.)


@@ -0,0 +1,259 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_ROTATOR_DEV_H__
#define __SDE_ROTATOR_DEV_H__
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/ktime.h>
#include <linux/iommu.h>
#include <linux/dma-buf.h>
#include <linux/platform_device.h>
#include <linux/soc/qcom/llcc-qcom.h>
#include <linux/kthread.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ctrls.h>
#include <media/msm_sde_rotator.h>
#include "sde_rotator_core.h"
#include "sde_rotator_sync.h"
/* Rotator device name */
#define SDE_ROTATOR_DRV_NAME "sde_rotator"
/* Event logging constants */
#define SDE_ROTATOR_NUM_EVENTS 4096
#define SDE_ROTATOR_NUM_TIMESTAMPS SDE_ROTATOR_TS_MAX
/* maximum number of outstanding requests per ctx session */
#define SDE_ROTATOR_REQUEST_MAX 2
#define MAX_ROT_OPEN_SESSION 16
struct sde_rotator_device;
struct sde_rotator_ctx;
/*
 * struct sde_rotator_buf_handle - Structure containing rotator buffer information.
* @fd: ion file descriptor from which this buffer is imported.
* @rot_dev: Pointer to rotator device.
* @ctx: Pointer to rotator context.
* @size: Size of the buffer.
* @addr: Address of rotator mmu mapped buffer.
* @secure: Non-secure/secure buffer.
* @buffer: Pointer to dma buf associated with this fd.
*/
struct sde_rotator_buf_handle {
int fd;
struct sde_rotator_device *rot_dev;
struct sde_rotator_ctx *ctx;
unsigned long size;
dma_addr_t addr;
int secure;
struct dma_buf *buffer;
};
/*
 * struct sde_rotator_vbinfo - Structure defining video buffer info.
* @fd: fence file descriptor.
* @fence: fence associated with fd.
* @fence_ts: completion timestamp associated with fd
* @qbuf_ts: timestamp associated with buffer queue event
* @dqbuf_ts: Pointer to timestamp associated with buffer dequeue event
* @comp_ratio: compression ratio of this buffer
*/
struct sde_rotator_vbinfo {
int fd;
struct sde_rot_sync_fence *fence;
u32 fence_ts;
ktime_t qbuf_ts;
ktime_t *dqbuf_ts;
struct sde_mult_factor comp_ratio;
};
/*
* struct sde_rotator_request - device layer rotation request
* @list: list head for submit/retire list
* @submit_work: submit work structure
* @retire_work: retire work structure
* @req: Pointer to core layer rotator manager request
* Request can be freed by core layer during sde_rotator_stop_streaming.
* Avoid dereference in dev layer if possible.
* @ctx: Pointer to parent context
* @committed: true if request committed to hardware
* @sequence_id: sequence identifier of this request
*/
struct sde_rotator_request {
struct list_head list;
struct kthread_work submit_work;
struct kthread_work retire_work;
struct sde_rot_entry_container *req;
struct sde_rotator_ctx *ctx;
bool committed;
u32 sequence_id;
};
/*
* struct sde_rotator_ctx - Structure contains per open file handle context.
* @kobj: kernel object of this context
* @rot_dev: Pointer to rotator device.
* @file: Pointer to device file handle
* @fh: V4l2 file handle.
* @ctrl_handler: control handler
* @format_cap: Current capture format.
* @format_out: Current output format.
* @crop_cap: Current capture crop.
* @crop_out: Current output crop.
* @timeperframe: Time per frame in seconds.
* @session_id: unique id for this context
* @hflip: horizontal flip (1-flip)
* @vflip: vertical flip (1-flip)
* @rotate: rotation angle (0,90,180,270)
 * @secure: Non-secure (0) / Secure processing
 * @secure_camera: Non-secure (0) / secure camera processing
* @abort_pending: True if abort is requested for async handling.
* @nbuf_cap: Number of requested buffer for capture queue
* @nbuf_out: Number of requested buffer for output queue
* @fence_cap: Fence info for each requested capture buffer
* @fence_out: Fence info for each requested output buffer
* @wait_queue: Wait queue for signaling end of job
* @work_queue: work queue for submit and retire processing
* @private: Pointer to session private information
* @slice: Pointer to system cache slice descriptor
* @commit_sequence_id: last committed sequence id
* @retired_sequence_id: last retired sequence id
* @list_lock: lock for pending/retired list
 * @pending_list: list of pending requests
 * @retired_list: list of retired/free requests
* @requests: static allocation of free requests
* @rotcfg: current core rotation configuration
* @kthread_id: thread_id used for fence management
*/
struct sde_rotator_ctx {
struct kobject kobj;
struct sde_rotator_device *rot_dev;
struct file *file;
struct v4l2_fh fh;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_format format_cap;
struct v4l2_format format_out;
struct v4l2_rect crop_cap;
struct v4l2_rect crop_out;
struct v4l2_fract timeperframe;
u32 session_id;
s32 hflip;
s32 vflip;
s32 rotate;
s32 secure;
s32 secure_camera;
int abort_pending;
int nbuf_cap;
int nbuf_out;
struct sde_rotator_vbinfo *vbinfo_cap;
struct sde_rotator_vbinfo *vbinfo_out;
wait_queue_head_t wait_queue;
struct sde_rot_queue_v1 work_queue;
struct sde_rot_file_private *private;
struct llcc_slice_desc *slice;
u32 commit_sequence_id;
u32 retired_sequence_id;
spinlock_t list_lock;
struct list_head pending_list;
struct list_head retired_list;
struct sde_rotator_request requests[SDE_ROTATOR_REQUEST_MAX];
struct sde_rotation_config rotcfg;
int kthread_id;
};
/*
* struct sde_rotator_statistics - Storage for statistics
 * @count: Number of processed requests
 * @fail_count: Number of failed requests
* @ts: Timestamps of most recent requests
*/
struct sde_rotator_statistics {
u64 count;
u64 fail_count;
ktime_t ts[SDE_ROTATOR_NUM_EVENTS][SDE_ROTATOR_NUM_TIMESTAMPS];
};
/*
* struct sde_rotator_device - FD device structure.
* @lock: Lock protecting this device structure and serializing IOCTL.
* @dev: Pointer to device struct.
* @v4l2_dev: V4l2 device.
* @vdev: Pointer to video device.
* @m2m_dev: Memory to memory device.
* @pdev: Pointer to platform device.
* @drvdata: Pointer to driver data.
 * @early_submit: flag to enable job submission in ready state.
* @disable_syscache: true to disable system cache
* @mgr: Pointer to core rotator manager.
* @mdata: Pointer to common rotator data/resource.
* @session_id: Next context session identifier
* @fence_timeout: Timeout value in msec for fence wait
* @streamoff_timeout: Timeout value in msec for stream off
* @min_rot_clk: Override the minimum rotator clock from perf calculation
* @min_bw: Override the minimum bandwidth from perf calculation
* @min_overhead_us: Override the minimum overhead in us from perf calculation
 * @debugfs_root: Pointer to debugfs directory entry.
 * @perf_root: Pointer to performance debugfs directory entry.
 * @stats: placeholder for rotator statistics
* @open_timeout: maximum wait time for ctx open in msec
* @open_wq: wait queue for ctx open
* @excl_ctx: Pointer to exclusive ctx
 * @rot_kw: rotator kthread workers
 * @rot_thread: rotator kthread tasks
 * @kthread_free: true if the corresponding rotator kthread is available
*/
struct sde_rotator_device {
struct mutex lock;
struct device *dev;
struct v4l2_device v4l2_dev;
struct video_device *vdev;
#ifndef CONFIG_MSM_SDE_ROTATOR_INIT_ONLY
struct v4l2_m2m_dev *m2m_dev;
#endif
struct platform_device *pdev;
const void *drvdata;
u32 early_submit;
u32 disable_syscache;
struct sde_rot_mgr *mgr;
struct sde_rot_data_type *mdata;
u32 session_id;
u32 fence_timeout;
u32 streamoff_timeout;
u32 min_rot_clk;
u32 min_bw;
u32 min_overhead_us;
struct sde_rotator_statistics stats;
struct dentry *debugfs_root;
struct dentry *perf_root;
u32 open_timeout;
wait_queue_head_t open_wq;
struct sde_rotator_ctx *excl_ctx;
struct kthread_worker rot_kw[MAX_ROT_OPEN_SESSION];
struct task_struct *rot_thread[MAX_ROT_OPEN_SESSION];
bool kthread_free[MAX_ROT_OPEN_SESSION];
};
static inline
struct sde_rot_mgr *sde_rot_mgr_from_pdevice(struct platform_device *pdev)
{
return ((struct sde_rotator_device *) platform_get_drvdata(pdev))->mgr;
}
static inline
struct sde_rot_mgr *sde_rot_mgr_from_device(struct device *dev)
{
return ((struct sde_rotator_device *) dev_get_drvdata(dev))->mgr;
}
void sde_rotator_pm_qos_add(struct sde_rot_data_type *rot_mdata);
#endif /* __SDE_ROTATOR_DEV_H__ */
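Each context cycles a fixed pool of SDE_ROTATOR_REQUEST_MAX sde_rotator_request entries between retired_list and pending_list under list_lock. The helper below is a hypothetical illustration of claiming a free entry; the driver's own code may use different locking (e.g. irqsave variants).

static struct sde_rotator_request *example_get_request(
		struct sde_rotator_ctx *ctx)
{
	struct sde_rotator_request *request = NULL;

	spin_lock(&ctx->list_lock);
	if (!list_empty(&ctx->retired_list)) {
		request = list_first_entry(&ctx->retired_list,
				struct sde_rotator_request, list);
		list_move_tail(&request->list, &ctx->pending_list);
	}
	spin_unlock(&ctx->list_lock);

	return request;
}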


@@ -0,0 +1,943 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012, 2015-2019, The Linux Foundation. All rights reserved.
*/
#include <media/msm_sde_rotator.h>
#include "sde_rotator_formats.h"
#include "sde_rotator_util.h"
#define FMT_RGB_565(fmt, desc, frame_fmt, flag_arg, e0, e1, e2, isubwc) \
{ \
.format = (fmt), \
.description = (desc), \
.flag = flag_arg, \
.fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
.unpack_tight = 1, \
.unpack_align_msb = 0, \
.alpha_enable = 0, \
.unpack_count = 3, \
.bpp = 2, \
.frame_format = (frame_fmt), \
.pixel_mode = SDE_MDP_PIXEL_NORMAL, \
.element = { (e0), (e1), (e2) }, \
.bits = { \
[C2_R_Cr] = SDE_COLOR_5BIT, \
[C0_G_Y] = SDE_COLOR_6BIT, \
[C1_B_Cb] = SDE_COLOR_5BIT, \
}, \
.is_ubwc = isubwc, \
}
#define FMT_RGB_888(fmt, desc, frame_fmt, flag_arg, e0, e1, e2, isubwc) \
{ \
.format = (fmt), \
.description = (desc), \
.flag = flag_arg, \
.fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
.unpack_tight = 1, \
.unpack_align_msb = 0, \
.alpha_enable = 0, \
.unpack_count = 3, \
.bpp = 3, \
.frame_format = (frame_fmt), \
.pixel_mode = SDE_MDP_PIXEL_NORMAL, \
.element = { (e0), (e1), (e2) }, \
.bits = { \
[C2_R_Cr] = SDE_COLOR_8BIT, \
[C0_G_Y] = SDE_COLOR_8BIT, \
[C1_B_Cb] = SDE_COLOR_8BIT, \
}, \
.is_ubwc = isubwc, \
}
#define FMT_RGB_8888(fmt, desc, frame_fmt, flag_arg, \
alpha_en, e0, e1, e2, e3, isubwc) \
{ \
.format = (fmt), \
.description = (desc), \
.flag = flag_arg, \
.fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
.unpack_tight = 1, \
.unpack_align_msb = 0, \
.alpha_enable = (alpha_en), \
.unpack_count = 4, \
.bpp = 4, \
.frame_format = (frame_fmt), \
.pixel_mode = SDE_MDP_PIXEL_NORMAL, \
.element = { (e0), (e1), (e2), (e3) }, \
.bits = { \
[C3_ALPHA] = SDE_COLOR_8BIT, \
[C2_R_Cr] = SDE_COLOR_8BIT, \
[C0_G_Y] = SDE_COLOR_8BIT, \
[C1_B_Cb] = SDE_COLOR_8BIT, \
}, \
.is_ubwc = isubwc, \
}
#define FMT_YUV10_COMMON(fmt) \
.format = (fmt), \
.is_yuv = 1, \
.bits = { \
[C2_R_Cr] = SDE_COLOR_8BIT, \
[C0_G_Y] = SDE_COLOR_8BIT, \
[C1_B_Cb] = SDE_COLOR_8BIT, \
}, \
.alpha_enable = 0
#define FMT_YUV_COMMON(fmt) \
.format = (fmt), \
.is_yuv = 1, \
.bits = { \
[C2_R_Cr] = SDE_COLOR_8BIT, \
[C0_G_Y] = SDE_COLOR_8BIT, \
[C1_B_Cb] = SDE_COLOR_8BIT, \
}, \
.alpha_enable = 0, \
.unpack_tight = 1, \
.unpack_align_msb = 0
#define FMT_YUV_PSEUDO(fmt, desc, frame_fmt, samp, pixel_type, \
flag_arg, e0, e1, isubwc) \
{ \
FMT_YUV_COMMON(fmt), \
.description = (desc), \
.flag = flag_arg, \
.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR, \
.chroma_sample = samp, \
.unpack_count = 2, \
.bpp = 2, \
.frame_format = (frame_fmt), \
.pixel_mode = (pixel_type), \
.element = { (e0), (e1) }, \
.is_ubwc = isubwc, \
}
#define FMT_YUV_PLANR(fmt, desc, frame_fmt, samp, \
flag_arg, e0, e1) \
{ \
FMT_YUV_COMMON(fmt), \
.description = (desc), \
.flag = flag_arg, \
.fetch_planes = SDE_MDP_PLANE_PLANAR, \
.chroma_sample = samp, \
.bpp = 1, \
.unpack_count = 1, \
.frame_format = (frame_fmt), \
.pixel_mode = SDE_MDP_PIXEL_NORMAL, \
.element = { (e0), (e1) }, \
.is_ubwc = SDE_MDP_COMPRESS_NONE, \
}
#define FMT_RGB_1555(fmt, desc, alpha_en, flag_arg, e0, e1, e2, e3) \
{ \
.format = (fmt), \
.description = (desc), \
.flag = flag_arg, \
.fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
.unpack_tight = 1, \
.unpack_align_msb = 0, \
.alpha_enable = (alpha_en), \
.unpack_count = 4, \
.bpp = 2, \
.element = { (e0), (e1), (e2), (e3) }, \
.frame_format = SDE_MDP_FMT_LINEAR, \
.pixel_mode = SDE_MDP_PIXEL_NORMAL, \
.bits = { \
[C3_ALPHA] = SDE_COLOR_ALPHA_1BIT, \
[C2_R_Cr] = SDE_COLOR_5BIT, \
[C0_G_Y] = SDE_COLOR_5BIT, \
[C1_B_Cb] = SDE_COLOR_5BIT, \
}, \
.is_ubwc = SDE_MDP_COMPRESS_NONE, \
}
#define FMT_RGB_4444(fmt, desc, alpha_en, flag_arg, e0, e1, e2, e3) \
{ \
.format = (fmt), \
.description = (desc), \
.flag = flag_arg, \
.fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
.unpack_tight = 1, \
.unpack_align_msb = 0, \
.alpha_enable = (alpha_en), \
.unpack_count = 4, \
.bpp = 2, \
.frame_format = SDE_MDP_FMT_LINEAR, \
.pixel_mode = SDE_MDP_PIXEL_NORMAL, \
.element = { (e0), (e1), (e2), (e3) }, \
.bits = { \
[C3_ALPHA] = SDE_COLOR_ALPHA_4BIT, \
[C2_R_Cr] = SDE_COLOR_4BIT, \
[C0_G_Y] = SDE_COLOR_4BIT, \
[C1_B_Cb] = SDE_COLOR_4BIT, \
}, \
.is_ubwc = SDE_MDP_COMPRESS_NONE, \
}
#define FMT_RGB_1010102(fmt, desc, frame_fmt, flag_arg, \
alpha_en, e0, e1, e2, e3, isubwc) \
{ \
.format = (fmt), \
.description = (desc), \
.flag = flag_arg, \
.fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
.unpack_tight = 1, \
.unpack_align_msb = 0, \
.alpha_enable = (alpha_en), \
.unpack_count = 4, \
.bpp = 4, \
.frame_format = frame_fmt, \
.pixel_mode = SDE_MDP_PIXEL_10BIT, \
.element = { (e0), (e1), (e2), (e3) }, \
.bits = { \
[C3_ALPHA] = SDE_COLOR_8BIT, \
[C2_R_Cr] = SDE_COLOR_8BIT, \
[C0_G_Y] = SDE_COLOR_8BIT, \
[C1_B_Cb] = SDE_COLOR_8BIT, \
}, \
.is_ubwc = isubwc, \
}
/*
* UBWC formats table:
* This table holds the UBWC formats supported.
* If a compression ratio needs to be used for this or any other format,
* the data will be passed by user-space.
*/
static struct sde_mdp_format_params_ubwc sde_mdp_format_ubwc_map[] = {
{
.mdp_format = FMT_RGB_565(SDE_PIX_FMT_RGB_565_UBWC,
"SDE/RGB_565_UBWC",
SDE_MDP_FMT_TILE_A5X, 0,
C2_R_Cr, C0_G_Y, C1_B_Cb, SDE_MDP_COMPRESS_UBWC),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format = FMT_RGB_8888(SDE_PIX_FMT_RGBA_8888_UBWC,
"SDE/RGBA_8888_UBWC",
SDE_MDP_FMT_TILE_A5X, 0, 1,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
SDE_MDP_COMPRESS_UBWC),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format = FMT_RGB_8888(SDE_PIX_FMT_RGBX_8888_UBWC,
"SDE/RGBX_8888_UBWC",
SDE_MDP_FMT_TILE_A5X, 0, 0,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
SDE_MDP_COMPRESS_UBWC),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format = FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
"SDE/Y_CBCR_H2V2_UBWC",
SDE_MDP_FMT_TILE_A5X, SDE_MDP_CHROMA_420,
SDE_MDP_PIXEL_NORMAL,
0, C1_B_Cb, C2_R_Cr,
SDE_MDP_COMPRESS_UBWC),
.micro = {
.tile_height = 8,
.tile_width = 32,
},
},
{
.mdp_format = FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102_UBWC,
"SDE/RGBA_1010102_UBWC",
SDE_MDP_FMT_TILE_A5X, 0, 1,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
SDE_MDP_COMPRESS_UBWC),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format = FMT_RGB_1010102(SDE_PIX_FMT_RGBX_1010102_UBWC,
"SDE/RGBX_1010102_UBWC",
SDE_MDP_FMT_TILE_A5X, 0, 0,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
SDE_MDP_COMPRESS_UBWC),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format = FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
"SDE/Y_CBCR_H2V2_TP10_UBWC",
SDE_MDP_FMT_TILE_A5X, SDE_MDP_CHROMA_420,
SDE_MDP_PIXEL_10BIT,
0,
C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_UBWC),
.micro = {
.tile_height = 4,
.tile_width = 48,
},
},
{
.mdp_format = {
FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC),
.description = "SDE/Y_CBCR_H2V2_P010_UBWC",
.flag = 0,
.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
.chroma_sample = SDE_MDP_CHROMA_420,
.unpack_count = 2,
.bpp = 2,
.frame_format = SDE_MDP_FMT_TILE_A5X,
.pixel_mode = SDE_MDP_PIXEL_10BIT,
.element = { C1_B_Cb, C2_R_Cr },
.unpack_tight = 0,
.unpack_align_msb = 1,
.is_ubwc = SDE_MDP_COMPRESS_UBWC
},
.micro = {
.tile_height = 4,
.tile_width = 32,
},
},
{
.mdp_format =
FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102_TILE,
"SDE/RGBA_1010102_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_RGB_1010102(SDE_PIX_FMT_RGBX_1010102_TILE,
"SDE/RGBX_1010102102_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_RGB_1010102(SDE_PIX_FMT_BGRA_1010102_TILE,
"SDE/BGRA_1010102_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_RGB_1010102(SDE_PIX_FMT_BGRX_1010102_TILE,
"SDE/BGRX_1010102_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_RGB_1010102(SDE_PIX_FMT_ARGB_2101010_TILE,
"SDE/ARGB_2101010_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_RGB_1010102(SDE_PIX_FMT_XRGB_2101010_TILE,
"SDE/XRGB_2101010_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_RGB_1010102(SDE_PIX_FMT_ABGR_2101010_TILE,
"SDE/ABGR_2101010_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_RGB_1010102(SDE_PIX_FMT_XBGR_2101010_TILE,
"SDE/XBGR_2101010_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
"Y_CRCB_H2V2_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
SDE_MDP_FORMAT_FLAG_PRIVATE,
C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 8,
.tile_width = 32,
},
},
{
.mdp_format =
FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
"Y_CBCR_H2V2_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
SDE_MDP_FORMAT_FLAG_PRIVATE,
C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 8,
.tile_width = 32,
},
},
{
.mdp_format =
FMT_RGB_8888(SDE_PIX_FMT_ABGR_8888_TILE,
"SDE/ABGR_8888_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_RGB_8888(SDE_PIX_FMT_XRGB_8888_TILE,
"SDE/XRGB_8888_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 32,
},
},
{
.mdp_format =
FMT_RGB_8888(SDE_PIX_FMT_ARGB_8888_TILE,
"SDE/ARGB_8888_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_RGB_8888(SDE_PIX_FMT_RGBA_8888_TILE,
"SDE/RGBA_8888_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_RGB_8888(SDE_PIX_FMT_RGBX_8888_TILE,
"SDE/RGBX_8888_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_RGB_8888(SDE_PIX_FMT_BGRA_8888_TILE,
"SDE/BGRA_8888_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_RGB_8888(SDE_PIX_FMT_BGRX_8888_TILE,
"SDE/BGRX_8888_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format =
FMT_RGB_8888(SDE_PIX_FMT_XBGR_8888_TILE,
"SDE/XBGR_8888_TILE",
SDE_MDP_FMT_TILE_A5X,
SDE_MDP_FORMAT_FLAG_PRIVATE,
0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
SDE_MDP_COMPRESS_NONE),
.micro = {
.tile_height = 4,
.tile_width = 16,
},
},
{
.mdp_format = {
FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE),
.description = "SDE/Y_CBCR_H2V2_P010_TILE",
.flag = SDE_MDP_FORMAT_FLAG_PRIVATE,
.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
.chroma_sample = SDE_MDP_CHROMA_420,
.unpack_count = 2,
.bpp = 2,
.frame_format = SDE_MDP_FMT_TILE_A5X,
.pixel_mode = SDE_MDP_PIXEL_10BIT,
.element = { C1_B_Cb, C2_R_Cr },
.unpack_tight = 0,
.unpack_align_msb = 1,
.is_ubwc = SDE_MDP_COMPRESS_NONE,
},
.micro = {
.tile_height = 4,
.tile_width = 32,
},
},
};
static struct sde_mdp_format_params sde_mdp_format_map[] = {
FMT_RGB_565(
SDE_PIX_FMT_RGB_565, "RGB_565", SDE_MDP_FMT_LINEAR,
0, C1_B_Cb, C0_G_Y, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
FMT_RGB_565(
SDE_PIX_FMT_BGR_565, "BGR_565", SDE_MDP_FMT_LINEAR,
0, C2_R_Cr, C0_G_Y, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
FMT_RGB_888(
SDE_PIX_FMT_RGB_888, "RGB_888", SDE_MDP_FMT_LINEAR,
0, C2_R_Cr, C0_G_Y, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
FMT_RGB_888(
SDE_PIX_FMT_BGR_888, "BGR_888", SDE_MDP_FMT_LINEAR,
0, C1_B_Cb, C0_G_Y, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
FMT_RGB_8888(
SDE_PIX_FMT_ABGR_8888, "SDE/ABGR_8888", SDE_MDP_FMT_LINEAR,
0, 1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_8888(
SDE_PIX_FMT_XRGB_8888, "SDE/XRGB_8888", SDE_MDP_FMT_LINEAR,
0, 0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_8888(
SDE_PIX_FMT_ARGB_8888, "SDE/ARGB_8888", SDE_MDP_FMT_LINEAR,
0, 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_8888(
SDE_PIX_FMT_RGBA_8888, "SDE/RGBA_8888", SDE_MDP_FMT_LINEAR,
0, 1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_8888(
SDE_PIX_FMT_RGBX_8888, "SDE/RGBX_8888", SDE_MDP_FMT_LINEAR,
0, 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_8888(
SDE_PIX_FMT_BGRA_8888, "SDE/BGRA_8888", SDE_MDP_FMT_LINEAR,
0, 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_8888(
SDE_PIX_FMT_BGRX_8888, "SDE/BGRX_8888", SDE_MDP_FMT_LINEAR,
0, 0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_8888(
SDE_PIX_FMT_XBGR_8888, "SDE/XBGR_8888", SDE_MDP_FMT_LINEAR,
0, 0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
SDE_MDP_COMPRESS_NONE),
FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V1, "Y_CRCB_H2V1",
SDE_MDP_FMT_LINEAR,
SDE_MDP_CHROMA_H2V1, SDE_MDP_PIXEL_NORMAL,
0, C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V1, "Y_CBCR_H2V1",
SDE_MDP_FMT_LINEAR,
SDE_MDP_CHROMA_H2V1, SDE_MDP_PIXEL_NORMAL,
0, C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H1V2, "Y_CRCB_H1V2",
SDE_MDP_FMT_LINEAR,
SDE_MDP_CHROMA_H1V2, SDE_MDP_PIXEL_NORMAL,
0, C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H1V2, "Y_CBCR_H1V2",
SDE_MDP_FMT_LINEAR,
SDE_MDP_CHROMA_H1V2, SDE_MDP_PIXEL_NORMAL,
0, C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V2, "Y_CRCB_H2V2",
SDE_MDP_FMT_LINEAR,
SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
0, C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2, "Y_CBCR_H2V2",
SDE_MDP_FMT_LINEAR,
SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
0, C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2_VENUS, "SDE/Y_CBCR_H2V2_VENUS",
SDE_MDP_FMT_LINEAR,
SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
0, C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V2_VENUS, "SDE/Y_CRCB_H2V2_VENUS",
SDE_MDP_FMT_LINEAR,
SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
0, C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
{
FMT_YUV10_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010),
.description = "SDE/Y_CBCR_H2V2_P010",
.flag = 0,
.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
.chroma_sample = SDE_MDP_CHROMA_420,
.unpack_count = 2,
.bpp = 2,
.frame_format = SDE_MDP_FMT_LINEAR,
.pixel_mode = SDE_MDP_PIXEL_10BIT,
.element = { C1_B_Cb, C2_R_Cr },
.unpack_tight = 0,
.unpack_align_msb = 1,
.is_ubwc = SDE_MDP_COMPRESS_NONE,
},
{
FMT_YUV10_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS),
.description = "SDE/Y_CBCR_H2V2_P010_VENUS",
.flag = 0,
.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
.chroma_sample = SDE_MDP_CHROMA_420,
.unpack_count = 2,
.bpp = 2,
.frame_format = SDE_MDP_FMT_LINEAR,
.pixel_mode = SDE_MDP_PIXEL_10BIT,
.element = { C1_B_Cb, C2_R_Cr },
.unpack_tight = 0,
.unpack_align_msb = 1,
.is_ubwc = SDE_MDP_COMPRESS_NONE,
},
{
FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_TP10),
.description = "SDE/Y_CBCR_H2V2_TP10",
.flag = 0,
.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
.chroma_sample = SDE_MDP_CHROMA_420,
.unpack_count = 2,
.bpp = 2,
.frame_format = SDE_MDP_FMT_TILE_A5X,
.pixel_mode = SDE_MDP_PIXEL_10BIT,
.element = { C1_B_Cb, C2_R_Cr },
.unpack_tight = 1,
.unpack_align_msb = 0,
.is_ubwc = SDE_MDP_COMPRESS_NONE,
},
FMT_YUV_PLANR(SDE_PIX_FMT_Y_CB_CR_H2V2, "Y_CB_CR_H2V2",
SDE_MDP_FMT_LINEAR,
SDE_MDP_CHROMA_420, 0, C2_R_Cr, C1_B_Cb),
FMT_YUV_PLANR(SDE_PIX_FMT_Y_CR_CB_H2V2, "Y_CR_CB_H2V2",
SDE_MDP_FMT_LINEAR,
SDE_MDP_CHROMA_420, 0, C1_B_Cb, C2_R_Cr),
FMT_YUV_PLANR(SDE_PIX_FMT_Y_CR_CB_GH2V2, "SDE/Y_CR_CB_GH2V2",
SDE_MDP_FMT_LINEAR,
SDE_MDP_CHROMA_420, 0, C1_B_Cb, C2_R_Cr),
{
FMT_YUV_COMMON(SDE_PIX_FMT_YCBYCR_H2V1),
.description = "YCBYCR_H2V1",
.flag = 0,
.fetch_planes = SDE_MDP_PLANE_INTERLEAVED,
.chroma_sample = SDE_MDP_CHROMA_H2V1,
.unpack_count = 4,
.bpp = 2,
.frame_format = SDE_MDP_FMT_LINEAR,
.pixel_mode = SDE_MDP_PIXEL_NORMAL,
.element = { C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y },
.is_ubwc = SDE_MDP_COMPRESS_NONE,
},
FMT_RGB_1555(SDE_PIX_FMT_RGBA_5551, "RGBA_5551", 1, 0,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
FMT_RGB_1555(SDE_PIX_FMT_ARGB_1555, "ARGB_1555", 1, 0,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
FMT_RGB_1555(SDE_PIX_FMT_ABGR_1555, "ABGR_1555", 1, 0,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
FMT_RGB_1555(SDE_PIX_FMT_BGRA_5551, "BGRA_5551", 1, 0,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
FMT_RGB_1555(SDE_PIX_FMT_BGRX_5551, "BGRX_5551", 0, 0,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
FMT_RGB_1555(SDE_PIX_FMT_RGBX_5551, "RGBX_5551", 0, 0,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
FMT_RGB_1555(SDE_PIX_FMT_XBGR_1555, "XBGR_1555", 0, 0,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
FMT_RGB_1555(SDE_PIX_FMT_XRGB_1555, "XRGB_1555", 0, 0,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
FMT_RGB_4444(SDE_PIX_FMT_RGBA_4444, "RGBA_4444", 1, 0,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
FMT_RGB_4444(SDE_PIX_FMT_ARGB_4444, "ARGB_4444", 1, 0,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
FMT_RGB_4444(SDE_PIX_FMT_BGRA_4444, "BGRA_4444", 1, 0,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
FMT_RGB_4444(SDE_PIX_FMT_ABGR_4444, "ABGR_4444", 1, 0,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
FMT_RGB_4444(SDE_PIX_FMT_RGBX_4444, "RGBX_4444", 0, 0,
C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
FMT_RGB_4444(SDE_PIX_FMT_XRGB_4444, "XRGB_4444", 0, 0,
C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
FMT_RGB_4444(SDE_PIX_FMT_BGRX_4444, "BGRX_4444", 0, 0,
C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
FMT_RGB_4444(SDE_PIX_FMT_XBGR_4444, "XBGR_4444", 0, 0,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102, "SDE/RGBA_1010102",
SDE_MDP_FMT_LINEAR,
0, 1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_1010102(SDE_PIX_FMT_RGBX_1010102, "SDE/RGBX_1010102",
SDE_MDP_FMT_LINEAR,
0, 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_1010102(SDE_PIX_FMT_BGRA_1010102, "SDE/BGRA_1010102",
SDE_MDP_FMT_LINEAR,
0, 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_1010102(SDE_PIX_FMT_BGRX_1010102, "SDE/BGRX_1010102",
SDE_MDP_FMT_LINEAR,
0, 0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_1010102(SDE_PIX_FMT_ARGB_2101010, "SDE/ARGB_2101010",
SDE_MDP_FMT_LINEAR,
0, 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_1010102(SDE_PIX_FMT_XRGB_2101010, "SDE/XRGB_2101010",
SDE_MDP_FMT_LINEAR,
0, 0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_1010102(SDE_PIX_FMT_ABGR_2101010, "SDE/ABGR_2101010",
SDE_MDP_FMT_LINEAR,
0, 1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
SDE_MDP_COMPRESS_NONE),
FMT_RGB_1010102(SDE_PIX_FMT_XBGR_2101010, "SDE/XBGR_2101010",
SDE_MDP_FMT_LINEAR,
0, 0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
SDE_MDP_COMPRESS_NONE),
};
/*
* sde_get_format_params - return format parameter of the given format
* @format: format to lookup
*/
struct sde_mdp_format_params *sde_get_format_params(u32 format)
{
struct sde_mdp_format_params *fmt = NULL;
int i;
bool fmt_found = false;
for (i = 0; i < ARRAY_SIZE(sde_mdp_format_map); i++) {
fmt = &sde_mdp_format_map[i];
if (format == fmt->format) {
fmt_found = true;
break;
}
}
if (!fmt_found) {
for (i = 0; i < ARRAY_SIZE(sde_mdp_format_ubwc_map); i++) {
fmt = &sde_mdp_format_ubwc_map[i].mdp_format;
if (format == fmt->format) {
fmt_found = true;
break;
}
}
}
	/* If format is not supported, return NULL */
if (!fmt_found)
fmt = NULL;
return fmt;
}
/*
* sde_rot_get_ubwc_micro_dim - return micro dimension of the given ubwc format
* @format: format to lookup
* @w: Pointer to returned width dimension
* @h: Pointer to returned height dimension
*/
int sde_rot_get_ubwc_micro_dim(u32 format, u16 *w, u16 *h)
{
struct sde_mdp_format_params_ubwc *fmt = NULL;
bool fmt_found = false;
int i;
for (i = 0; i < ARRAY_SIZE(sde_mdp_format_ubwc_map); i++) {
fmt = &sde_mdp_format_ubwc_map[i];
if (format == fmt->mdp_format.format) {
fmt_found = true;
break;
}
}
if (!fmt_found)
return -EINVAL;
*w = fmt->micro.tile_width;
*h = fmt->micro.tile_height;
return 0;
}
/*
* sde_rot_get_tilea5x_pixfmt - get base a5x tile format of given source format
* @src_pixfmt: source pixel format to be converted
* @dst_pixfmt: pointer to base a5x tile pixel format
* return: 0 if success; error code otherwise
*/
int sde_rot_get_base_tilea5x_pixfmt(u32 src_pixfmt, u32 *dst_pixfmt)
{
int rc = 0;
if (!dst_pixfmt) {
SDEROT_ERR("invalid parameters\n");
return -EINVAL;
}
switch (src_pixfmt) {
case SDE_PIX_FMT_Y_CBCR_H2V2:
case SDE_PIX_FMT_Y_CBCR_H2V2_UBWC:
case SDE_PIX_FMT_Y_CBCR_H2V2_TILE:
*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TILE;
break;
case SDE_PIX_FMT_Y_CRCB_H2V2:
case SDE_PIX_FMT_Y_CRCB_H2V2_TILE:
*dst_pixfmt = SDE_PIX_FMT_Y_CRCB_H2V2_TILE;
break;
case V4L2_PIX_FMT_RGB565:
case SDE_PIX_FMT_RGB_565_UBWC:
case SDE_PIX_FMT_RGB_565_TILE:
*dst_pixfmt = SDE_PIX_FMT_RGB_565_TILE;
break;
case SDE_PIX_FMT_RGBA_8888:
case SDE_PIX_FMT_RGBA_8888_UBWC:
case SDE_PIX_FMT_RGBA_8888_TILE:
*dst_pixfmt = SDE_PIX_FMT_RGBA_8888_TILE;
break;
case SDE_PIX_FMT_RGBX_8888:
case SDE_PIX_FMT_RGBX_8888_UBWC:
case SDE_PIX_FMT_RGBX_8888_TILE:
*dst_pixfmt = SDE_PIX_FMT_RGBX_8888_TILE;
break;
case SDE_PIX_FMT_ARGB_8888:
case SDE_PIX_FMT_ARGB_8888_TILE:
*dst_pixfmt = SDE_PIX_FMT_ARGB_8888_TILE;
break;
case SDE_PIX_FMT_XRGB_8888:
case SDE_PIX_FMT_XRGB_8888_TILE:
*dst_pixfmt = SDE_PIX_FMT_XRGB_8888_TILE;
break;
case SDE_PIX_FMT_ABGR_8888:
case SDE_PIX_FMT_ABGR_8888_TILE:
*dst_pixfmt = SDE_PIX_FMT_ABGR_8888_TILE;
break;
case SDE_PIX_FMT_XBGR_8888:
case SDE_PIX_FMT_XBGR_8888_TILE:
*dst_pixfmt = SDE_PIX_FMT_XBGR_8888_TILE;
break;
case SDE_PIX_FMT_ARGB_2101010:
case SDE_PIX_FMT_ARGB_2101010_TILE:
*dst_pixfmt = SDE_PIX_FMT_ARGB_2101010_TILE;
break;
case SDE_PIX_FMT_XRGB_2101010:
case SDE_PIX_FMT_XRGB_2101010_TILE:
*dst_pixfmt = SDE_PIX_FMT_XRGB_2101010_TILE;
break;
case SDE_PIX_FMT_ABGR_2101010:
case SDE_PIX_FMT_ABGR_2101010_TILE:
*dst_pixfmt = SDE_PIX_FMT_ABGR_2101010_TILE;
break;
case SDE_PIX_FMT_XBGR_2101010:
case SDE_PIX_FMT_XBGR_2101010_TILE:
*dst_pixfmt = SDE_PIX_FMT_XBGR_2101010_TILE;
break;
case SDE_PIX_FMT_BGRA_1010102:
case SDE_PIX_FMT_BGRA_1010102_TILE:
*dst_pixfmt = SDE_PIX_FMT_BGRA_1010102_TILE;
break;
case SDE_PIX_FMT_BGRX_1010102:
case SDE_PIX_FMT_BGRX_1010102_TILE:
*dst_pixfmt = SDE_PIX_FMT_BGRX_1010102_TILE;
break;
case SDE_PIX_FMT_Y_CBCR_H2V2_P010:
case SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE:
case SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC:
*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE;
break;
case SDE_PIX_FMT_Y_CBCR_H2V2_TP10:
case SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC:
*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TP10;
break;
default:
SDEROT_ERR("invalid src pixel format %c%c%c%c\n",
src_pixfmt >> 0, src_pixfmt >> 8,
src_pixfmt >> 16, src_pixfmt >> 24);
rc = -EINVAL;
break;
}
return rc;
}
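A short usage sketch, illustrative only, combining the format table lookup above with the UBWC micro-tile query:

static void example_format_query(u32 pixfmt)
{
	struct sde_mdp_format_params *fmt;
	u16 tile_w, tile_h;

	fmt = sde_get_format_params(pixfmt);
	if (!fmt)
		return;	/* unsupported format */

	if (sde_mdp_is_ubwc_format(fmt) &&
			!sde_rot_get_ubwc_micro_dim(pixfmt, &tile_w, &tile_h))
		pr_debug("%s: micro tile %dx%d\n", fmt->description,
				tile_w, tile_h);
}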


@@ -0,0 +1,196 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012, 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef SDE_ROTATOR_FORMATS_H
#define SDE_ROTATOR_FORMATS_H
#include <linux/types.h>
#include <media/msm_sde_rotator.h>
/* Internal rotator pixel formats */
#define SDE_PIX_FMT_RGBA_8888_TILE v4l2_fourcc('Q', 'T', '0', '0')
#define SDE_PIX_FMT_RGBX_8888_TILE v4l2_fourcc('Q', 'T', '0', '1')
#define SDE_PIX_FMT_BGRA_8888_TILE v4l2_fourcc('Q', 'T', '0', '2')
#define SDE_PIX_FMT_BGRX_8888_TILE v4l2_fourcc('Q', 'T', '0', '3')
#define SDE_PIX_FMT_ARGB_8888_TILE v4l2_fourcc('Q', 'T', '0', '4')
#define SDE_PIX_FMT_XRGB_8888_TILE v4l2_fourcc('Q', 'T', '0', '5')
#define SDE_PIX_FMT_ABGR_8888_TILE v4l2_fourcc('Q', 'T', '0', '6')
#define SDE_PIX_FMT_XBGR_8888_TILE v4l2_fourcc('Q', 'T', '0', '7')
#define SDE_PIX_FMT_Y_CBCR_H2V2_TILE v4l2_fourcc('Q', 'T', '0', '8')
#define SDE_PIX_FMT_Y_CRCB_H2V2_TILE v4l2_fourcc('Q', 'T', '0', '9')
#define SDE_PIX_FMT_ARGB_2101010_TILE v4l2_fourcc('Q', 'T', '0', 'A')
#define SDE_PIX_FMT_XRGB_2101010_TILE v4l2_fourcc('Q', 'T', '0', 'B')
#define SDE_PIX_FMT_ABGR_2101010_TILE v4l2_fourcc('Q', 'T', '0', 'C')
#define SDE_PIX_FMT_XBGR_2101010_TILE v4l2_fourcc('Q', 'T', '0', 'D')
#define SDE_PIX_FMT_BGRA_1010102_TILE v4l2_fourcc('Q', 'T', '0', 'E')
#define SDE_PIX_FMT_BGRX_1010102_TILE v4l2_fourcc('Q', 'T', '0', 'F')
#define SDE_PIX_FMT_RGBA_1010102_TILE v4l2_fourcc('Q', 'T', '1', '0')
#define SDE_PIX_FMT_RGBX_1010102_TILE v4l2_fourcc('Q', 'T', '1', '1')
#define SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE v4l2_fourcc('Q', 'T', '1', '2')
#define SDE_PIX_FMT_RGB_565_TILE v4l2_fourcc('Q', 'T', '1', '3')
#define SDE_ROT_MAX_PLANES 4
#define UBWC_META_MACRO_W_H 16
#define UBWC_META_BLOCK_SIZE 256
/*
 * Enum values are chosen to fit the number of bits
 * expected by the HW programming.
*/
enum {
SDE_COLOR_4BIT,
SDE_COLOR_5BIT,
SDE_COLOR_6BIT,
SDE_COLOR_8BIT,
SDE_COLOR_ALPHA_1BIT = 0,
SDE_COLOR_ALPHA_4BIT = 1,
};
#define C3_ALPHA 3 /* alpha */
#define C2_R_Cr 2 /* R/Cr */
#define C1_B_Cb 1 /* B/Cb */
#define C0_G_Y 0 /* G/luma */
enum sde_mdp_compress_type {
SDE_MDP_COMPRESS_NONE,
SDE_MDP_COMPRESS_UBWC,
};
enum sde_mdp_frame_format_type {
SDE_MDP_FMT_LINEAR,
SDE_MDP_FMT_TILE_A4X,
SDE_MDP_FMT_TILE_A5X,
};
enum sde_mdp_pixel_type {
SDE_MDP_PIXEL_NORMAL,
SDE_MDP_PIXEL_10BIT,
};
enum sde_mdp_sspp_fetch_type {
SDE_MDP_PLANE_INTERLEAVED,
SDE_MDP_PLANE_PLANAR,
SDE_MDP_PLANE_PSEUDO_PLANAR,
};
enum sde_mdp_sspp_chroma_samp_type {
SDE_MDP_CHROMA_RGB,
SDE_MDP_CHROMA_H2V1,
SDE_MDP_CHROMA_H1V2,
SDE_MDP_CHROMA_420
};
enum sde_mdp_format_flag_type {
SDE_MDP_FORMAT_FLAG_PRIVATE = BIT(0)
};
struct sde_mdp_format_params {
u32 format;
const char *description;
u32 flag;
u8 is_yuv;
u8 is_ubwc;
u8 frame_format;
u8 chroma_sample;
u8 solid_fill;
u8 fetch_planes;
u8 unpack_align_msb; /* 0 to LSB, 1 to MSB */
u8 unpack_tight; /* 0 for loose, 1 for tight */
u8 unpack_count; /* 0 = 1 component, 1 = 2 component ... */
u8 bpp;
u8 alpha_enable; /* source has alpha */
u8 pixel_mode; /* 0: normal, 1:10bit */
u8 bits[SDE_ROT_MAX_PLANES];
u8 element[SDE_ROT_MAX_PLANES];
};
struct sde_mdp_format_ubwc_tile_info {
u16 tile_height;
u16 tile_width;
};
struct sde_mdp_format_params_ubwc {
struct sde_mdp_format_params mdp_format;
struct sde_mdp_format_ubwc_tile_info micro;
};
struct sde_mdp_format_params *sde_get_format_params(u32 format);
int sde_rot_get_ubwc_micro_dim(u32 format, u16 *w, u16 *h);
int sde_rot_get_base_tilea5x_pixfmt(u32 src_pixfmt, u32 *dst_pixfmt);
static inline bool sde_mdp_is_tilea4x_format(struct sde_mdp_format_params *fmt)
{
return fmt && (fmt->frame_format == SDE_MDP_FMT_TILE_A4X);
}
static inline bool sde_mdp_is_tilea5x_format(struct sde_mdp_format_params *fmt)
{
return fmt && (fmt->frame_format == SDE_MDP_FMT_TILE_A5X);
}
static inline bool sde_mdp_is_ubwc_format(struct sde_mdp_format_params *fmt)
{
return fmt && (fmt->is_ubwc == SDE_MDP_COMPRESS_UBWC);
}
static inline bool sde_mdp_is_linear_format(struct sde_mdp_format_params *fmt)
{
return fmt && (fmt->frame_format == SDE_MDP_FMT_LINEAR);
}
static inline bool sde_mdp_is_nv12_format(struct sde_mdp_format_params *fmt)
{
return fmt && (fmt->fetch_planes == SDE_MDP_PLANE_PSEUDO_PLANAR) &&
(fmt->chroma_sample == SDE_MDP_CHROMA_420);
}
static inline bool sde_mdp_is_nv12_8b_format(struct sde_mdp_format_params *fmt)
{
return fmt && sde_mdp_is_nv12_format(fmt) &&
(fmt->pixel_mode == SDE_MDP_PIXEL_NORMAL);
}
static inline bool sde_mdp_is_nv12_10b_format(struct sde_mdp_format_params *fmt)
{
return fmt && sde_mdp_is_nv12_format(fmt) &&
(fmt->pixel_mode == SDE_MDP_PIXEL_10BIT);
}
static inline bool sde_mdp_is_tp10_format(struct sde_mdp_format_params *fmt)
{
return fmt && sde_mdp_is_nv12_10b_format(fmt) &&
fmt->unpack_tight;
}
static inline bool sde_mdp_is_p010_format(struct sde_mdp_format_params *fmt)
{
return fmt && sde_mdp_is_nv12_10b_format(fmt) &&
!fmt->unpack_tight;
}
static inline bool sde_mdp_is_yuv_format(struct sde_mdp_format_params *fmt)
{
return fmt && fmt->is_yuv;
}
static inline bool sde_mdp_is_rgb_format(struct sde_mdp_format_params *fmt)
{
return !sde_mdp_is_yuv_format(fmt);
}
static inline bool sde_mdp_is_private_format(struct sde_mdp_format_params *fmt)
{
return fmt && (fmt->flag & SDE_MDP_FORMAT_FLAG_PRIVATE);
}
static inline int sde_mdp_format_blk_size(struct sde_mdp_format_params *fmt)
{
return sde_mdp_is_tp10_format(fmt) ? 96 : 128;
}
#endif
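Illustrative only: a hypothetical helper that maps a source format to its base A5X tile format with sde_rot_get_base_tilea5x_pixfmt() and then picks the fetch block size; it returns the block size on success or a negative error code.

static inline int example_tile_blk_size(u32 src_pixfmt)
{
	struct sde_mdp_format_params *fmt;
	u32 tile_pixfmt;
	int rc;

	rc = sde_rot_get_base_tilea5x_pixfmt(src_pixfmt, &tile_pixfmt);
	if (rc)
		return rc;

	fmt = sde_get_format_params(tile_pixfmt);
	if (!fmt)
		return -EINVAL;

	return sde_mdp_format_blk_size(fmt);	/* 96 for TP10, else 128 */
}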


@@ -0,0 +1,69 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef SDE_ROTATOR_HWIO_H
#define SDE_ROTATOR_HWIO_H
#include <linux/bitops.h>
#define SDE_REG_HW_VERSION 0x0
#define SDE_REG_HW_INTR_STATUS 0x10
#define SDE_INTR_MDP BIT(0)
#define SDE_MDP_OFFSET 0x1000
#define MMSS_MDP_PANIC_ROBUST_CTRL 0x00178
#define MMSS_MDP_PANIC_LUT0 0x0017C
#define MMSS_MDP_PANIC_LUT1 0x00180
#define MMSS_MDP_ROBUST_LUT 0x00184
#define MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL 0x00190
/* following offsets are with respect to MDP VBIF base */
#define MMSS_VBIF_CLKON 0x4
#define MMSS_VBIF_RD_LIM_CONF 0x0B0
#define MMSS_VBIF_WR_LIM_CONF 0x0C0
#define MMSS_VBIF_XIN_HALT_CTRL0 0x200
#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
#define MMSS_VBIF_AXI_HALT_CTRL0 0x208
#define MMSS_VBIF_AXI_HALT_CTRL1 0x20C
#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
#define MMSS_VBIF_TEST_BUS_OUT 0x230
#define SDE_VBIF_QOS_REMAP_BASE 0x020
#define SDE_VBIF_QOS_REMAP_ENTRIES 0x4
#define SDE_VBIF_FIXED_SORT_EN 0x30
#define SDE_VBIF_FIXED_SORT_SEL0 0x34
/* MMSS_VBIF_NRT - offset relative to base offset */
#define MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0 0x0008
#define MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0 0
#define MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1 1
#define MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1 0x000C
#define MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 0x0020
#define MMSS_VBIF_NRT_VBIF_QOS_REMAP_01 0x0024
#define MMSS_VBIF_NRT_VBIF_QOS_REMAP_10 0x0028
#define MMSS_VBIF_NRT_VBIF_QOS_REMAP_11 0x002C
#define MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN 0x00AC
#define MMSS_VBIF_NRT_VBIF_IN_RD_LIM_CONF0 0x00B0
#define MMSS_VBIF_NRT_VBIF_IN_RD_LIM_CONF1 0x00B4
#define MMSS_VBIF_NRT_VBIF_IN_RD_LIM_CONF2 0x00B8
#define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF0 0x00C0
#define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF1 0x00C4
#define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF2 0x00C8
#define MMSS_VBIF_NRT_VBIF_OUT_RD_LIM_CONF0 0x00D0
#define MMSS_VBIF_NRT_VBIF_OUT_WR_LIM_CONF0 0x00D4
#define MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
#define MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 0x0550
#define MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 0x0590
#define SDE_MDP_REG_TRAFFIC_SHAPER_EN BIT(31)
#define SDE_MDP_REG_TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
#define SDE_MDP_REG_TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
#define SDE_MDP_REG_TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
#endif
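The VBIF offsets above are written through the register helpers declared in sde_rotator_io_util.h (assumed included). A hedged sketch; the io handle and the remap value are placeholders:

static void example_vbif_qos_remap(struct sde_io_data *vbif_nrt_io)
{
	/* placeholder value; real settings come from platform/QoS data */
	SDE_REG_W(vbif_nrt_io, MMSS_VBIF_NRT_VBIF_QOS_REMAP_00, 0x1);
}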


@@ -0,0 +1,161 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_ROTATOR_INLINE_H__
#define __SDE_ROTATOR_INLINE_H__
#include <linux/types.h>
#include <linux/dma-buf.h>
#include <linux/platform_device.h>
#include "sde_rotator_formats.h"
#define SDE_ROTATOR_INLINE_PLANE_MAX 4
/*
* enum sde_rotator_inline_cmd_type - inline rotator command stages
* @SDE_ROTATOR_INLINE_CMD_VALIDATE: validate command only
* @SDE_ROTATOR_INLINE_CMD_COMMIT: commit command to hardware
* @SDE_ROTATOR_INLINE_CMD_START: ready to start inline rotation
* @SDE_ROTATOR_INLINE_CMD_CLEANUP: cleanup after commit is done
* @SDE_ROTATOR_INLINE_CMD_ABORT: abort current commit and reset
*/
enum sde_rotator_inline_cmd_type {
SDE_ROTATOR_INLINE_CMD_VALIDATE,
SDE_ROTATOR_INLINE_CMD_COMMIT,
SDE_ROTATOR_INLINE_CMD_START,
SDE_ROTATOR_INLINE_CMD_CLEANUP,
SDE_ROTATOR_INLINE_CMD_ABORT,
};
/**
* sde_rotator_inline_cmd - inline rotation command
* @sequence_id: unique command sequence identifier
* @video_mode: true if video interface is connected
* @fps: frame rate in frame-per-second
* @rot90: rotate 90 counterclockwise
* @hflip: horizontal flip prior to rotation
* @vflip: vertical flip prior to rotation
* @secure: true if buffer is in secure domain
* @prefill_bw: prefill bandwidth in Bps
* @clkrate: clock rate in Hz
* @data_bw: data bus bandwidth in Bps
* @src_addr: source i/o buffer virtual address
* @src_len: source i/o buffer length
* @src_planes: source plane number
* @src_pixfmt: v4l2 fourcc pixel format of source buffer
* @src_width: width of source buffer
* @src_height: height of source buffer
* @src_rect_x: roi x coordinate of source buffer
* @src_rect_y: roi y coordinate of source buffer
* @src_rect_w: roi width of source buffer
* @src_rect_h: roi height of source buffer
* @dst_addr: destination i/o virtual buffer address
* @dst_len: destination i/o buffer length
* @dst_planes: destination plane number
* @dst_pixfmt: v4l2 fourcc pixel format of destination buffer
* @dst_rect_x: roi x coordinate of destination buffer
* @dst_rect_y: roi y coordinate of destination buffer
* @dst_rect_w: roi width of destination buffer
* @dst_rect_h: roi height of destination buffer
* @dst_writeback: true if cache writeback is required
* @priv_handle: private handle of rotator session
*/
struct sde_rotator_inline_cmd {
u32 sequence_id;
bool video_mode;
u32 fps;
bool rot90;
bool hflip;
bool vflip;
bool secure;
u64 prefill_bw;
u64 clkrate;
u64 data_bw;
dma_addr_t src_addr[SDE_ROTATOR_INLINE_PLANE_MAX];
u32 src_len[SDE_ROTATOR_INLINE_PLANE_MAX];
u32 src_planes;
u32 src_pixfmt;
u32 src_width;
u32 src_height;
u32 src_rect_x;
u32 src_rect_y;
u32 src_rect_w;
u32 src_rect_h;
dma_addr_t dst_addr[SDE_ROTATOR_INLINE_PLANE_MAX];
u32 dst_len[SDE_ROTATOR_INLINE_PLANE_MAX];
u32 dst_planes;
u32 dst_pixfmt;
u32 dst_rect_x;
u32 dst_rect_y;
u32 dst_rect_w;
u32 dst_rect_h;
bool dst_writeback;
void *priv_handle;
};
#if IS_ENABLED(CONFIG_MSM_SDE_ROTATOR)
void *sde_rotator_inline_open(struct platform_device *pdev);
int sde_rotator_inline_get_dst_pixfmt(struct platform_device *pdev,
u32 src_pixfmt, u32 *dst_pixfmt);
int sde_rotator_inline_get_downscale_caps(struct platform_device *pdev,
char *downscale_caps, int len);
int sde_rotator_inline_get_maxlinewidth(struct platform_device *pdev);
int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev,
bool input, u32 *pixfmt, int len);
int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
enum sde_rotator_inline_cmd_type cmd_type);
int sde_rotator_inline_release(void *handle);
void sde_rotator_inline_reg_dump(struct platform_device *pdev);
#else
static inline void *sde_rotator_inline_open(struct platform_device *pdev)
{
	return NULL;
}
static inline int sde_rotator_inline_get_dst_pixfmt(
		struct platform_device *pdev,
		u32 src_pixfmt, u32 *dst_pixfmt)
{
	return 0;
}
static inline int sde_rotator_inline_get_downscale_caps(
		struct platform_device *pdev,
		char *downscale_caps, int len)
{
	return 0;
}
static inline int sde_rotator_inline_get_maxlinewidth(
		struct platform_device *pdev)
{
	return 0;
}
static inline int sde_rotator_inline_get_pixfmt_caps(
		struct platform_device *pdev,
		bool input, u32 *pixfmt, int len)
{
	return 0;
}
static inline int sde_rotator_inline_commit(void *handle,
		struct sde_rotator_inline_cmd *cmd,
		enum sde_rotator_inline_cmd_type cmd_type)
{
	return 0;
}
static inline int sde_rotator_inline_release(void *handle)
{
	return 0;
}
static inline void sde_rotator_inline_reg_dump(struct platform_device *pdev)
{
}
#endif
#endif /* __SDE_ROTATOR_INLINE_H__ */
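A hypothetical caller sketch, illustrative only, driving one inline rotation through the command stages declared above. Filling in buffer addresses, formats and ROIs is omitted, and linux/err.h is assumed for IS_ERR_OR_NULL().

static int example_inline_rotate(struct platform_device *pdev,
		struct sde_rotator_inline_cmd *cmd)
{
	void *handle;
	int rc;

	handle = sde_rotator_inline_open(pdev);
	if (IS_ERR_OR_NULL(handle))
		return -ENODEV;

	/* assumption: the session handle is carried in priv_handle */
	cmd->priv_handle = handle;

	rc = sde_rotator_inline_commit(handle, cmd,
			SDE_ROTATOR_INLINE_CMD_VALIDATE);
	if (!rc)
		rc = sde_rotator_inline_commit(handle, cmd,
				SDE_ROTATOR_INLINE_CMD_COMMIT);
	if (!rc)
		rc = sde_rotator_inline_commit(handle, cmd,
				SDE_ROTATOR_INLINE_CMD_START);

	if (rc)
		sde_rotator_inline_commit(handle, cmd,
				SDE_ROTATOR_INLINE_CMD_ABORT);
	else
		sde_rotator_inline_commit(handle, cmd,
				SDE_ROTATOR_INLINE_CMD_CLEANUP);

	sde_rotator_inline_release(handle);
	return rc;
}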


@@ -0,0 +1,423 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012, 2015-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
#include "sde_rotator_io_util.h"
void sde_reg_w(struct sde_io_data *io, u32 offset, u32 value, u32 debug)
{
u32 in_val;
if (!io || !io->base) {
DEV_ERR("%pS->%s: invalid input\n",
__builtin_return_address(0), __func__);
return;
}
if (offset > io->len) {
DEV_ERR("%pS->%s: offset out of range\n",
__builtin_return_address(0), __func__);
return;
}
DEV_DBG("sdeio:%6.6x:%8.8x\n", offset, value);
writel_relaxed(value, io->base + offset);
if (debug) {
/* ensure register read is ordered after register write */
mb();
in_val = readl_relaxed(io->base + offset);
DEV_DBG("[%08x] => %08x [%08x]\n",
(u32)(unsigned long)(io->base + offset),
value, in_val);
}
} /* sde_reg_w */
u32 sde_reg_r(struct sde_io_data *io, u32 offset, u32 debug)
{
u32 value;
if (!io || !io->base) {
DEV_ERR("%pS->%s: invalid input\n",
__builtin_return_address(0), __func__);
return -EINVAL;
}
if (offset > io->len) {
DEV_ERR("%pS->%s: offset out of range\n",
__builtin_return_address(0), __func__);
return -EINVAL;
}
value = readl_relaxed(io->base + offset);
if (debug)
DEV_DBG("[%08x] <= %08x\n",
(u32)(unsigned long)(io->base + offset), value);
DEV_DBG("sdeio:%6.6x:%8.8x\n", offset, value);
return value;
} /* sde_reg_r */
void sde_reg_dump(void __iomem *base, u32 length, const char *prefix,
u32 debug)
{
if (debug)
print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
(void *)base, length, false);
} /* sde_reg_dump */
static struct resource *sde_rot_get_res_byname(struct platform_device *pdev,
unsigned int type, const char *name)
{
struct resource *res = NULL;
res = platform_get_resource_byname(pdev, type, name);
if (!res)
DEV_ERR("%s: '%s' resource not found\n", __func__, name);
return res;
} /* sde_rot_get_res_byname */
int sde_rot_ioremap_byname(struct platform_device *pdev,
struct sde_io_data *io_data, const char *name)
{
struct resource *res = NULL;
if (!pdev || !io_data) {
DEV_ERR("%pS->%s: invalid input\n",
__builtin_return_address(0), __func__);
return -EINVAL;
}
res = sde_rot_get_res_byname(pdev, IORESOURCE_MEM, name);
if (!res) {
DEV_ERR("%pS->%s: '%s' sde_rot_get_res_byname failed\n",
__builtin_return_address(0), __func__, name);
return -ENODEV;
}
io_data->len = (u32)resource_size(res);
io_data->base = ioremap(res->start, io_data->len);
if (!io_data->base) {
DEV_ERR("%pS->%s: '%s' ioremap failed\n",
__builtin_return_address(0), __func__, name);
return -EIO;
}
return 0;
} /* sde_rot_ioremap_byname */
void sde_rot_iounmap(struct sde_io_data *io_data)
{
if (!io_data) {
DEV_ERR("%pS->%s: invalid input\n",
__builtin_return_address(0), __func__);
return;
}
if (io_data->base) {
iounmap(io_data->base);
io_data->base = NULL;
}
io_data->len = 0;
} /* sde_rot_iounmap */
int sde_rot_config_vreg(struct device *dev, struct sde_vreg *in_vreg,
int num_vreg, int config)
{
int i = 0, rc = 0;
struct sde_vreg *curr_vreg = NULL;
enum sde_vreg_type type;
if (!dev || !in_vreg || !num_vreg) {
DEV_ERR("%pS->%s: invalid input\n",
__builtin_return_address(0), __func__);
return -EINVAL;
}
if (config) {
for (i = 0; i < num_vreg; i++) {
curr_vreg = &in_vreg[i];
curr_vreg->vreg = regulator_get(dev,
curr_vreg->vreg_name);
rc = PTR_ERR_OR_ZERO(curr_vreg->vreg);
if (rc) {
DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
__builtin_return_address(0), __func__,
curr_vreg->vreg_name, rc);
curr_vreg->vreg = NULL;
goto vreg_get_fail;
}
type = (regulator_count_voltages(curr_vreg->vreg) > 0)
? SDE_REG_LDO : SDE_REG_VS;
if (type == SDE_REG_LDO) {
rc = regulator_set_voltage(
curr_vreg->vreg,
curr_vreg->min_voltage,
curr_vreg->max_voltage);
if (rc < 0) {
DEV_ERR("%pS->%s: %s set vltg fail\n",
__builtin_return_address(0),
__func__,
curr_vreg->vreg_name);
goto vreg_set_voltage_fail;
}
}
}
} else {
for (i = num_vreg-1; i >= 0; i--) {
curr_vreg = &in_vreg[i];
if (curr_vreg->vreg) {
type = (regulator_count_voltages(
curr_vreg->vreg) > 0)
? SDE_REG_LDO : SDE_REG_VS;
if (type == SDE_REG_LDO) {
regulator_set_voltage(curr_vreg->vreg,
0, curr_vreg->max_voltage);
}
regulator_put(curr_vreg->vreg);
curr_vreg->vreg = NULL;
}
}
}
return 0;
vreg_unconfig:
if (type == SDE_REG_LDO)
regulator_set_load(curr_vreg->vreg, 0);
vreg_set_voltage_fail:
regulator_put(curr_vreg->vreg);
curr_vreg->vreg = NULL;
vreg_get_fail:
for (i--; i >= 0; i--) {
curr_vreg = &in_vreg[i];
type = (regulator_count_voltages(curr_vreg->vreg) > 0)
? SDE_REG_LDO : SDE_REG_VS;
goto vreg_unconfig;
}
return rc;
} /* sde_rot_config_vreg */
int sde_rot_enable_vreg(struct sde_vreg *in_vreg, int num_vreg, int enable)
{
int i = 0, rc = 0;
bool need_sleep;
if (!in_vreg) {
DEV_ERR("%pS->%s: invalid input\n",
__builtin_return_address(0), __func__);
return -EINVAL;
}
if (enable) {
for (i = 0; i < num_vreg; i++) {
rc = PTR_ERR_OR_ZERO(in_vreg[i].vreg);
if (rc) {
DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
__builtin_return_address(0), __func__,
in_vreg[i].vreg_name, rc);
goto vreg_set_opt_mode_fail;
}
need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
if (in_vreg[i].pre_on_sleep && need_sleep)
usleep_range(in_vreg[i].pre_on_sleep * 1000,
in_vreg[i].pre_on_sleep * 1000);
rc = regulator_set_load(in_vreg[i].vreg,
in_vreg[i].enable_load);
if (rc < 0) {
DEV_ERR("%pS->%s: %s set opt m fail\n",
__builtin_return_address(0), __func__,
in_vreg[i].vreg_name);
goto vreg_set_opt_mode_fail;
}
rc = regulator_enable(in_vreg[i].vreg);
if (in_vreg[i].post_on_sleep && need_sleep)
usleep_range(in_vreg[i].post_on_sleep * 1000,
in_vreg[i].post_on_sleep * 1000);
if (rc < 0) {
DEV_ERR("%pS->%s: %s enable failed\n",
__builtin_return_address(0), __func__,
in_vreg[i].vreg_name);
goto disable_vreg;
}
}
} else {
for (i = num_vreg-1; i >= 0; i--) {
if (in_vreg[i].pre_off_sleep)
usleep_range(in_vreg[i].pre_off_sleep * 1000,
in_vreg[i].pre_off_sleep * 1000);
regulator_disable(in_vreg[i].vreg);
if (in_vreg[i].post_off_sleep)
usleep_range(in_vreg[i].post_off_sleep * 1000,
in_vreg[i].post_off_sleep * 1000);
regulator_set_load(in_vreg[i].vreg,
in_vreg[i].disable_load);
}
}
return rc;
disable_vreg:
regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load);
vreg_set_opt_mode_fail:
for (i--; i >= 0; i--) {
if (in_vreg[i].pre_off_sleep)
usleep_range(in_vreg[i].pre_off_sleep * 1000,
in_vreg[i].pre_off_sleep * 1000);
regulator_disable(in_vreg[i].vreg);
if (in_vreg[i].post_off_sleep)
usleep_range(in_vreg[i].post_off_sleep * 1000,
in_vreg[i].post_off_sleep * 1000);
regulator_set_load(in_vreg[i].vreg,
in_vreg[i].disable_load);
}
return rc;
} /* sde_rot_enable_vreg */
void sde_rot_put_clk(struct sde_clk *clk_arry, int num_clk)
{
int i;
if (!clk_arry) {
DEV_ERR("%pS->%s: invalid input\n",
__builtin_return_address(0), __func__);
return;
}
for (i = num_clk - 1; i >= 0; i--) {
if (clk_arry[i].clk)
clk_put(clk_arry[i].clk);
clk_arry[i].clk = NULL;
}
} /* sde_rot_put_clk */
int sde_rot_get_clk(struct device *dev, struct sde_clk *clk_arry, int num_clk)
{
int i, rc = 0;
if (!dev || !clk_arry) {
DEV_ERR("%pS->%s: invalid input\n",
__builtin_return_address(0), __func__);
return -EINVAL;
}
for (i = 0; i < num_clk; i++) {
clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
rc = PTR_ERR_OR_ZERO(clk_arry[i].clk);
if (rc) {
DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name, rc);
goto error;
}
}
return rc;
error:
sde_rot_put_clk(clk_arry, num_clk);
return rc;
} /* sde_rot_get_clk */
int sde_rot_clk_set_rate(struct sde_clk *clk_arry, int num_clk)
{
int i, rc = 0;
if (!clk_arry) {
DEV_ERR("%pS->%s: invalid input\n",
__builtin_return_address(0), __func__);
return -EINVAL;
}
for (i = 0; i < num_clk; i++) {
if (clk_arry[i].clk) {
if (clk_arry[i].type != SDE_CLK_AHB) {
DEV_DBG("%pS->%s: '%s' rate %ld\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name,
clk_arry[i].rate);
rc = clk_set_rate(clk_arry[i].clk,
clk_arry[i].rate);
if (rc) {
DEV_ERR("%pS->%s: %s failed. rc=%d\n",
__builtin_return_address(0),
__func__,
clk_arry[i].clk_name, rc);
break;
}
}
} else {
DEV_ERR("%pS->%s: '%s' is not available\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
rc = -EPERM;
break;
}
}
return rc;
} /* sde_rot_clk_set_rate */
int sde_rot_enable_clk(struct sde_clk *clk_arry, int num_clk, int enable)
{
int i, rc = 0;
if (!clk_arry) {
DEV_ERR("%pS->%s: invalid input\n",
__builtin_return_address(0), __func__);
return -EINVAL;
}
if (enable) {
for (i = 0; i < num_clk; i++) {
DEV_DBG("%pS->%s: enable '%s'\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
if (clk_arry[i].clk) {
rc = clk_prepare_enable(clk_arry[i].clk);
if (rc)
DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
__builtin_return_address(0),
__func__,
clk_arry[i].clk_name, rc);
} else {
DEV_ERR("%pS->%s: '%s' is not available\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
rc = -EPERM;
}
if (rc) {
/* unwind: disable the clocks enabled so far (indices 0..i-1) */
sde_rot_enable_clk(clk_arry, i, false);
break;
}
}
} else {
for (i = num_clk - 1; i >= 0; i--) {
DEV_DBG("%pS->%s: disable '%s'\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
if (clk_arry[i].clk)
clk_disable_unprepare(clk_arry[i].clk);
else
DEV_ERR("%pS->%s: '%s' is not available\n",
__builtin_return_address(0), __func__,
clk_arry[i].clk_name);
}
}
return rc;
} /* sde_rot_enable_clk */


@@ -0,0 +1,98 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012, 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_ROTATOR_IO_UTIL_H__
#define __SDE_ROTATOR_IO_UTIL_H__
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/i2c.h>
#include <linux/types.h>
#ifdef DEBUG
#define DEV_DBG(fmt, args...) pr_err("<SDEROT_ERR> " fmt, ##args)
#else
#define DEV_DBG(fmt, args...) pr_debug("<SDEROT_DBG> " fmt, ##args)
#endif
#define DEV_INFO(fmt, args...) pr_info("<SDEROT_INFO> " fmt, ##args)
#define DEV_WARN(fmt, args...) pr_warn("<SDEROT_WARN> " fmt, ##args)
#define DEV_ERR(fmt, args...) pr_err("<SDEROT_ERR> " fmt, ##args)
struct sde_io_data {
u32 len;
void __iomem *base;
};
void sde_reg_w(struct sde_io_data *io, u32 offset, u32 value, u32 debug);
u32 sde_reg_r(struct sde_io_data *io, u32 offset, u32 debug);
void sde_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug);
#define SDE_REG_W_ND(io, offset, val) sde_reg_w(io, offset, val, false)
#define SDE_REG_W(io, offset, val) sde_reg_w(io, offset, val, true)
#define SDE_REG_R_ND(io, offset) sde_reg_r(io, offset, false)
#define SDE_REG_R(io, offset) sde_reg_r(io, offset, true)
enum sde_vreg_type {
SDE_REG_LDO,
SDE_REG_VS,
};
struct sde_vreg {
struct regulator *vreg; /* vreg handle */
char vreg_name[32];
int min_voltage;
int max_voltage;
int enable_load;
int disable_load;
int pre_on_sleep;
int post_on_sleep;
int pre_off_sleep;
int post_off_sleep;
};
struct sde_gpio {
unsigned int gpio;
unsigned int value;
char gpio_name[32];
};
enum sde_clk_type {
SDE_CLK_AHB, /* no set rate. rate controlled through rpm */
SDE_CLK_PCLK,
SDE_CLK_OTHER,
};
struct sde_clk {
struct clk *clk; /* clk handle */
char clk_name[32];
enum sde_clk_type type;
unsigned long rate;
};
struct sde_module_power {
unsigned int num_vreg;
struct sde_vreg *vreg_config;
unsigned int num_gpio;
struct sde_gpio *gpio_config;
unsigned int num_clk;
struct sde_clk *clk_config;
};
int sde_rot_ioremap_byname(struct platform_device *pdev,
struct sde_io_data *io_data, const char *name);
void sde_rot_iounmap(struct sde_io_data *io_data);
int sde_rot_config_vreg(struct device *dev, struct sde_vreg *in_vreg,
int num_vreg, int config);
int sde_rot_enable_vreg(struct sde_vreg *in_vreg, int num_vreg, int enable);
int sde_rot_get_clk(struct device *dev, struct sde_clk *clk_arry, int num_clk);
void sde_rot_put_clk(struct sde_clk *clk_arry, int num_clk);
int sde_rot_clk_set_rate(struct sde_clk *clk_arry, int num_clk);
int sde_rot_enable_clk(struct sde_clk *clk_arry, int num_clk, int enable);
#endif /* __SDE_ROTATOR_IO_UTIL_H__ */


@@ -0,0 +1,745 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include "sde_rotator_r1_hwio.h"
#include "sde_rotator_core.h"
#include "sde_rotator_util.h"
#include "sde_rotator_r1_internal.h"
#include "sde_rotator_r1.h"
#include "sde_rotator_r1_debug.h"
struct sde_mdp_hw_resource {
struct sde_rot_hw_resource hw;
struct sde_mdp_ctl *ctl;
struct sde_mdp_mixer *mixer;
struct sde_mdp_pipe *pipe;
struct sde_mdp_writeback *wb;
};
struct sde_rotator_r1_data {
struct sde_rot_mgr *mgr;
int wb_id;
int ctl_id;
int irq_num;
struct sde_mdp_hw_resource *mdp_hw;
};
static u32 sde_hw_rotator_input_pixfmts[] = {
SDE_PIX_FMT_XRGB_8888,
SDE_PIX_FMT_ARGB_8888,
SDE_PIX_FMT_ABGR_8888,
SDE_PIX_FMT_RGBA_8888,
SDE_PIX_FMT_BGRA_8888,
SDE_PIX_FMT_RGBX_8888,
SDE_PIX_FMT_BGRX_8888,
SDE_PIX_FMT_XBGR_8888,
SDE_PIX_FMT_RGBA_5551,
SDE_PIX_FMT_ARGB_1555,
SDE_PIX_FMT_ABGR_1555,
SDE_PIX_FMT_BGRA_5551,
SDE_PIX_FMT_BGRX_5551,
SDE_PIX_FMT_RGBX_5551,
SDE_PIX_FMT_XBGR_1555,
SDE_PIX_FMT_XRGB_1555,
SDE_PIX_FMT_ARGB_4444,
SDE_PIX_FMT_RGBA_4444,
SDE_PIX_FMT_BGRA_4444,
SDE_PIX_FMT_ABGR_4444,
SDE_PIX_FMT_RGBX_4444,
SDE_PIX_FMT_XRGB_4444,
SDE_PIX_FMT_BGRX_4444,
SDE_PIX_FMT_XBGR_4444,
SDE_PIX_FMT_RGB_888,
SDE_PIX_FMT_BGR_888,
SDE_PIX_FMT_RGB_565,
SDE_PIX_FMT_BGR_565,
SDE_PIX_FMT_Y_CB_CR_H2V2,
SDE_PIX_FMT_Y_CR_CB_H2V2,
SDE_PIX_FMT_Y_CR_CB_GH2V2,
SDE_PIX_FMT_Y_CBCR_H2V2,
SDE_PIX_FMT_Y_CRCB_H2V2,
SDE_PIX_FMT_Y_CBCR_H1V2,
SDE_PIX_FMT_Y_CRCB_H1V2,
SDE_PIX_FMT_Y_CBCR_H2V1,
SDE_PIX_FMT_Y_CRCB_H2V1,
SDE_PIX_FMT_YCBYCR_H2V1,
SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
SDE_PIX_FMT_RGBA_8888_UBWC,
SDE_PIX_FMT_RGBX_8888_UBWC,
SDE_PIX_FMT_RGB_565_UBWC,
SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
};
static u32 sde_hw_rotator_output_pixfmts[] = {
SDE_PIX_FMT_XRGB_8888,
SDE_PIX_FMT_ARGB_8888,
SDE_PIX_FMT_ABGR_8888,
SDE_PIX_FMT_RGBA_8888,
SDE_PIX_FMT_BGRA_8888,
SDE_PIX_FMT_RGBX_8888,
SDE_PIX_FMT_BGRX_8888,
SDE_PIX_FMT_XBGR_8888,
SDE_PIX_FMT_RGBA_5551,
SDE_PIX_FMT_ARGB_1555,
SDE_PIX_FMT_ABGR_1555,
SDE_PIX_FMT_BGRA_5551,
SDE_PIX_FMT_BGRX_5551,
SDE_PIX_FMT_RGBX_5551,
SDE_PIX_FMT_XBGR_1555,
SDE_PIX_FMT_XRGB_1555,
SDE_PIX_FMT_ARGB_4444,
SDE_PIX_FMT_RGBA_4444,
SDE_PIX_FMT_BGRA_4444,
SDE_PIX_FMT_ABGR_4444,
SDE_PIX_FMT_RGBX_4444,
SDE_PIX_FMT_XRGB_4444,
SDE_PIX_FMT_BGRX_4444,
SDE_PIX_FMT_XBGR_4444,
SDE_PIX_FMT_RGB_888,
SDE_PIX_FMT_BGR_888,
SDE_PIX_FMT_RGB_565,
SDE_PIX_FMT_BGR_565,
SDE_PIX_FMT_Y_CB_CR_H2V2,
SDE_PIX_FMT_Y_CR_CB_H2V2,
SDE_PIX_FMT_Y_CR_CB_GH2V2,
SDE_PIX_FMT_Y_CBCR_H2V2,
SDE_PIX_FMT_Y_CRCB_H2V2,
SDE_PIX_FMT_Y_CBCR_H1V2,
SDE_PIX_FMT_Y_CRCB_H1V2,
SDE_PIX_FMT_Y_CBCR_H2V1,
SDE_PIX_FMT_Y_CRCB_H2V1,
SDE_PIX_FMT_YCBYCR_H2V1,
SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
SDE_PIX_FMT_RGBA_8888_UBWC,
SDE_PIX_FMT_RGBX_8888_UBWC,
SDE_PIX_FMT_RGB_565_UBWC,
SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
};
static struct sde_mdp_hw_resource *sde_rotator_hw_alloc(
struct sde_rot_mgr *mgr, u32 ctl_id, u32 wb_id, int irq_num)
{
struct sde_mdp_hw_resource *mdp_hw;
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
int pipe_ndx, offset = ctl_id;
int ret = 0;
mdp_hw = devm_kzalloc(&mgr->pdev->dev,
sizeof(struct sde_mdp_hw_resource), GFP_KERNEL);
if (!mdp_hw)
return ERR_PTR(-ENOMEM);
mdp_hw->ctl = sde_mdp_ctl_alloc(mdata, offset);
if (IS_ERR_OR_NULL(mdp_hw->ctl)) {
SDEROT_ERR("unable to allocate ctl\n");
ret = -ENODEV;
goto error;
}
mdp_hw->ctl->irq_num = irq_num;
mdp_hw->wb = sde_mdp_wb_assign(wb_id, mdp_hw->ctl->num);
if (IS_ERR_OR_NULL(mdp_hw->wb)) {
SDEROT_ERR("unable to allocate wb\n");
ret = -ENODEV;
goto error;
}
mdp_hw->ctl->wb = mdp_hw->wb;
mdp_hw->mixer = sde_mdp_mixer_assign(mdp_hw->wb->num, true);
if (IS_ERR_OR_NULL(mdp_hw->mixer)) {
SDEROT_ERR("unable to allocate wb mixer\n");
ret = -ENODEV;
goto error;
}
mdp_hw->ctl->mixer_left = mdp_hw->mixer;
mdp_hw->mixer->ctl = mdp_hw->ctl;
mdp_hw->mixer->rotator_mode = true;
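/* the WB layer mixer chosen here determines which rotator opmode (ROT0/ROT1) the ctl runs in */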
switch (mdp_hw->mixer->num) {
case SDE_MDP_WB_LAYERMIXER0:
mdp_hw->ctl->opmode = SDE_MDP_CTL_OP_ROT0_MODE;
break;
case SDE_MDP_WB_LAYERMIXER1:
mdp_hw->ctl->opmode = SDE_MDP_CTL_OP_ROT1_MODE;
break;
default:
SDEROT_ERR("invalid layer mixer=%d\n", mdp_hw->mixer->num);
ret = -EINVAL;
goto error;
}
mdp_hw->ctl->ops.start_fnc = sde_mdp_writeback_start;
mdp_hw->ctl->wb_type = SDE_MDP_WB_CTL_TYPE_BLOCK;
if (mdp_hw->ctl->ops.start_fnc)
ret = mdp_hw->ctl->ops.start_fnc(mdp_hw->ctl);
if (ret)
goto error;
/* override from dt */
pipe_ndx = wb_id;
mdp_hw->pipe = sde_mdp_pipe_assign(mdata, mdp_hw->mixer, pipe_ndx);
if (IS_ERR_OR_NULL(mdp_hw->pipe)) {
SDEROT_ERR("dma pipe allocation failed\n");
ret = -ENODEV;
goto error;
}
mdp_hw->pipe->mixer_left = mdp_hw->mixer;
mdp_hw->hw.wb_id = mdp_hw->wb->num;
mdp_hw->hw.pending_count = 0;
atomic_set(&mdp_hw->hw.num_active, 0);
mdp_hw->hw.max_active = 1;
init_waitqueue_head(&mdp_hw->hw.wait_queue);
return mdp_hw;
error:
if (!IS_ERR_OR_NULL(mdp_hw->pipe))
sde_mdp_pipe_destroy(mdp_hw->pipe);
if (!IS_ERR_OR_NULL(mdp_hw->ctl)) {
if (mdp_hw->ctl->ops.stop_fnc)
mdp_hw->ctl->ops.stop_fnc(mdp_hw->ctl, 0);
sde_mdp_ctl_free(mdp_hw->ctl);
}
devm_kfree(&mgr->pdev->dev, mdp_hw);
return ERR_PTR(ret);
}
static void sde_rotator_hw_free(struct sde_rot_mgr *mgr,
struct sde_mdp_hw_resource *mdp_hw)
{
struct sde_mdp_mixer *mixer;
struct sde_mdp_ctl *ctl;
if (!mgr || !mdp_hw)
return;
mixer = mdp_hw->pipe->mixer_left;
sde_mdp_pipe_destroy(mdp_hw->pipe);
ctl = sde_mdp_ctl_mixer_switch(mixer->ctl,
SDE_MDP_WB_CTL_TYPE_BLOCK);
if (ctl) {
if (ctl->ops.stop_fnc)
ctl->ops.stop_fnc(ctl, 0);
sde_mdp_ctl_free(ctl);
}
devm_kfree(&mgr->pdev->dev, mdp_hw);
}
static struct sde_rot_hw_resource *sde_rotator_hw_alloc_ext(
struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
{
struct sde_mdp_hw_resource *mdp_hw;
struct sde_rotator_r1_data *hw_data;
if (!mgr || !mgr->hw_data)
return NULL;
hw_data = mgr->hw_data;
mdp_hw = hw_data->mdp_hw;
return &mdp_hw->hw;
}
static void sde_rotator_hw_free_ext(struct sde_rot_mgr *mgr,
struct sde_rot_hw_resource *hw)
{
/* currently nothing specific for this device */
}
static void sde_rotator_translate_rect(struct sde_rect *dst,
struct sde_rect *src)
{
dst->x = src->x;
dst->y = src->y;
dst->w = src->w;
dst->h = src->h;
}
static u32 sde_rotator_translate_flags(u32 input)
{
u32 output = 0;
if (input & SDE_ROTATION_NOP)
output |= SDE_ROT_NOP;
if (input & SDE_ROTATION_FLIP_LR)
output |= SDE_FLIP_LR;
if (input & SDE_ROTATION_FLIP_UD)
output |= SDE_FLIP_UD;
if (input & SDE_ROTATION_90)
output |= SDE_ROT_90;
if (input & SDE_ROTATION_DEINTERLACE)
output |= SDE_DEINTERLACE;
if (input & SDE_ROTATION_SECURE)
output |= SDE_SECURE_OVERLAY_SESSION;
return output;
}
static int sde_rotator_config_hw(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry)
{
struct sde_mdp_hw_resource *mdp_hw;
struct sde_mdp_pipe *pipe;
struct sde_rotation_item *item;
int ret;
if (!hw || !entry) {
SDEROT_ERR("null hw resource/entry");
return -EINVAL;
}
mdp_hw = container_of(hw, struct sde_mdp_hw_resource, hw);
pipe = mdp_hw->pipe;
item = &entry->item;
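/*
 * Program the source pipe from the rotation item. The pipe destination is
 * the source rectangle; rotation and downscale to the final dst_rect are
 * applied later by the writeback block.
 */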
pipe->flags = sde_rotator_translate_flags(item->flags);
pipe->src_fmt = sde_get_format_params(item->input.format);
pipe->img_width = item->input.width;
pipe->img_height = item->input.height;
sde_rotator_translate_rect(&pipe->src, &item->src_rect);
sde_rotator_translate_rect(&pipe->dst, &item->src_rect);
pipe->params_changed++;
ret = sde_mdp_pipe_queue_data(pipe, &entry->src_buf);
SDEROT_DBG("Config pipe. src{%u,%u,%u,%u}f=%u\n"
"dst{%u,%u,%u,%u}f=%u session_id=%u\n",
item->src_rect.x, item->src_rect.y,
item->src_rect.w, item->src_rect.h, item->input.format,
item->dst_rect.x, item->dst_rect.y,
item->dst_rect.w, item->dst_rect.h, item->output.format,
item->session_id);
return ret;
}
static int sde_rotator_cancel_hw(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry)
{
return 0;
}
static int sde_rotator_abort_hw(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry)
{
return 0;
}
static int sde_rotator_kickoff_entry(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry)
{
struct sde_mdp_hw_resource *mdp_hw;
int ret;
struct sde_mdp_writeback_arg wb_args;
if (!hw || !entry) {
SDEROT_ERR("null hw resource/entry");
return -EINVAL;
}
wb_args.data = &entry->dst_buf;
wb_args.priv_data = entry;
mdp_hw = container_of(hw, struct sde_mdp_hw_resource, hw);
ret = sde_mdp_writeback_display_commit(mdp_hw->ctl, &wb_args);
return ret;
}
static int sde_rotator_wait_for_entry(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry)
{
struct sde_mdp_hw_resource *mdp_hw;
int ret;
struct sde_mdp_ctl *ctl;
if (!hw || !entry) {
SDEROT_ERR("null hw resource/entry");
return -EINVAL;
}
mdp_hw = container_of(hw, struct sde_mdp_hw_resource, hw);
ctl = mdp_hw->ctl;
ret = sde_mdp_display_wait4comp(ctl);
return ret;
}
static int sde_rotator_hw_validate_entry(struct sde_rot_mgr *mgr,
struct sde_rot_entry *entry)
{
int ret = 0;
u16 src_w, src_h, dst_w, dst_h, bit;
struct sde_rotation_item *item = &entry->item;
struct sde_mdp_format_params *fmt;
src_w = item->src_rect.w;
src_h = item->src_rect.h;
if (item->flags & SDE_ROTATION_90) {
dst_w = item->dst_rect.h;
dst_h = item->dst_rect.w;
} else {
dst_w = item->dst_rect.w;
dst_h = item->dst_rect.h;
}
entry->dnsc_factor_w = 0;
entry->dnsc_factor_h = 0;
if ((src_w != dst_w) || (src_h != dst_h)) {
if ((src_w % dst_w) || (src_h % dst_h)) {
SDEROT_DBG("non integral scale not support\n");
ret = -EINVAL;
goto dnsc_err;
}
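/*
 * Downscale factors must be a power of two no larger than 16
 * (fls() <= 5); reject anything else.
 */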
entry->dnsc_factor_w = src_w / dst_w;
bit = fls(entry->dnsc_factor_w);
if ((entry->dnsc_factor_w & ~BIT(bit - 1)) || (bit > 5)) {
SDEROT_DBG("non power-of-2 scale not support\n");
ret = -EINVAL;
goto dnsc_err;
}
entry->dnsc_factor_h = src_h / dst_h;
bit = fls(entry->dnsc_factor_h);
if ((entry->dnsc_factor_h & ~BIT(bit - 1)) || (bit > 5)) {
SDEROT_DBG("non power-of-2 dscale not support\n");
ret = -EINVAL;
goto dnsc_err;
}
}
fmt = sde_get_format_params(item->output.format);
if (sde_mdp_is_ubwc_format(fmt) &&
(entry->dnsc_factor_h || entry->dnsc_factor_w)) {
SDEROT_DBG("downscale with ubwc not support\n");
ret = -EINVAL;
}
dnsc_err:
/* Downscaler does not support asymmetrical dnsc */
if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
SDEROT_DBG("asymmetric downscale not support\n");
ret = -EINVAL;
}
if (ret) {
entry->dnsc_factor_w = 0;
entry->dnsc_factor_h = 0;
}
return ret;
}
static ssize_t sde_rotator_hw_show_caps(struct sde_rot_mgr *mgr,
struct device_attribute *attr, char *buf, ssize_t len)
{
struct sde_rotator_r1_data *hw_data;
int cnt = 0;
if (!mgr || !buf)
return 0;
hw_data = mgr->hw_data;
#define SPRINT(fmt, ...) \
(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
SPRINT("wb_id=%d\n", hw_data->wb_id);
SPRINT("ctl_id=%d\n", hw_data->ctl_id);
return cnt;
}
static ssize_t sde_rotator_hw_show_state(struct sde_rot_mgr *mgr,
struct device_attribute *attr, char *buf, ssize_t len)
{
struct sde_rotator_r1_data *hw_data;
int cnt = 0;
if (!mgr || !buf)
return 0;
hw_data = mgr->hw_data;
#define SPRINT(fmt, ...) \
(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
if (hw_data && hw_data->mdp_hw) {
struct sde_rot_hw_resource *hw = &hw_data->mdp_hw->hw;
SPRINT("irq_num=%d\n", hw_data->irq_num);
SPRINT("max_active=%d\n", hw->max_active);
SPRINT("num_active=%d\n", atomic_read(&hw->num_active));
SPRINT("pending_cnt=%u\n", hw->pending_count);
}
return cnt;
}
/*
* sde_hw_rotator_get_pixfmt - get the indexed pixel format
* @mgr: Pointer to rotator manager
* @index: index of pixel format
* @input: true for input port; false for output port
* @mode: operating mode
*/
static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
int index, bool input, u32 mode)
{
if (input) {
if (index < ARRAY_SIZE(sde_hw_rotator_input_pixfmts))
return sde_hw_rotator_input_pixfmts[index];
else
return 0;
} else {
if (index < ARRAY_SIZE(sde_hw_rotator_output_pixfmts))
return sde_hw_rotator_output_pixfmts[index];
else
return 0;
}
}
/*
* sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
* @mgr: Pointer to rotator manager
* @pixfmt: pixel format to be verified
* @input: true for input port; false for output port
* @mode: operating mode
*/
static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
bool input, u32 mode)
{
int i;
if (input) {
for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_input_pixfmts); i++)
if (sde_hw_rotator_input_pixfmts[i] == pixfmt)
return true;
} else {
for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_output_pixfmts); i++)
if (sde_hw_rotator_output_pixfmts[i] == pixfmt)
return true;
}
return false;
}
static int sde_rotator_hw_parse_dt(struct sde_rotator_r1_data *hw_data,
struct platform_device *dev)
{
int ret = 0;
u32 data;
if (!hw_data || !dev)
return -EINVAL;
ret = of_property_read_u32(dev->dev.of_node,
"qcom,mdss-wb-id", &data);
if (ret)
hw_data->wb_id = -1;
else
hw_data->wb_id = (int) data;
ret = of_property_read_u32(dev->dev.of_node,
"qcom,mdss-ctl-id", &data);
if (ret)
hw_data->ctl_id = -1;
else
hw_data->ctl_id = (int) data;
return ret;
}
static int sde_rotator_hw_rev_init(struct sde_rot_data_type *mdata)
{
if (!mdata) {
SDEROT_ERR("null rotator data\n");
return -EINVAL;
}
clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
set_bit(SDE_CAPS_R1_WB, mdata->sde_caps_map);
return 0;
}
enum {
SDE_ROTATOR_INTR_WB_0,
SDE_ROTATOR_INTR_WB_1,
SDE_ROTATOR_INTR_MAX,
};
struct intr_callback {
void (*func)(void *data);
void *arg;
};
static struct intr_callback sde_intr_cb[SDE_ROTATOR_INTR_MAX];
int sde_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
void (*fnc_ptr)(void *), void *arg)
{
if (intf_num >= SDE_ROTATOR_INTR_MAX) {
SDEROT_WARN("invalid intr type=%u intf_num=%u\n",
intr_type, intf_num);
return -EINVAL;
}
sde_intr_cb[intf_num].func = fnc_ptr;
sde_intr_cb[intf_num].arg = arg;
return 0;
}
static irqreturn_t sde_irq_handler(int irq, void *ptr)
{
struct sde_rot_data_type *mdata = ptr;
irqreturn_t ret = IRQ_NONE;
u32 isr;
isr = readl_relaxed(mdata->mdp_base + SDE_MDP_REG_INTR_STATUS);
SDEROT_DBG("intr_status = %8.8x\n", isr);
if (isr & SDE_MDP_INTR_WB_0_DONE) {
struct intr_callback *cb = &sde_intr_cb[SDE_ROTATOR_INTR_WB_0];
if (cb->func) {
writel_relaxed(SDE_MDP_INTR_WB_0_DONE,
mdata->mdp_base + SDE_MDP_REG_INTR_CLEAR);
cb->func(cb->arg);
ret = IRQ_HANDLED;
}
}
if (isr & SDE_MDP_INTR_WB_1_DONE) {
struct intr_callback *cb = &sde_intr_cb[SDE_ROTATOR_INTR_WB_1];
if (cb->func) {
writel_relaxed(SDE_MDP_INTR_WB_1_DONE,
mdata->mdp_base + SDE_MDP_REG_INTR_CLEAR);
cb->func(cb->arg);
ret = IRQ_HANDLED;
}
}
return ret;
}
static void sde_rotator_hw_destroy(struct sde_rot_mgr *mgr)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
struct sde_rotator_r1_data *hw_data;
if (!mgr || !mgr->pdev || !mgr->hw_data)
return;
hw_data = mgr->hw_data;
if (hw_data->irq_num >= 0)
devm_free_irq(&mgr->pdev->dev, hw_data->irq_num, mdata);
sde_rotator_hw_free(mgr, hw_data->mdp_hw);
devm_kfree(&mgr->pdev->dev, mgr->hw_data);
mgr->hw_data = NULL;
}
int sde_rotator_r1_init(struct sde_rot_mgr *mgr)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
struct sde_rotator_r1_data *hw_data;
int ret;
if (!mgr || !mgr->pdev) {
SDEROT_ERR("null rotator manager/platform device");
return -EINVAL;
}
hw_data = devm_kzalloc(&mgr->pdev->dev,
sizeof(struct sde_rotator_r1_data), GFP_KERNEL);
if (hw_data == NULL)
return -ENOMEM;
mgr->hw_data = hw_data;
mgr->ops_config_hw = sde_rotator_config_hw;
mgr->ops_cancel_hw = sde_rotator_cancel_hw;
mgr->ops_abort_hw = sde_rotator_abort_hw;
mgr->ops_kickoff_entry = sde_rotator_kickoff_entry;
mgr->ops_wait_for_entry = sde_rotator_wait_for_entry;
mgr->ops_hw_alloc = sde_rotator_hw_alloc_ext;
mgr->ops_hw_free = sde_rotator_hw_free_ext;
mgr->ops_hw_destroy = sde_rotator_hw_destroy;
mgr->ops_hw_validate_entry = sde_rotator_hw_validate_entry;
mgr->ops_hw_show_caps = sde_rotator_hw_show_caps;
mgr->ops_hw_show_state = sde_rotator_hw_show_state;
mgr->ops_hw_create_debugfs = sde_rotator_r1_create_debugfs;
mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
ret = sde_rotator_hw_parse_dt(mgr->hw_data, mgr->pdev);
if (ret)
goto error_parse_dt;
hw_data->irq_num = platform_get_irq(mgr->pdev, 0);
if (hw_data->irq_num < 0) {
SDEROT_ERR("fail to get rotator irq\n");
} else {
ret = devm_request_threaded_irq(&mgr->pdev->dev,
hw_data->irq_num,
sde_irq_handler, NULL,
0, "sde_rotator_r1", mdata);
if (ret) {
SDEROT_ERR("fail to request irq r:%d\n", ret);
hw_data->irq_num = -1;
} else {
disable_irq(hw_data->irq_num);
}
}
hw_data->mdp_hw = sde_rotator_hw_alloc(mgr, hw_data->ctl_id,
hw_data->wb_id, hw_data->irq_num);
if (IS_ERR_OR_NULL(hw_data->mdp_hw)) {
/* propagate the allocation failure instead of returning success */
ret = -ENODEV;
goto error_hw_alloc;
}
ret = sde_rotator_hw_rev_init(sde_rot_get_mdata());
if (ret)
goto error_hw_rev_init;
hw_data->mgr = mgr;
return 0;
error_hw_rev_init:
if (hw_data->irq_num >= 0)
devm_free_irq(&mgr->pdev->dev, hw_data->irq_num, mdata);
sde_rotator_hw_free(mgr, hw_data->mdp_hw);
error_hw_alloc:
devm_kfree(&mgr->pdev->dev, mgr->hw_data);
error_parse_dt:
return ret;
}


@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_ROTATOR_R1_H__
#define __SDE_ROTATOR_R1_H__
#include <linux/types.h>
#include "sde_rotator_core.h"
int sde_rotator_r1_init(struct sde_rot_mgr *mgr);
#endif /* __SDE_ROTATOR_R1_H__ */


@@ -0,0 +1,259 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/clk.h>
#include <linux/bitmap.h>
#include "sde_rotator_r1_hwio.h"
#include "sde_rotator_util.h"
#include "sde_rotator_r1_internal.h"
#include "sde_rotator_core.h"
struct sde_mdp_ctl *sde_mdp_ctl_alloc(struct sde_rot_data_type *mdata,
u32 off)
{
struct sde_mdp_ctl *ctl = NULL;
static struct sde_mdp_ctl sde_ctl[5];
static const u32 offset[] = {0x00002000, 0x00002200, 0x00002400,
0x00002600, 0x00002800};
if (off >= ARRAY_SIZE(offset)) {
SDEROT_ERR("invalid parameters\n");
return ERR_PTR(-EINVAL);
}
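/* ctl contexts are statically allocated; 'off' selects both the context and its register offset */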
ctl = &sde_ctl[off];
ctl->mdata = mdata;
ctl->num = off;
ctl->offset = offset[ctl->num];
ctl->base = mdata->sde_io.base + ctl->offset;
return ctl;
}
int sde_mdp_ctl_free(struct sde_mdp_ctl *ctl)
{
if (!ctl)
return -ENODEV;
if (ctl->wb)
sde_mdp_wb_free(ctl->wb);
ctl->is_secure = false;
ctl->mixer_left = NULL;
ctl->mixer_right = NULL;
ctl->wb = NULL;
memset(&ctl->ops, 0, sizeof(ctl->ops));
return 0;
}
struct sde_mdp_mixer *sde_mdp_mixer_assign(u32 id, bool wb)
{
struct sde_mdp_mixer *mixer = NULL;
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
static struct sde_mdp_mixer sde_mixer[16];
static const u32 offset[] = {0x00048000, 0x00049000};
if (id >= ARRAY_SIZE(offset)) {
SDEROT_ERR("invalid parameters\n");
return ERR_PTR(-EINVAL);
}
mixer = &sde_mixer[id];
mixer->num = id;
mixer->offset = offset[mixer->num];
mixer->base = mdata->sde_io.base + mixer->offset;
return mixer;
}
static void sde_mdp_mixer_setup(struct sde_mdp_ctl *master_ctl,
int mixer_mux)
{
int i;
struct sde_mdp_ctl *ctl = NULL;
struct sde_mdp_mixer *mixer = sde_mdp_mixer_get(master_ctl,
mixer_mux);
if (!mixer)
return;
ctl = mixer->ctl;
if (!ctl)
return;
/* check if mixer setup for rotator is needed */
if (mixer->rotator_mode) {
int nmixers = 5;
for (i = 0; i < nmixers; i++)
sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_LAYER(i), 0);
return;
}
}
struct sde_mdp_mixer *sde_mdp_mixer_get(struct sde_mdp_ctl *ctl, int mux)
{
struct sde_mdp_mixer *mixer = NULL;
if (!ctl) {
SDEROT_ERR("ctl not initialized\n");
return NULL;
}
switch (mux) {
case SDE_MDP_MIXER_MUX_DEFAULT:
case SDE_MDP_MIXER_MUX_LEFT:
mixer = ctl->mixer_left;
break;
case SDE_MDP_MIXER_MUX_RIGHT:
mixer = ctl->mixer_right;
break;
}
return mixer;
}
int sde_mdp_get_pipe_flush_bits(struct sde_mdp_pipe *pipe)
{
u32 flush_bits = 0;
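/*
 * CTL_FLUSH mapping: DMA pipes take BIT(num) shifted up by 5, VIG3/RGB3
 * by 10, cursor pipes map to bits 22+, remaining VIG/RGB pipes use
 * BIT(num) directly.
 */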
if (pipe->type == SDE_MDP_PIPE_TYPE_DMA)
flush_bits |= BIT(pipe->num) << 5;
else if (pipe->num == SDE_MDP_SSPP_VIG3 ||
pipe->num == SDE_MDP_SSPP_RGB3)
flush_bits |= BIT(pipe->num) << 10;
else if (pipe->type == SDE_MDP_PIPE_TYPE_CURSOR)
flush_bits |= BIT(22 + pipe->num - SDE_MDP_SSPP_CURSOR0);
else /* RGB/VIG 0-2 pipes */
flush_bits |= BIT(pipe->num);
return flush_bits;
}
int sde_mdp_mixer_pipe_update(struct sde_mdp_pipe *pipe,
struct sde_mdp_mixer *mixer, int params_changed)
{
struct sde_mdp_ctl *ctl;
if (!pipe)
return -EINVAL;
if (!mixer)
return -EINVAL;
ctl = mixer->ctl;
if (!ctl)
return -EINVAL;
ctl->flush_bits |= sde_mdp_get_pipe_flush_bits(pipe);
return 0;
}
int sde_mdp_display_wait4comp(struct sde_mdp_ctl *ctl)
{
int ret = 0;
if (!ctl) {
SDEROT_ERR("invalid ctl\n");
return -ENODEV;
}
if (ctl->ops.wait_fnc)
ret = ctl->ops.wait_fnc(ctl, NULL);
return ret;
}
int sde_mdp_display_commit(struct sde_mdp_ctl *ctl, void *arg,
struct sde_mdp_commit_cb *commit_cb)
{
int ret = 0;
u32 ctl_flush_bits = 0;
if (!ctl) {
SDEROT_ERR("display function not set\n");
return -ENODEV;
}
if (ctl->ops.prepare_fnc)
ret = ctl->ops.prepare_fnc(ctl, arg);
if (ret) {
SDEROT_ERR("error preparing display\n");
goto done;
}
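/* program both mixers, select the rotator path via CTL_TOP, then latch everything with a single CTL flush */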
sde_mdp_mixer_setup(ctl, SDE_MDP_MIXER_MUX_LEFT);
sde_mdp_mixer_setup(ctl, SDE_MDP_MIXER_MUX_RIGHT);
sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_TOP, ctl->opmode);
ctl->flush_bits |= BIT(17); /* CTL */
ctl_flush_bits = ctl->flush_bits;
sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_FLUSH, ctl_flush_bits);
/* ensure the flush command is issued after the barrier */
wmb();
ctl->flush_reg_data = ctl_flush_bits;
ctl->flush_bits = 0;
if (ctl->ops.display_fnc)
ret = ctl->ops.display_fnc(ctl, arg); /* DSI0 kickoff */
if (ret)
SDEROT_WARN("ctl %d error displaying frame\n", ctl->num);
done:
return ret;
}
/**
* sde_mdp_ctl_mixer_switch() - return ctl mixer of @return_type
* @ctl: Pointer to ctl structure to be switched.
* @return_type: wb_type of the ctl to be switched to.
*
* Virtual mixer switch should be performed only when there is no
* dedicated wfd block and writeback block is shared.
*/
struct sde_mdp_ctl *sde_mdp_ctl_mixer_switch(struct sde_mdp_ctl *ctl,
u32 return_type)
{
if (ctl->wb_type == return_type)
return ctl;
SDEROT_ERR("unable to switch mixer to type=%d\n", return_type);
return NULL;
}
struct sde_mdp_writeback *sde_mdp_wb_assign(u32 num, u32 reg_index)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
struct sde_mdp_writeback *wb = NULL;
static struct sde_mdp_writeback sde_wb[16];
static const u32 offset[] = {0x00065000, 0x00065800, 0x00066000};
if (num >= ARRAY_SIZE(offset)) {
SDEROT_ERR("invalid parameters\n");
return ERR_PTR(-EINVAL);
}
wb = &sde_wb[num];
wb->num = num;
wb->offset = offset[wb->num];
wb->base = mdata->sde_io.base + wb->offset;
return wb;
}
void sde_mdp_wb_free(struct sde_mdp_writeback *wb)
{
}


@@ -0,0 +1,39 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include "sde_rotator_r1_debug.h"
#include "sde_rotator_core.h"
#include "sde_rotator_r1.h"
#include "sde_rotator_r1_internal.h"
#if defined(CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG) && \
defined(CONFIG_DEBUG_FS)
/*
* sde_rotator_r1_create_debugfs - Setup rotator r1 debugfs directory structure.
* @mgr: Pointer to rotator manager
* @debugfs_root: Parent debugfs directory entry
*/
int sde_rotator_r1_create_debugfs(struct sde_rot_mgr *mgr,
struct dentry *debugfs_root)
{
struct sde_rotator_r1_data *hw_data;
if (!mgr || !debugfs_root || !mgr->hw_data)
return -EINVAL;
hw_data = mgr->hw_data;
/* add debugfs */
return 0;
}
#endif


@@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_ROTATOR_R1_DEBUG_H__
#define __SDE_ROTATOR_R1_DEBUG_H__
#include <linux/types.h>
#include <linux/dcache.h>
struct sde_rot_mgr;
#if defined(CONFIG_DEBUG_FS)
int sde_rotator_r1_create_debugfs(struct sde_rot_mgr *mgr,
struct dentry *debugfs_root);
#else
static inline
int sde_rotator_r1_create_debugfs(struct sde_rot_mgr *mgr,
struct dentry *debugfs_root)
{
return 0;
}
#endif
#endif /* __SDE_ROTATOR_R1_DEBUG_H__ */


@@ -0,0 +1,141 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef SDE_ROTATOR_R1_HWIO_H
#define SDE_ROTATOR_R1_HWIO_H
#include <linux/bitops.h>
#define SDE_MDP_FETCH_CONFIG_RESET_VALUE 0x00000087
#define SDE_MDP_REG_HW_VERSION 0x0
#define SDE_MDP_REG_INTR_EN 0x00010
#define SDE_MDP_REG_INTR_STATUS 0x00014
#define SDE_MDP_REG_INTR_CLEAR 0x00018
#define SDE_MDP_INTR_WB_0_DONE BIT(0)
#define SDE_MDP_INTR_WB_1_DONE BIT(1)
enum mdss_mdp_intr_type {
SDE_MDP_IRQ_WB_ROT_COMP = 0,
SDE_MDP_IRQ_WB_WFD = 4,
SDE_MDP_IRQ_PING_PONG_COMP = 8,
SDE_MDP_IRQ_PING_PONG_RD_PTR = 12,
SDE_MDP_IRQ_PING_PONG_WR_PTR = 16,
SDE_MDP_IRQ_PING_PONG_AUTO_REF = 20,
SDE_MDP_IRQ_INTF_UNDER_RUN = 24,
SDE_MDP_IRQ_INTF_VSYNC = 25,
};
enum mdss_mdp_ctl_index {
SDE_MDP_CTL0,
SDE_MDP_CTL1,
SDE_MDP_CTL2,
SDE_MDP_CTL3,
SDE_MDP_CTL4,
SDE_MDP_CTL5,
SDE_MDP_MAX_CTL
};
#define SDE_MDP_REG_CTL_LAYER(lm) \
(((lm) == 5) ? 0x024 : ((lm) * 0x004))
#define SDE_MDP_REG_CTL_TOP 0x014
#define SDE_MDP_REG_CTL_FLUSH 0x018
#define SDE_MDP_REG_CTL_START 0x01C
#define SDE_MDP_CTL_OP_ROT0_MODE 0x1
#define SDE_MDP_CTL_OP_ROT1_MODE 0x2
enum sde_mdp_sspp_index {
SDE_MDP_SSPP_VIG0,
SDE_MDP_SSPP_VIG1,
SDE_MDP_SSPP_VIG2,
SDE_MDP_SSPP_RGB0,
SDE_MDP_SSPP_RGB1,
SDE_MDP_SSPP_RGB2,
SDE_MDP_SSPP_DMA0,
SDE_MDP_SSPP_DMA1,
SDE_MDP_SSPP_VIG3,
SDE_MDP_SSPP_RGB3,
SDE_MDP_SSPP_CURSOR0,
SDE_MDP_SSPP_CURSOR1,
SDE_MDP_MAX_SSPP
};
#define SDE_MDP_REG_SSPP_SRC_SIZE 0x000
#define SDE_MDP_REG_SSPP_SRC_IMG_SIZE 0x004
#define SDE_MDP_REG_SSPP_SRC_XY 0x008
#define SDE_MDP_REG_SSPP_OUT_SIZE 0x00C
#define SDE_MDP_REG_SSPP_OUT_XY 0x010
#define SDE_MDP_REG_SSPP_SRC0_ADDR 0x014
#define SDE_MDP_REG_SSPP_SRC1_ADDR 0x018
#define SDE_MDP_REG_SSPP_SRC2_ADDR 0x01C
#define SDE_MDP_REG_SSPP_SRC3_ADDR 0x020
#define SDE_MDP_REG_SSPP_SRC_YSTRIDE0 0x024
#define SDE_MDP_REG_SSPP_SRC_YSTRIDE1 0x028
#define SDE_MDP_REG_SSPP_STILE_FRAME_SIZE 0x02C
#define SDE_MDP_REG_SSPP_SRC_FORMAT 0x030
#define SDE_MDP_REG_SSPP_SRC_UNPACK_PATTERN 0x034
#define SDE_MDP_REG_SSPP_SRC_CONSTANT_COLOR 0x03C
#define SDE_MDP_REG_SSPP_REQPRIO_FIFO_WM_0 0x050
#define SDE_MDP_REG_SSPP_REQPRIO_FIFO_WM_1 0x054
#define SDE_MDP_REG_SSPP_REQPRIO_FIFO_WM_2 0x058
#define SDE_MDP_REG_SSPP_DANGER_LUT 0x060
#define SDE_MDP_REG_SSPP_SAFE_LUT 0x064
#define SDE_MDP_REG_SSPP_CREQ_LUT 0x068
#define SDE_MDP_REG_SSPP_QOS_CTRL 0x06C
#define SDE_MDP_REG_SSPP_CDP_CTRL 0x134
#define SDE_MDP_REG_SSPP_UBWC_ERROR_STATUS 0x138
#define SDE_MDP_REG_SSPP_SRC_OP_MODE 0x038
#define SDE_MDP_OP_FLIP_UD BIT(14)
#define SDE_MDP_OP_FLIP_LR BIT(13)
#define SDE_MDP_OP_BWC_EN BIT(0)
#define SDE_MDP_OP_BWC_LOSSLESS (0 << 1)
#define SDE_MDP_OP_BWC_Q_HIGH (1 << 1)
#define SDE_MDP_OP_BWC_Q_MED (2 << 1)
#define SDE_MDP_REG_SSPP_SRC_CONSTANT_COLOR 0x03C
#define SDE_MDP_REG_SSPP_FETCH_CONFIG 0x048
#define SDE_MDP_REG_SSPP_VC1_RANGE 0x04C
#define SDE_MDP_REG_SSPP_SRC_ADDR_SW_STATUS 0x070
#define SDE_MDP_REG_SSPP_CURRENT_SRC0_ADDR 0x0A4
#define SDE_MDP_REG_SSPP_CURRENT_SRC1_ADDR 0x0A8
#define SDE_MDP_REG_SSPP_CURRENT_SRC2_ADDR 0x0AC
#define SDE_MDP_REG_SSPP_CURRENT_SRC3_ADDR 0x0B0
#define SDE_MDP_REG_SSPP_DECIMATION_CONFIG 0x0B4
enum sde_mdp_mixer_wb_index {
SDE_MDP_WB_LAYERMIXER0,
SDE_MDP_WB_LAYERMIXER1,
SDE_MDP_WB_MAX_LAYERMIXER,
};
enum mdss_mdp_writeback_index {
SDE_MDP_WRITEBACK0,
SDE_MDP_WRITEBACK1,
SDE_MDP_WRITEBACK2,
SDE_MDP_WRITEBACK3,
SDE_MDP_WRITEBACK4,
SDE_MDP_MAX_WRITEBACK
};
#define SDE_MDP_REG_WB_DST_FORMAT 0x000
#define SDE_MDP_REG_WB_DST_OP_MODE 0x004
#define SDE_MDP_REG_WB_DST_PACK_PATTERN 0x008
#define SDE_MDP_REG_WB_DST0_ADDR 0x00C
#define SDE_MDP_REG_WB_DST1_ADDR 0x010
#define SDE_MDP_REG_WB_DST2_ADDR 0x014
#define SDE_MDP_REG_WB_DST3_ADDR 0x018
#define SDE_MDP_REG_WB_DST_YSTRIDE0 0x01C
#define SDE_MDP_REG_WB_DST_YSTRIDE1 0x020
#define SDE_MDP_REG_WB_DST_WRITE_CONFIG 0x048
#define SDE_MDP_REG_WB_ROTATION_DNSCALER 0x050
#define SDE_MDP_REG_WB_ROTATOR_PIPE_DOWNSCALER 0x054
#define SDE_MDP_REG_WB_OUT_SIZE 0x074
#define SDE_MDP_REG_WB_ALPHA_X_VALUE 0x078
#define SDE_MDP_REG_WB_DST_ADDR_SW_STATUS 0x2B0
#endif


@@ -0,0 +1,165 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_ROTATOR_R1_INTERNAL_H__
#define __SDE_ROTATOR_R1_INTERNAL_H__
#include <linux/types.h>
#include <linux/file.h>
#include <linux/kref.h>
#include <linux/kernel.h>
#include "sde_rotator_util.h"
/**
* enum sde_commit_stage_type - Indicate different commit stages
*/
enum sde_commit_stage_type {
SDE_COMMIT_STAGE_SETUP_DONE,
SDE_COMMIT_STAGE_READY_FOR_KICKOFF,
};
enum sde_mdp_wb_ctl_type {
SDE_MDP_WB_CTL_TYPE_BLOCK = 1,
SDE_MDP_WB_CTL_TYPE_LINE
};
enum sde_mdp_mixer_mux {
SDE_MDP_MIXER_MUX_DEFAULT,
SDE_MDP_MIXER_MUX_LEFT,
SDE_MDP_MIXER_MUX_RIGHT,
};
enum sde_mdp_pipe_type {
SDE_MDP_PIPE_TYPE_UNUSED,
SDE_MDP_PIPE_TYPE_VIG,
SDE_MDP_PIPE_TYPE_RGB,
SDE_MDP_PIPE_TYPE_DMA,
SDE_MDP_PIPE_TYPE_CURSOR,
};
struct sde_mdp_data;
struct sde_mdp_ctl;
struct sde_mdp_pipe;
struct sde_mdp_mixer;
struct sde_mdp_wb;
struct sde_mdp_writeback {
u32 num;
char __iomem *base;
u32 offset;
};
struct sde_mdp_ctl_intfs_ops {
int (*start_fnc)(struct sde_mdp_ctl *ctl);
int (*stop_fnc)(struct sde_mdp_ctl *ctl, int panel_power_state);
int (*prepare_fnc)(struct sde_mdp_ctl *ctl, void *arg);
int (*display_fnc)(struct sde_mdp_ctl *ctl, void *arg);
int (*wait_fnc)(struct sde_mdp_ctl *ctl, void *arg);
};
struct sde_mdp_ctl {
u32 num;
char __iomem *base;
u32 opmode;
u32 flush_bits;
u32 flush_reg_data;
bool is_secure;
struct sde_rot_data_type *mdata;
struct sde_mdp_mixer *mixer_left;
struct sde_mdp_mixer *mixer_right;
void *priv_data;
u32 wb_type;
struct sde_mdp_writeback *wb;
struct sde_mdp_ctl_intfs_ops ops;
u32 offset;
int irq_num;
};
struct sde_mdp_mixer {
u32 num;
char __iomem *base;
u8 rotator_mode;
struct sde_mdp_ctl *ctl;
u32 offset;
};
struct sde_mdp_shared_reg_ctrl {
u32 reg_off;
u32 bit_off;
};
struct sde_mdp_pipe {
u32 num;
u32 type;
u32 ndx;
char __iomem *base;
u32 xin_id;
u32 flags;
u32 bwc_mode;
u16 img_width;
u16 img_height;
u8 horz_deci;
u8 vert_deci;
struct sde_rect src;
struct sde_rect dst;
struct sde_mdp_format_params *src_fmt;
struct sde_mdp_plane_sizes src_planes;
struct sde_mdp_mixer *mixer_left;
struct sde_mdp_mixer *mixer_right;
struct sde_mdp_shared_reg_ctrl clk_ctrl;
u32 params_changed;
u32 offset;
};
struct sde_mdp_writeback_arg {
struct sde_mdp_data *data;
void *priv_data;
};
struct sde_mdp_commit_cb {
void *data;
int (*commit_cb_fnc)(enum sde_commit_stage_type commit_state,
void *data);
};
static inline void sde_mdp_ctl_write(struct sde_mdp_ctl *ctl,
u32 reg, u32 val)
{
SDEROT_DBG("ctl%d:%6.6x:%8.8x\n", ctl->num, ctl->offset + reg, val);
writel_relaxed(val, ctl->base + reg);
}
static inline bool sde_mdp_is_nrt_vbif_client(struct sde_rot_data_type *mdata,
struct sde_mdp_pipe *pipe)
{
return mdata->vbif_nrt_io.base && pipe->mixer_left &&
pipe->mixer_left->rotator_mode;
}
int sde_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
void (*fnc_ptr)(void *), void *arg);
int sde_mdp_display_wait4comp(struct sde_mdp_ctl *ctl);
int sde_mdp_writeback_display_commit(struct sde_mdp_ctl *ctl, void *arg);
int sde_mdp_pipe_queue_data(struct sde_mdp_pipe *pipe,
struct sde_mdp_data *src_data);
struct sde_mdp_ctl *sde_mdp_ctl_alloc(struct sde_rot_data_type *mdata,
u32 off);
struct sde_mdp_writeback *sde_mdp_wb_assign(u32 num, u32 reg_index);
void sde_mdp_wb_free(struct sde_mdp_writeback *wb);
struct sde_mdp_mixer *sde_mdp_mixer_assign(u32 id, bool wb);
int sde_mdp_writeback_start(struct sde_mdp_ctl *ctl);
struct sde_mdp_pipe *sde_mdp_pipe_assign(struct sde_rot_data_type *mdata,
struct sde_mdp_mixer *mixer, u32 ndx);
int sde_mdp_pipe_destroy(struct sde_mdp_pipe *pipe);
int sde_mdp_ctl_free(struct sde_mdp_ctl *ctl);
int sde_mdp_display_commit(struct sde_mdp_ctl *ctl, void *arg,
struct sde_mdp_commit_cb *commit_cb);
int sde_mdp_mixer_pipe_update(struct sde_mdp_pipe *pipe,
struct sde_mdp_mixer *mixer, int params_changed);
int sde_mdp_get_pipe_flush_bits(struct sde_mdp_pipe *pipe);
struct sde_mdp_ctl *sde_mdp_ctl_mixer_switch(struct sde_mdp_ctl *ctl,
u32 return_type);
struct sde_mdp_mixer *sde_mdp_mixer_get(struct sde_mdp_ctl *ctl, int mux);
#endif /* __SDE_ROTATOR_R1_INTERNAL_H__ */


@@ -0,0 +1,422 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012, 2015-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/mutex.h>
#include "sde_rotator_r1_hwio.h"
#include "sde_rotator_base.h"
#include "sde_rotator_util.h"
#include "sde_rotator_r1_internal.h"
#include "sde_rotator_core.h"
#include "sde_rotator_trace.h"
#define SMP_MB_SIZE (mdss_res->smp_mb_size)
#define SMP_MB_CNT (mdss_res->smp_mb_cnt)
#define SMP_MB_ENTRY_SIZE 16
#define MAX_BPP 4
#define PIPE_CLEANUP_TIMEOUT_US 100000
/* following offsets are relative to ctrl register bit offset */
#define CLK_FORCE_ON_OFFSET 0x0
#define CLK_FORCE_OFF_OFFSET 0x1
/* following offsets are relative to status register bit offset */
#define CLK_STATUS_OFFSET 0x0
#define QOS_LUT_NRT_READ 0x0
#define PANIC_LUT_NRT_READ 0x0
#define ROBUST_LUT_NRT_READ 0xFFFF
/* Priority 2, no panic */
#define VBLANK_PANIC_DEFAULT_CONFIG 0x200000
static inline void sde_mdp_pipe_write(struct sde_mdp_pipe *pipe,
u32 reg, u32 val)
{
SDEROT_DBG("pipe%d:%6.6x:%8.8x\n", pipe->num, pipe->offset + reg, val);
writel_relaxed(val, pipe->base + reg);
}
static int sde_mdp_pipe_qos_lut(struct sde_mdp_pipe *pipe)
{
u32 qos_lut;
qos_lut = QOS_LUT_NRT_READ; /* low priority for nrt */
trace_rot_perf_set_qos_luts(pipe->num, pipe->src_fmt->format,
qos_lut, sde_mdp_is_linear_format(pipe->src_fmt));
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_CREQ_LUT,
qos_lut);
return 0;
}
/**
* sde_mdp_pipe_nrt_vbif_setup - select the RT/NRT VBIF client for a DMA pipe
* @mdata: pointer to global driver data.
* @pipe: pointer to a pipe
*
* This function assumes that clocks are enabled, so it is the caller's
* responsibility to enable clocks before calling this function.
*/
static void sde_mdp_pipe_nrt_vbif_setup(struct sde_rot_data_type *mdata,
struct sde_mdp_pipe *pipe)
{
uint32_t nrt_vbif_client_sel;
if (pipe->type != SDE_MDP_PIPE_TYPE_DMA)
return;
nrt_vbif_client_sel = readl_relaxed(mdata->mdp_base +
MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL);
if (sde_mdp_is_nrt_vbif_client(mdata, pipe))
nrt_vbif_client_sel |= BIT(pipe->num - SDE_MDP_SSPP_DMA0);
else
nrt_vbif_client_sel &= ~BIT(pipe->num - SDE_MDP_SSPP_DMA0);
SDEROT_DBG("mdp:%6.6x:%8.8x\n", MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL,
nrt_vbif_client_sel);
writel_relaxed(nrt_vbif_client_sel,
mdata->mdp_base + MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL);
}
/**
* sde_mdp_qos_vbif_remapper_setup - Program the VBIF QoS remapper
* registers based on real or non real time clients
* @mdata: Pointer to the global mdss data structure.
* @pipe: Pointer to source pipe struct to get xin id's.
* @is_realtime: To determine if pipe's client is real or
* non real time.
* This function assumes that clocks are on, so it is the caller's responsibility to
* call this function with clocks enabled.
*/
static void sde_mdp_qos_vbif_remapper_setup(struct sde_rot_data_type *mdata,
struct sde_mdp_pipe *pipe, bool is_realtime)
{
u32 mask, reg_val, i, vbif_qos;
if (mdata->npriority_lvl == 0)
return;
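/* each QOS_REMAP register holds a 2-bit priority field per xin client; rewrite only this pipe's field */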
for (i = 0; i < mdata->npriority_lvl; i++) {
reg_val = SDE_VBIF_READ(mdata, SDE_VBIF_QOS_REMAP_BASE + i*4);
mask = 0x3 << (pipe->xin_id * 2);
reg_val &= ~(mask);
vbif_qos = is_realtime ?
mdata->vbif_rt_qos[i] : mdata->vbif_nrt_qos[i];
reg_val |= vbif_qos << (pipe->xin_id * 2);
SDE_VBIF_WRITE(mdata, SDE_VBIF_QOS_REMAP_BASE + i*4, reg_val);
}
}
struct sde_mdp_pipe *sde_mdp_pipe_assign(struct sde_rot_data_type *mdata,
struct sde_mdp_mixer *mixer, u32 ndx)
{
struct sde_mdp_pipe *pipe = NULL;
static struct sde_mdp_pipe sde_pipe[16];
static const u32 offset[] = {0x00025000, 0x00027000};
static const u32 xin_id[] = {2, 10};
static const struct sde_mdp_shared_reg_ctrl clk_ctrl[] = {
{0x2AC, 8},
{0x2B4, 8}
};
if (ndx >= ARRAY_SIZE(offset)) {
SDEROT_ERR("invalid parameters\n");
return ERR_PTR(-EINVAL);
}
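/* only the two DMA pipes are used; ndx picks DMA0 or DMA1 together with its fixed register offset, xin id and clock-control bit */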
pipe = &sde_pipe[ndx];
pipe->num = ndx + SDE_MDP_SSPP_DMA0;
pipe->offset = offset[pipe->num - SDE_MDP_SSPP_DMA0];
pipe->xin_id = xin_id[pipe->num - SDE_MDP_SSPP_DMA0];
pipe->base = mdata->sde_io.base + pipe->offset;
pipe->type = SDE_MDP_PIPE_TYPE_DMA;
pipe->mixer_left = mixer;
pipe->clk_ctrl = clk_ctrl[pipe->num - SDE_MDP_SSPP_DMA0];
return pipe;
}
int sde_mdp_pipe_destroy(struct sde_mdp_pipe *pipe)
{
return 0;
}
void sde_mdp_pipe_position_update(struct sde_mdp_pipe *pipe,
struct sde_rect *src, struct sde_rect *dst)
{
u32 src_size, src_xy, dst_size, dst_xy;
src_size = (src->h << 16) | src->w;
src_xy = (src->y << 16) | src->x;
dst_size = (dst->h << 16) | dst->w;
dst_xy = (dst->y << 16) | dst->x;
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_SIZE, src_size);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_XY, src_xy);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_OUT_SIZE, dst_size);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_OUT_XY, dst_xy);
}
static int sde_mdp_image_setup(struct sde_mdp_pipe *pipe,
struct sde_mdp_data *data)
{
u32 img_size, ystride0, ystride1;
u32 width, height, decimation;
int ret = 0;
struct sde_rect dst, src;
bool rotation = false;
SDEROT_DBG(
"ctl: %d pnum=%d wh=%dx%d src={%d,%d,%d,%d} dst={%d,%d,%d,%d}\n",
pipe->mixer_left->ctl->num, pipe->num,
pipe->img_width, pipe->img_height,
pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
width = pipe->img_width;
height = pipe->img_height;
if (pipe->flags & SDE_SOURCE_ROTATED_90)
rotation = true;
sde_mdp_get_plane_sizes(pipe->src_fmt, width, height,
&pipe->src_planes, pipe->bwc_mode, rotation);
if (data != NULL) {
ret = sde_mdp_data_check(data, &pipe->src_planes,
pipe->src_fmt);
if (ret)
return ret;
}
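/* deinterlace without 90-degree rotation: double the plane strides and adjust the fetch geometry to twice the width and half the height */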
if ((pipe->flags & SDE_DEINTERLACE) &&
!(pipe->flags & SDE_SOURCE_ROTATED_90)) {
int i;
for (i = 0; i < pipe->src_planes.num_planes; i++)
pipe->src_planes.ystride[i] *= 2;
width *= 2;
height /= 2;
}
decimation = ((1 << pipe->horz_deci) - 1) << 8;
decimation |= ((1 << pipe->vert_deci) - 1);
if (decimation)
SDEROT_DBG("Image decimation h=%d v=%d\n",
pipe->horz_deci, pipe->vert_deci);
dst = pipe->dst;
src = pipe->src;
ystride0 = (pipe->src_planes.ystride[0]) |
(pipe->src_planes.ystride[1] << 16);
ystride1 = (pipe->src_planes.ystride[2]) |
(pipe->src_planes.ystride[3] << 16);
img_size = (height << 16) | width;
sde_mdp_pipe_position_update(pipe, &src, &dst);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_IMG_SIZE, img_size);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_YSTRIDE0, ystride0);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_YSTRIDE1, ystride1);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_DECIMATION_CONFIG,
decimation);
return 0;
}
static int sde_mdp_format_setup(struct sde_mdp_pipe *pipe)
{
struct sde_mdp_format_params *fmt;
u32 chroma_samp, unpack, src_format;
u32 secure = 0;
u32 opmode;
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
fmt = pipe->src_fmt;
if (pipe->flags & SDE_SECURE_OVERLAY_SESSION)
secure = 0xF;
opmode = pipe->bwc_mode;
if (pipe->flags & SDE_FLIP_LR)
opmode |= SDE_MDP_OP_FLIP_LR;
if (pipe->flags & SDE_FLIP_UD)
opmode |= SDE_MDP_OP_FLIP_UD;
SDEROT_DBG("pnum=%d format=%d opmode=%x\n", pipe->num, fmt->format,
opmode);
chroma_samp = fmt->chroma_sample;
if (pipe->flags & SDE_SOURCE_ROTATED_90) {
if (chroma_samp == SDE_MDP_CHROMA_H2V1)
chroma_samp = SDE_MDP_CHROMA_H1V2;
else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
chroma_samp = SDE_MDP_CHROMA_H2V1;
}
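/* pack chroma sampling, plane layout and per-component bit widths into SRC_FORMAT; tile/UBWC and rotation bits are OR'd in below */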
src_format = (chroma_samp << 23) |
(fmt->fetch_planes << 19) |
(fmt->bits[C3_ALPHA] << 6) |
(fmt->bits[C2_R_Cr] << 4) |
(fmt->bits[C1_B_Cb] << 2) |
(fmt->bits[C0_G_Y] << 0);
if (sde_mdp_is_tilea4x_format(fmt))
src_format |= BIT(30);
if (sde_mdp_is_tilea5x_format(fmt))
src_format |= BIT(31);
if (pipe->flags & SDE_ROT_90)
src_format |= BIT(11); /* ROT90 */
if (fmt->alpha_enable &&
fmt->fetch_planes != SDE_MDP_PLANE_INTERLEAVED)
src_format |= BIT(8); /* SRCC3_EN */
unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
(fmt->element[1] << 8) | (fmt->element[0] << 0);
src_format |= ((fmt->unpack_count - 1) << 12) |
(fmt->unpack_tight << 17) |
(fmt->unpack_align_msb << 18) |
((fmt->bpp - 1) << 9);
if (sde_mdp_is_ubwc_format(fmt))
opmode |= BIT(0);
if (fmt->is_yuv)
src_format |= BIT(15);
if (fmt->frame_format != SDE_MDP_FMT_LINEAR
&& mdata->highest_bank_bit) {
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_FETCH_CONFIG,
SDE_MDP_FETCH_CONFIG_RESET_VALUE |
mdata->highest_bank_bit << 18);
}
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_FORMAT, src_format);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_OP_MODE, opmode);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
/* clear UBWC error */
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_UBWC_ERROR_STATUS, BIT(31));
return 0;
}
static int sde_mdp_src_addr_setup(struct sde_mdp_pipe *pipe,
struct sde_mdp_data *src_data)
{
struct sde_mdp_data data = *src_data;
u32 x = 0, y = 0;
int ret = 0;
SDEROT_DBG("pnum=%d\n", pipe->num);
ret = sde_mdp_data_check(&data, &pipe->src_planes, pipe->src_fmt);
if (ret)
return ret;
sde_rot_data_calc_offset(&data, x, y,
&pipe->src_planes, pipe->src_fmt);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC0_ADDR, data.p[0].addr);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC1_ADDR, data.p[1].addr);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC2_ADDR, data.p[2].addr);
sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC3_ADDR, data.p[3].addr);
return 0;
}
static void sde_mdp_set_ot_limit_pipe(struct sde_mdp_pipe *pipe)
{
struct sde_mdp_set_ot_params ot_params = {0,};
ot_params.xin_id = pipe->xin_id;
ot_params.num = pipe->num;
ot_params.width = pipe->src.w;
ot_params.height = pipe->src.h;
ot_params.fps = 60;
ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
ot_params.reg_off_mdp_clk_ctrl = pipe->clk_ctrl.reg_off;
ot_params.bit_off_mdp_clk_ctrl = pipe->clk_ctrl.bit_off +
CLK_FORCE_ON_OFFSET;
ot_params.fmt = (pipe->src_fmt) ? pipe->src_fmt->format : 0;
sde_mdp_set_ot_limit(&ot_params);
}
int sde_mdp_pipe_queue_data(struct sde_mdp_pipe *pipe,
struct sde_mdp_data *src_data)
{
int ret = 0;
u32 params_changed;
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
if (!pipe) {
SDEROT_ERR("pipe not setup properly for queue\n");
return -ENODEV;
}
/*
* Reprogram the pipe when there is no dedicated wfd blk and
* virtual mixer is allocated for the DMA pipe during concurrent
* line and block mode operations
*/
params_changed = (pipe->params_changed);
if (params_changed) {
bool is_realtime = !(pipe->mixer_left->rotator_mode);
sde_mdp_qos_vbif_remapper_setup(mdata, pipe, is_realtime);
if (mdata->vbif_nrt_io.base)
sde_mdp_pipe_nrt_vbif_setup(mdata, pipe);
}
if (params_changed) {
pipe->params_changed = 0;
ret = sde_mdp_image_setup(pipe, src_data);
if (ret) {
SDEROT_ERR("image setup error for pnum=%d\n",
pipe->num);
goto done;
}
ret = sde_mdp_format_setup(pipe);
if (ret) {
SDEROT_ERR("format %d setup error pnum=%d\n",
pipe->src_fmt->format, pipe->num);
goto done;
}
if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map))
sde_mdp_pipe_qos_lut(pipe);
sde_mdp_set_ot_limit_pipe(pipe);
}
ret = sde_mdp_src_addr_setup(pipe, src_data);
if (ret) {
SDEROT_ERR("addr setup error for pnum=%d\n", pipe->num);
goto done;
}
sde_mdp_mixer_pipe_update(pipe, pipe->mixer_left,
params_changed);
done:
return ret;
}


@@ -0,0 +1,523 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/delay.h>
#include <linux/interrupt.h>
#include "sde_rotator_r1_hwio.h"
#include "sde_rotator_util.h"
#include "sde_rotator_r1_internal.h"
#include "sde_rotator_core.h"
/* wait for at most 2 vsync for lowest refresh rate (24hz) */
#define KOFF_TIMEOUT msecs_to_jiffies(84)
/*
* if BWC enabled and format is H1V2 or 420, do not use site C or I.
* Hence, set bits 29:26 in the format register to zero.
*/
#define BWC_FMT_MASK 0xC3FFFFFF
#define MDSS_DEFAULT_OT_SETTING 0x10
enum sde_mdp_writeback_type {
SDE_MDP_WRITEBACK_TYPE_ROTATOR,
SDE_MDP_WRITEBACK_TYPE_LINE,
SDE_MDP_WRITEBACK_TYPE_WFD,
};
struct sde_mdp_writeback_ctx {
u32 wb_num;
char __iomem *base;
u8 ref_cnt;
u8 type;
struct completion wb_comp;
int comp_cnt;
u32 intr_type;
u32 intf_num;
u32 xin_id;
u32 wr_lim;
struct sde_mdp_shared_reg_ctrl clk_ctrl;
u32 opmode;
struct sde_mdp_format_params *dst_fmt;
u16 img_width;
u16 img_height;
u16 width;
u16 height;
struct sde_rect dst_rect;
u32 dnsc_factor_w;
u32 dnsc_factor_h;
u8 rot90;
u32 bwc_mode;
struct sde_mdp_plane_sizes dst_planes;
ktime_t start_time;
ktime_t end_time;
u32 offset;
};
static struct sde_mdp_writeback_ctx wb_ctx_list[SDE_MDP_MAX_WRITEBACK] = {
{
.type = SDE_MDP_WRITEBACK_TYPE_ROTATOR,
.intr_type = SDE_MDP_IRQ_WB_ROT_COMP,
.intf_num = 0,
.xin_id = 3,
.clk_ctrl.reg_off = 0x2BC,
.clk_ctrl.bit_off = 0x8,
},
{
.type = SDE_MDP_WRITEBACK_TYPE_ROTATOR,
.intr_type = SDE_MDP_IRQ_WB_ROT_COMP,
.intf_num = 1,
.xin_id = 11,
.clk_ctrl.reg_off = 0x2BC,
.clk_ctrl.bit_off = 0xC,
},
};
static inline void sde_wb_write(struct sde_mdp_writeback_ctx *ctx,
u32 reg, u32 val)
{
SDEROT_DBG("wb%d:%6.6x:%8.8x\n", ctx->wb_num, ctx->offset + reg, val);
writel_relaxed(val, ctx->base + reg);
}
static int sde_mdp_writeback_addr_setup(struct sde_mdp_writeback_ctx *ctx,
const struct sde_mdp_data *in_data)
{
int ret;
struct sde_mdp_data data;
if (!in_data)
return -EINVAL;
data = *in_data;
SDEROT_DBG("wb_num=%d addr=0x%pa\n", ctx->wb_num, &data.p[0].addr);
ret = sde_mdp_data_check(&data, &ctx->dst_planes, ctx->dst_fmt);
if (ret)
return ret;
sde_rot_data_calc_offset(&data, ctx->dst_rect.x, ctx->dst_rect.y,
&ctx->dst_planes, ctx->dst_fmt);
if ((ctx->dst_fmt->fetch_planes == SDE_MDP_PLANE_PLANAR) &&
(ctx->dst_fmt->element[0] == C1_B_Cb))
swap(data.p[1].addr, data.p[2].addr);
sde_wb_write(ctx, SDE_MDP_REG_WB_DST0_ADDR, data.p[0].addr);
sde_wb_write(ctx, SDE_MDP_REG_WB_DST1_ADDR, data.p[1].addr);
sde_wb_write(ctx, SDE_MDP_REG_WB_DST2_ADDR, data.p[2].addr);
sde_wb_write(ctx, SDE_MDP_REG_WB_DST3_ADDR, data.p[3].addr);
return 0;
}
static int sde_mdp_writeback_format_setup(struct sde_mdp_writeback_ctx *ctx,
u32 format, struct sde_mdp_ctl *ctl)
{
struct sde_mdp_format_params *fmt;
u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
u32 dnsc_factor, write_config = 0;
u32 opmode = ctx->opmode;
bool rotation = false;
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
SDEROT_DBG("wb_num=%d format=%d\n", ctx->wb_num, format);
if (ctx->rot90)
rotation = true;
fmt = sde_get_format_params(format);
if (!fmt) {
SDEROT_ERR("wb format=%d not supported\n", format);
return -EINVAL;
}
sde_mdp_get_plane_sizes(fmt, ctx->img_width, ctx->img_height,
&ctx->dst_planes,
ctx->opmode & SDE_MDP_OP_BWC_EN, rotation);
ctx->dst_fmt = fmt;
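/* build WB_DST_FORMAT with the same chroma/plane/bit-width packing as the SSPP source format; alpha and UBWC bits are OR'd in below */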
chroma_samp = fmt->chroma_sample;
dst_format = (chroma_samp << 23) |
(fmt->fetch_planes << 19) |
(fmt->bits[C3_ALPHA] << 6) |
(fmt->bits[C2_R_Cr] << 4) |
(fmt->bits[C1_B_Cb] << 2) |
(fmt->bits[C0_G_Y] << 0);
dst_format &= BWC_FMT_MASK;
if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
dst_format |= BIT(8); /* DSTC3_EN */
if (!fmt->alpha_enable)
dst_format |= BIT(14); /* DST_ALPHA_X */
}
if (fmt->is_yuv)
dst_format |= BIT(15);
pattern = (fmt->element[3] << 24) |
(fmt->element[2] << 16) |
(fmt->element[1] << 8) |
(fmt->element[0] << 0);
dst_format |= (fmt->unpack_align_msb << 18) |
(fmt->unpack_tight << 17) |
((fmt->unpack_count - 1) << 12) |
((fmt->bpp - 1) << 9);
ystride0 = (ctx->dst_planes.ystride[0]) |
(ctx->dst_planes.ystride[1] << 16);
ystride1 = (ctx->dst_planes.ystride[2]) |
(ctx->dst_planes.ystride[3] << 16);
outsize = (ctx->dst_rect.h << 16) | ctx->dst_rect.w;
if (sde_mdp_is_ubwc_format(fmt)) {
opmode |= BIT(0);
dst_format |= BIT(31);
if (mdata->highest_bank_bit)
write_config |= (mdata->highest_bank_bit << 8);
if (fmt->format == SDE_PIX_FMT_RGB_565_UBWC)
write_config |= 0x8;
}
if (ctx->type == SDE_MDP_WRITEBACK_TYPE_ROTATOR) {
dnsc_factor = (ctx->dnsc_factor_h) | (ctx->dnsc_factor_w << 16);
sde_wb_write(ctx, SDE_MDP_REG_WB_ROTATOR_PIPE_DOWNSCALER,
dnsc_factor);
}
sde_wb_write(ctx, SDE_MDP_REG_WB_ALPHA_X_VALUE, 0xFF);
sde_wb_write(ctx, SDE_MDP_REG_WB_DST_FORMAT, dst_format);
sde_wb_write(ctx, SDE_MDP_REG_WB_DST_OP_MODE, opmode);
sde_wb_write(ctx, SDE_MDP_REG_WB_DST_PACK_PATTERN, pattern);
sde_wb_write(ctx, SDE_MDP_REG_WB_DST_YSTRIDE0, ystride0);
sde_wb_write(ctx, SDE_MDP_REG_WB_DST_YSTRIDE1, ystride1);
sde_wb_write(ctx, SDE_MDP_REG_WB_OUT_SIZE, outsize);
sde_wb_write(ctx, SDE_MDP_REG_WB_DST_WRITE_CONFIG, write_config);
return 0;
}
static int sde_mdp_writeback_prepare_rot(struct sde_mdp_ctl *ctl, void *arg)
{
struct sde_mdp_writeback_ctx *ctx;
struct sde_mdp_writeback_arg *wb_args;
struct sde_rot_entry *entry;
struct sde_rotation_item *item;
struct sde_rot_data_type *mdata;
u32 format;
ctx = (struct sde_mdp_writeback_ctx *) ctl->priv_data;
if (!ctx)
return -ENODEV;
wb_args = (struct sde_mdp_writeback_arg *) arg;
if (!wb_args)
return -ENOENT;
entry = (struct sde_rot_entry *) wb_args->priv_data;
if (!entry) {
SDEROT_ERR("unable to retrieve rot session ctl=%d\n", ctl->num);
return -ENODEV;
}
item = &entry->item;
mdata = ctl->mdata;
if (!mdata) {
SDEROT_ERR("no mdata attached to ctl=%d", ctl->num);
return -ENODEV;
}
SDEROT_DBG("rot setup wb_num=%d\n", ctx->wb_num);
ctx->opmode = BIT(6); /* ROT EN */
if (ctl->mdata->rot_block_size == 128)
ctx->opmode |= BIT(4); /* block size 128 */
ctx->bwc_mode = 0;
ctx->opmode |= ctx->bwc_mode;
ctx->img_width = item->output.width;
ctx->img_height = item->output.height;
ctx->width = ctx->dst_rect.w = item->dst_rect.w;
ctx->height = ctx->dst_rect.h = item->dst_rect.h;
ctx->dst_rect.x = item->dst_rect.x;
ctx->dst_rect.y = item->dst_rect.y;
ctx->dnsc_factor_w = entry->dnsc_factor_w;
ctx->dnsc_factor_h = entry->dnsc_factor_h;
ctx->rot90 = !!(item->flags & SDE_ROTATION_90);
format = item->output.format;
if (ctx->rot90)
ctx->opmode |= BIT(5); /* ROT 90 */
return sde_mdp_writeback_format_setup(ctx, format, ctl);
}
static int sde_mdp_writeback_stop(struct sde_mdp_ctl *ctl,
int panel_power_state)
{
struct sde_mdp_writeback_ctx *ctx;
SDEROT_DBG("stop ctl=%d\n", ctl->num);
ctx = (struct sde_mdp_writeback_ctx *) ctl->priv_data;
if (ctx) {
sde_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
NULL, NULL);
complete_all(&ctx->wb_comp);
ctl->priv_data = NULL;
ctx->ref_cnt--;
}
return 0;
}
static void sde_mdp_writeback_intr_done(void *arg)
{
struct sde_mdp_ctl *ctl = arg;
struct sde_mdp_writeback_ctx *ctx = ctl->priv_data;
if (!ctx) {
SDEROT_ERR("invalid ctx\n");
return;
}
SDEROT_DBG("intr wb_num=%d\n", ctx->wb_num);
if (ctl->irq_num >= 0)
disable_irq_nosync(ctl->irq_num);
complete_all(&ctx->wb_comp);
}
static int sde_mdp_wb_wait4comp(struct sde_mdp_ctl *ctl, void *arg)
{
struct sde_mdp_writeback_ctx *ctx;
int rc = 0;
u64 rot_time = 0;
u32 status, mask, isr = 0;
ctx = (struct sde_mdp_writeback_ctx *) ctl->priv_data;
if (!ctx) {
SDEROT_ERR("invalid ctx\n");
return -ENODEV;
}
if (ctx->comp_cnt == 0)
return rc;
if (ctl->irq_num >= 0) {
rc = wait_for_completion_timeout(&ctx->wb_comp,
KOFF_TIMEOUT);
sde_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
NULL, NULL);
if (rc == 0) {
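/*
 * Timeout: the WB done bit may have latched without the IRQ
 * firing; ack it and complete manually, otherwise treat it
 * as a real timeout.
 */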
mask = BIT(ctx->intr_type + ctx->intf_num);
isr = readl_relaxed(ctl->mdata->mdp_base +
SDE_MDP_REG_INTR_STATUS);
status = mask & isr;
SDEROT_INFO_ONCE(
"mask: 0x%x, isr: 0x%x, status: 0x%x\n",
mask, isr, status);
if (status) {
SDEROT_WARN("wb done but irq not triggered\n");
writel_relaxed(BIT(ctl->wb->num),
ctl->mdata->mdp_base +
SDE_MDP_REG_INTR_CLEAR);
sde_mdp_writeback_intr_done(ctl);
rc = 0;
} else {
rc = -ENODEV;
WARN(1, "wb timeout (%d) ctl=%d\n",
rc, ctl->num);
if (ctl->irq_num >= 0)
disable_irq_nosync(ctl->irq_num);
}
} else {
rc = 0;
}
} else {
/* use polling if interrupt is not available */
int cnt = 200;
mask = BIT(ctl->wb->num);
do {
udelay(500);
isr = readl_relaxed(ctl->mdata->mdp_base +
SDE_MDP_REG_INTR_STATUS);
status = mask & isr;
cnt--;
} while (cnt > 0 && !status);
writel_relaxed(mask, ctl->mdata->mdp_base +
SDE_MDP_REG_INTR_CLEAR);
rc = (status) ? 0 : -ENODEV;
}
if (rc == 0)
ctx->end_time = ktime_get();
sde_smmu_ctrl(0);
ctx->comp_cnt--;
if (!rc) {
rot_time = (u64)ktime_to_us(ctx->end_time) -
(u64)ktime_to_us(ctx->start_time);
SDEROT_DBG(
"ctx%d type:%d xin_id:%d intf_num:%d took %llu microsecs\n",
ctx->wb_num, ctx->type, ctx->xin_id,
ctx->intf_num, rot_time);
}
SDEROT_DBG("s:%8.8x %s t:%llu c:%d\n", isr,
(rc)?"Timeout":"Done", rot_time, ctx->comp_cnt);
return rc;
}
static void sde_mdp_set_ot_limit_wb(struct sde_mdp_writeback_ctx *ctx)
{
struct sde_mdp_set_ot_params ot_params = {0,};
ot_params.xin_id = ctx->xin_id;
ot_params.num = ctx->wb_num;
ot_params.width = ctx->width;
ot_params.height = ctx->height;
ot_params.fps = 60;
ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
ot_params.reg_off_mdp_clk_ctrl = ctx->clk_ctrl.reg_off;
ot_params.bit_off_mdp_clk_ctrl = ctx->clk_ctrl.bit_off;
ot_params.fmt = (ctx->dst_fmt) ? ctx->dst_fmt->format : 0;
sde_mdp_set_ot_limit(&ot_params);
}
static int sde_mdp_writeback_display(struct sde_mdp_ctl *ctl, void *arg)
{
struct sde_mdp_writeback_ctx *ctx;
struct sde_mdp_writeback_arg *wb_args;
u32 flush_bits = 0;
int ret;
if (!ctl || !ctl->mdata)
return -ENODEV;
ctx = (struct sde_mdp_writeback_ctx *) ctl->priv_data;
if (!ctx)
return -ENODEV;
if (ctx->comp_cnt) {
SDEROT_ERR("previous kickoff not completed yet, ctl=%d\n",
ctl->num);
return -EPERM;
}
if (ctl->mdata->default_ot_wr_limit ||
ctl->mdata->default_ot_rd_limit)
sde_mdp_set_ot_limit_wb(ctx);
wb_args = (struct sde_mdp_writeback_arg *) arg;
if (!wb_args)
return -ENOENT;
ret = sde_mdp_writeback_addr_setup(ctx, wb_args->data);
if (ret) {
SDEROT_ERR("writeback data setup error ctl=%d\n", ctl->num);
return ret;
}
sde_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
sde_mdp_writeback_intr_done, ctl);
flush_bits |= ctl->flush_reg_data;
flush_bits |= BIT(16); /* WB */
sde_wb_write(ctx, SDE_MDP_REG_WB_DST_ADDR_SW_STATUS, ctl->is_secure);
sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_FLUSH, flush_bits);
reinit_completion(&ctx->wb_comp);
if (ctl->irq_num >= 0)
enable_irq(ctl->irq_num);
ret = sde_smmu_ctrl(1);
if (ret < 0) {
SDEROT_ERR("IOMMU attach failed\n");
return ret;
}
ctx->start_time = ktime_get();
sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_START, 1);
	/* ensure the start command write is committed to hardware */
wmb();
SDEROT_DBG("ctx%d type:%d xin_id:%d intf_num:%d start\n",
ctx->wb_num, ctx->type, ctx->xin_id, ctx->intf_num);
ctx->comp_cnt++;
return 0;
}
int sde_mdp_writeback_start(struct sde_mdp_ctl *ctl)
{
struct sde_mdp_writeback_ctx *ctx;
struct sde_mdp_writeback *wb;
u32 mem_sel;
SDEROT_DBG("start ctl=%d\n", ctl->num);
if (!ctl->wb) {
SDEROT_DBG("wb not setup in the ctl\n");
return 0;
}
wb = ctl->wb;
mem_sel = (ctl->opmode & 0xF) - 1;
if (mem_sel < SDE_MDP_MAX_WRITEBACK) {
ctx = &wb_ctx_list[mem_sel];
if (ctx->ref_cnt) {
SDEROT_ERR("writeback in use %d\n", mem_sel);
return -EBUSY;
}
ctx->ref_cnt++;
} else {
SDEROT_ERR("invalid writeback mode %d\n", mem_sel);
return -EINVAL;
}
ctl->priv_data = ctx;
ctx->wb_num = wb->num;
ctx->base = wb->base;
ctx->offset = wb->offset;
init_completion(&ctx->wb_comp);
if (ctx->type == SDE_MDP_WRITEBACK_TYPE_ROTATOR)
ctl->ops.prepare_fnc = sde_mdp_writeback_prepare_rot;
ctl->ops.stop_fnc = sde_mdp_writeback_stop;
ctl->ops.display_fnc = sde_mdp_writeback_display;
ctl->ops.wait_fnc = sde_mdp_wb_wait4comp;
return 0;
}
int sde_mdp_writeback_display_commit(struct sde_mdp_ctl *ctl, void *arg)
{
return sde_mdp_display_commit(ctl, arg, NULL);
}
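/*
 * Illustrative sketch only (not part of the driver): the expected calling
 * order for the writeback ctl ops installed by sde_mdp_writeback_start().
 * The ctl and wb_args values are assumed to come from the rotator core;
 * error handling is abbreviated and the helper name is hypothetical.
 */
static int __maybe_unused sde_mdp_writeback_example_kickoff(
		struct sde_mdp_ctl *ctl, void *wb_args)
{
	int rc;

	rc = sde_mdp_writeback_start(ctl);	/* installs display/wait/stop ops */
	if (rc)
		return rc;

	if (ctl->ops.prepare_fnc) {
		/* rotator-type writeback: program the rotation setup */
		rc = ctl->ops.prepare_fnc(ctl, wb_args);
		if (rc)
			return rc;
	}

	rc = ctl->ops.display_fnc(ctl, wb_args);	/* kick off, arms wb_comp */
	if (rc)
		return rc;

	rc = ctl->ops.wait_fnc(ctl, NULL);	/* block until WB done or timeout */
	ctl->ops.stop_fnc(ctl, 0);		/* release the writeback context */
	return rc;
}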

[File diff omitted because it is too large.]


@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_ROTATOR_R3_H__
#define __SDE_ROTATOR_R3_H__
#include "sde_rotator_core.h"
/* Maximum allowed Rotator clock value */
#define ROT_R3_MAX_ROT_CLK 345000000
int sde_rotator_r3_init(struct sde_rot_mgr *mgr);
#endif /* __SDE_ROTATOR_R3_H__ */


@@ -0,0 +1,50 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include "sde_rotator_r3_debug.h"
#include "sde_rotator_core.h"
#include "sde_rotator_r3.h"
#include "sde_rotator_r3_internal.h"
#if defined(CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG) && \
defined(CONFIG_DEBUG_FS)
/*
* sde_rotator_r3_create_debugfs - Setup rotator r3 debugfs directory structure.
* @rot_dev: Pointer to rotator device
*/
int sde_rotator_r3_create_debugfs(struct sde_rot_mgr *mgr,
struct dentry *debugfs_root)
{
struct sde_hw_rotator *hw_data;
if (!mgr || !debugfs_root || !mgr->hw_data)
return -EINVAL;
hw_data = mgr->hw_data;
debugfs_create_bool("dbgmem", 0644, debugfs_root, &hw_data->dbgmem);
debugfs_create_u32("koff_timeout", 0644, debugfs_root, &hw_data->koff_timeout);
debugfs_create_u32("vid_trigger", 0644, debugfs_root, &hw_data->vid_trigger);
debugfs_create_u32("cmd_trigger", 0644, debugfs_root, &hw_data->cmd_trigger);
debugfs_create_u32("sbuf_headroom", 0644, debugfs_root, &hw_data->sbuf_headroom);
debugfs_create_u32("solid_fill", 0644, debugfs_root, &hw_data->solid_fill);
return 0;
}
#endif


@@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_ROTATOR_R3_DEBUG_H__
#define __SDE_ROTATOR_R3_DEBUG_H__
#include <linux/types.h>
#include <linux/dcache.h>
struct sde_rot_mgr;
#if defined(CONFIG_DEBUG_FS)
int sde_rotator_r3_create_debugfs(struct sde_rot_mgr *mgr,
struct dentry *debugfs_root);
#else
static inline
int sde_rotator_r3_create_debugfs(struct sde_rot_mgr *mgr,
struct dentry *debugfs_root)
{
return 0;
}
#endif
#endif /* __SDE_ROTATOR_R3_DEBUG_H__ */


@@ -0,0 +1,305 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _SDE_ROTATOR_R3_HWIO_H
#define _SDE_ROTATOR_R3_HWIO_H
#include <linux/bitops.h>
/* MMSS_MDSS:
* OFFSET=0x000000
*/
#define MMSS_MDSS_HW_INTR_STATUS 0x10
#define MMSS_MDSS_HW_INTR_STATUS_ROT BIT(2)
/* SDE_ROT_ROTTOP:
* OFFSET=0x0A9000
*/
#define SDE_ROT_ROTTOP_OFFSET 0x0A9000
#define ROTTOP_HW_VERSION (SDE_ROT_ROTTOP_OFFSET+0x00)
#define ROTTOP_CLK_CTRL (SDE_ROT_ROTTOP_OFFSET+0x10)
#define ROTTOP_CLK_STATUS (SDE_ROT_ROTTOP_OFFSET+0x14)
#define ROTTOP_ROT_NEWROI_PRIOR_TO_START (SDE_ROT_ROTTOP_OFFSET+0x18)
#define ROTTOP_SW_RESET (SDE_ROT_ROTTOP_OFFSET+0x20)
#define ROTTOP_SW_RESET_CTRL (SDE_ROT_ROTTOP_OFFSET+0x24)
#define ROTTOP_SW_RESET_OVERRIDE (SDE_ROT_ROTTOP_OFFSET+0x28)
#define ROTTOP_INTR_EN (SDE_ROT_ROTTOP_OFFSET+0x30)
#define ROTTOP_INTR_STATUS (SDE_ROT_ROTTOP_OFFSET+0x34)
#define ROTTOP_INTR_CLEAR (SDE_ROT_ROTTOP_OFFSET+0x38)
#define ROTTOP_START_CTRL (SDE_ROT_ROTTOP_OFFSET+0x40)
#define ROTTOP_STATUS (SDE_ROT_ROTTOP_OFFSET+0x44)
#define ROTTOP_OP_MODE (SDE_ROT_ROTTOP_OFFSET+0x48)
#define ROTTOP_DNSC (SDE_ROT_ROTTOP_OFFSET+0x4C)
#define ROTTOP_DEBUGBUS_CTRL (SDE_ROT_ROTTOP_OFFSET+0x50)
#define ROTTOP_DEBUGBUS_STATUS (SDE_ROT_ROTTOP_OFFSET+0x54)
#define ROTTOP_ROT_UBWC_DEC_VERSION (SDE_ROT_ROTTOP_OFFSET+0x58)
#define ROTTOP_ROT_UBWC_ENC_VERSION (SDE_ROT_ROTTOP_OFFSET+0x5C)
#define ROTTOP_ROT_CNTR_CTRL (SDE_ROT_ROTTOP_OFFSET+0x60)
#define ROTTOP_ROT_CNTR_0 (SDE_ROT_ROTTOP_OFFSET+0x64)
#define ROTTOP_ROT_CNTR_1 (SDE_ROT_ROTTOP_OFFSET+0x68)
#define ROTTOP_ROT_SCRATCH_0 (SDE_ROT_ROTTOP_OFFSET+0x70)
#define ROTTOP_ROT_SCRATCH_1 (SDE_ROT_ROTTOP_OFFSET+0x74)
#define ROTTOP_ROT_SCRATCH_2 (SDE_ROT_ROTTOP_OFFSET+0x78)
#define ROTTOP_ROT_SCRATCH_3 (SDE_ROT_ROTTOP_OFFSET+0x7C)
#define ROTTOP_START_CTRL_TRIG_SEL_SW 0
#define ROTTOP_START_CTRL_TRIG_SEL_DONE 1
#define ROTTOP_START_CTRL_TRIG_SEL_REGDMA 2
#define ROTTOP_START_CTRL_TRIG_SEL_MDP 3
#define ROTTOP_OP_MODE_ROT_OUT_MASK (0x3 << 4)
/* SDE_ROT_SSPP:
* OFFSET=0x0A9100
*/
#define SDE_ROT_SSPP_OFFSET 0x0A9100
#define ROT_SSPP_SRC_SIZE (SDE_ROT_SSPP_OFFSET+0x00)
#define ROT_SSPP_SRC_IMG_SIZE (SDE_ROT_SSPP_OFFSET+0x04)
#define ROT_SSPP_SRC_XY (SDE_ROT_SSPP_OFFSET+0x08)
#define ROT_SSPP_OUT_SIZE (SDE_ROT_SSPP_OFFSET+0x0C)
#define ROT_SSPP_OUT_XY (SDE_ROT_SSPP_OFFSET+0x10)
#define ROT_SSPP_SRC0_ADDR (SDE_ROT_SSPP_OFFSET+0x14)
#define ROT_SSPP_SRC1_ADDR (SDE_ROT_SSPP_OFFSET+0x18)
#define ROT_SSPP_SRC2_ADDR (SDE_ROT_SSPP_OFFSET+0x1C)
#define ROT_SSPP_SRC3_ADDR (SDE_ROT_SSPP_OFFSET+0x20)
#define ROT_SSPP_SRC_YSTRIDE0 (SDE_ROT_SSPP_OFFSET+0x24)
#define ROT_SSPP_SRC_YSTRIDE1 (SDE_ROT_SSPP_OFFSET+0x28)
#define ROT_SSPP_TILE_FRAME_SIZE (SDE_ROT_SSPP_OFFSET+0x2C)
#define ROT_SSPP_SRC_FORMAT (SDE_ROT_SSPP_OFFSET+0x30)
#define ROT_SSPP_SRC_UNPACK_PATTERN (SDE_ROT_SSPP_OFFSET+0x34)
#define ROT_SSPP_SRC_OP_MODE (SDE_ROT_SSPP_OFFSET+0x38)
#define ROT_SSPP_SRC_CONSTANT_COLOR (SDE_ROT_SSPP_OFFSET+0x3C)
#define ROT_SSPP_UBWC_STATIC_CTRL (SDE_ROT_SSPP_OFFSET+0x44)
#define ROT_SSPP_FETCH_CONFIG (SDE_ROT_SSPP_OFFSET+0x48)
#define ROT_SSPP_VC1_RANGE (SDE_ROT_SSPP_OFFSET+0x4C)
#define ROT_SSPP_REQPRIORITY_FIFO_WATERMARK_0 (SDE_ROT_SSPP_OFFSET+0x50)
#define ROT_SSPP_REQPRIORITY_FIFO_WATERMARK_1 (SDE_ROT_SSPP_OFFSET+0x54)
#define ROT_SSPP_REQPRIORITY_FIFO_WATERMARK_2 (SDE_ROT_SSPP_OFFSET+0x58)
#define ROT_SSPP_DANGER_LUT (SDE_ROT_SSPP_OFFSET+0x60)
#define ROT_SSPP_SAFE_LUT (SDE_ROT_SSPP_OFFSET+0x64)
#define ROT_SSPP_CREQ_LUT (SDE_ROT_SSPP_OFFSET+0x68)
#define ROT_SSPP_QOS_CTRL (SDE_ROT_SSPP_OFFSET+0x6C)
#define ROT_SSPP_SRC_ADDR_SW_STATUS (SDE_ROT_SSPP_OFFSET+0x70)
#define ROT_SSPP_CREQ_LUT_0 (SDE_ROT_SSPP_OFFSET+0x74)
#define ROT_SSPP_CREQ_LUT_1 (SDE_ROT_SSPP_OFFSET+0x78)
#define ROT_SSPP_CURRENT_SRC0_ADDR (SDE_ROT_SSPP_OFFSET+0xA4)
#define ROT_SSPP_CURRENT_SRC1_ADDR (SDE_ROT_SSPP_OFFSET+0xA8)
#define ROT_SSPP_CURRENT_SRC2_ADDR (SDE_ROT_SSPP_OFFSET+0xAC)
#define ROT_SSPP_CURRENT_SRC3_ADDR (SDE_ROT_SSPP_OFFSET+0xB0)
#define ROT_SSPP_DECIMATION_CONFIG (SDE_ROT_SSPP_OFFSET+0xB4)
#define ROT_SSPP_FETCH_SMP_WR_PLANE0 (SDE_ROT_SSPP_OFFSET+0xD0)
#define ROT_SSPP_FETCH_SMP_WR_PLANE1 (SDE_ROT_SSPP_OFFSET+0xD4)
#define ROT_SSPP_FETCH_SMP_WR_PLANE2 (SDE_ROT_SSPP_OFFSET+0xD8)
#define ROT_SSPP_SMP_UNPACK_RD_PLANE0 (SDE_ROT_SSPP_OFFSET+0xE0)
#define ROT_SSPP_SMP_UNPACK_RD_PLANE1 (SDE_ROT_SSPP_OFFSET+0xE4)
#define ROT_SSPP_SMP_UNPACK_RD_PLANE2 (SDE_ROT_SSPP_OFFSET+0xE8)
#define ROT_SSPP_FILL_LEVELS (SDE_ROT_SSPP_OFFSET+0xF0)
#define ROT_SSPP_STATUS (SDE_ROT_SSPP_OFFSET+0xF4)
#define ROT_SSPP_UNPACK_LINE_COUNT (SDE_ROT_SSPP_OFFSET+0xF8)
#define ROT_SSPP_UNPACK_BLK_COUNT (SDE_ROT_SSPP_OFFSET+0xFC)
#define ROT_SSPP_SW_PIX_EXT_C0_LR (SDE_ROT_SSPP_OFFSET+0x100)
#define ROT_SSPP_SW_PIX_EXT_C0_TB (SDE_ROT_SSPP_OFFSET+0x104)
#define ROT_SSPP_SW_PIX_EXT_C0_REQ_PIXELS (SDE_ROT_SSPP_OFFSET+0x108)
#define ROT_SSPP_SW_PIX_EXT_C1C2_LR (SDE_ROT_SSPP_OFFSET+0x110)
#define ROT_SSPP_SW_PIX_EXT_C1C2_TB (SDE_ROT_SSPP_OFFSET+0x114)
#define ROT_SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS (SDE_ROT_SSPP_OFFSET+0x118)
#define ROT_SSPP_SW_PIX_EXT_C3_LR (SDE_ROT_SSPP_OFFSET+0x120)
#define ROT_SSPP_SW_PIX_EXT_C3_TB (SDE_ROT_SSPP_OFFSET+0x124)
#define ROT_SSPP_SW_PIX_EXT_C3_REQ_PIXELS (SDE_ROT_SSPP_OFFSET+0x128)
#define ROT_SSPP_TRAFFIC_SHAPER (SDE_ROT_SSPP_OFFSET+0x130)
#define ROT_SSPP_CDP_CNTL (SDE_ROT_SSPP_OFFSET+0x134)
#define ROT_SSPP_UBWC_ERROR_STATUS (SDE_ROT_SSPP_OFFSET+0x138)
#define ROT_SSPP_SW_CROP_W_C0C3 (SDE_ROT_SSPP_OFFSET+0x140)
#define ROT_SSPP_SW_CROP_W_C1C2 (SDE_ROT_SSPP_OFFSET+0x144)
#define ROT_SSPP_SW_CROP_H_C0C3 (SDE_ROT_SSPP_OFFSET+0x148)
#define ROT_SSPP_SW_CROP_H_C1C2 (SDE_ROT_SSPP_OFFSET+0x14C)
#define ROT_SSPP_TRAFFIC_SHAPER_PREFILL (SDE_ROT_SSPP_OFFSET+0x150)
#define ROT_SSPP_TRAFFIC_SHAPER_REC1_PREFILL (SDE_ROT_SSPP_OFFSET+0x154)
#define ROT_SSPP_OUT_SIZE_REC1 (SDE_ROT_SSPP_OFFSET+0x160)
#define ROT_SSPP_OUT_XY_REC1 (SDE_ROT_SSPP_OFFSET+0x164)
#define ROT_SSPP_SRC_XY_REC1 (SDE_ROT_SSPP_OFFSET+0x168)
#define ROT_SSPP_SRC_SIZE_REC1 (SDE_ROT_SSPP_OFFSET+0x16C)
#define ROT_SSPP_MULTI_REC_OP_MODE (SDE_ROT_SSPP_OFFSET+0x170)
#define ROT_SSPP_SRC_FORMAT_REC1 (SDE_ROT_SSPP_OFFSET+0x174)
#define ROT_SSPP_SRC_UNPACK_PATTERN_REC1 (SDE_ROT_SSPP_OFFSET+0x178)
#define ROT_SSPP_SRC_OP_MODE_REC1 (SDE_ROT_SSPP_OFFSET+0x17C)
#define ROT_SSPP_SRC_CONSTANT_COLOR_REC1 (SDE_ROT_SSPP_OFFSET+0x180)
#define ROT_SSPP_TPG_CONTROL (SDE_ROT_SSPP_OFFSET+0x190)
#define ROT_SSPP_TPG_CONFIG (SDE_ROT_SSPP_OFFSET+0x194)
#define ROT_SSPP_TPG_COMPONENT_LIMITS (SDE_ROT_SSPP_OFFSET+0x198)
#define ROT_SSPP_TPG_RECTANGLE (SDE_ROT_SSPP_OFFSET+0x19C)
#define ROT_SSPP_TPG_BLACK_WHITE_PATTERN_FRAMES (SDE_ROT_SSPP_OFFSET+0x1A0)
#define ROT_SSPP_TPG_RGB_MAPPING (SDE_ROT_SSPP_OFFSET+0x1A4)
#define ROT_SSPP_TPG_PATTERN_GEN_INIT_VAL (SDE_ROT_SSPP_OFFSET+0x1A8)
#define SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE 0x00087
#define SDE_ROT_SSPP_FETCH_BLOCKSIZE_128 (0 << 16)
#define SDE_ROT_SSPP_FETCH_BLOCKSIZE_96 (2 << 16)
#define SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT ((0 << 16) | (1 << 15))
#define SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT ((2 << 16) | (1 << 15))
/* SDE_ROT_WB:
* OFFSET=0x0A9300
*/
#define SDE_ROT_WB_OFFSET 0x0A9300
#define ROT_WB_DST_FORMAT (SDE_ROT_WB_OFFSET+0x000)
#define ROT_WB_DST_OP_MODE (SDE_ROT_WB_OFFSET+0x004)
#define ROT_WB_DST_PACK_PATTERN (SDE_ROT_WB_OFFSET+0x008)
#define ROT_WB_DST0_ADDR (SDE_ROT_WB_OFFSET+0x00C)
#define ROT_WB_DST1_ADDR (SDE_ROT_WB_OFFSET+0x010)
#define ROT_WB_DST2_ADDR (SDE_ROT_WB_OFFSET+0x014)
#define ROT_WB_DST3_ADDR (SDE_ROT_WB_OFFSET+0x018)
#define ROT_WB_DST_YSTRIDE0 (SDE_ROT_WB_OFFSET+0x01C)
#define ROT_WB_DST_YSTRIDE1 (SDE_ROT_WB_OFFSET+0x020)
#define ROT_WB_DST_DITHER_BITDEPTH (SDE_ROT_WB_OFFSET+0x024)
#define ROT_WB_DITHER_MATRIX_ROW0 (SDE_ROT_WB_OFFSET+0x030)
#define ROT_WB_DITHER_MATRIX_ROW1 (SDE_ROT_WB_OFFSET+0x034)
#define ROT_WB_DITHER_MATRIX_ROW2 (SDE_ROT_WB_OFFSET+0x038)
#define ROT_WB_DITHER_MATRIX_ROW3 (SDE_ROT_WB_OFFSET+0x03C)
#define ROT_WB_TRAFFIC_SHAPER_WR_CLIENT (SDE_ROT_WB_OFFSET+0x040)
#define ROT_WB_DST_WRITE_CONFIG (SDE_ROT_WB_OFFSET+0x048)
#define ROT_WB_ROTATOR_PIPE_DOWNSCALER (SDE_ROT_WB_OFFSET+0x054)
#define ROT_WB_OUT_SIZE (SDE_ROT_WB_OFFSET+0x074)
#define ROT_WB_DST_ALPHA_X_VALUE (SDE_ROT_WB_OFFSET+0x078)
#define ROT_WB_HW_VERSION (SDE_ROT_WB_OFFSET+0x080)
#define ROT_WB_DANGER_LUT (SDE_ROT_WB_OFFSET+0x084)
#define ROT_WB_SAFE_LUT (SDE_ROT_WB_OFFSET+0x088)
#define ROT_WB_CREQ_LUT (SDE_ROT_WB_OFFSET+0x08C)
#define ROT_WB_QOS_CTRL (SDE_ROT_WB_OFFSET+0x090)
#define ROT_WB_SYS_CACHE_MODE (SDE_ROT_WB_OFFSET+0x094)
#define ROT_WB_CREQ_LUT_0 (SDE_ROT_WB_OFFSET+0x098)
#define ROT_WB_CREQ_LUT_1 (SDE_ROT_WB_OFFSET+0x09C)
#define ROT_WB_UBWC_STATIC_CTRL (SDE_ROT_WB_OFFSET+0x144)
#define ROT_WB_SBUF_STATUS_PLANE0 (SDE_ROT_WB_OFFSET+0x148)
#define ROT_WB_SBUF_STATUS_PLANE1 (SDE_ROT_WB_OFFSET+0x14C)
#define ROT_WB_CSC_MATRIX_COEFF_0 (SDE_ROT_WB_OFFSET+0x260)
#define ROT_WB_CSC_MATRIX_COEFF_1 (SDE_ROT_WB_OFFSET+0x264)
#define ROT_WB_CSC_MATRIX_COEFF_2 (SDE_ROT_WB_OFFSET+0x268)
#define ROT_WB_CSC_MATRIX_COEFF_3 (SDE_ROT_WB_OFFSET+0x26C)
#define ROT_WB_CSC_MATRIX_COEFF_4 (SDE_ROT_WB_OFFSET+0x270)
#define ROT_WB_CSC_COMP0_PRECLAMP (SDE_ROT_WB_OFFSET+0x274)
#define ROT_WB_CSC_COMP1_PRECLAMP (SDE_ROT_WB_OFFSET+0x278)
#define ROT_WB_CSC_COMP2_PRECLAMP (SDE_ROT_WB_OFFSET+0x27C)
#define ROT_WB_CSC_COMP0_POSTCLAMP (SDE_ROT_WB_OFFSET+0x280)
#define ROT_WB_CSC_COMP1_POSTCLAMP (SDE_ROT_WB_OFFSET+0x284)
#define ROT_WB_CSC_COMP2_POSTCLAMP (SDE_ROT_WB_OFFSET+0x288)
#define ROT_WB_CSC_COMP0_PREBIAS (SDE_ROT_WB_OFFSET+0x28C)
#define ROT_WB_CSC_COMP1_PREBIAS (SDE_ROT_WB_OFFSET+0x290)
#define ROT_WB_CSC_COMP2_PREBIAS (SDE_ROT_WB_OFFSET+0x294)
#define ROT_WB_CSC_COMP0_POSTBIAS (SDE_ROT_WB_OFFSET+0x298)
#define ROT_WB_CSC_COMP1_POSTBIAS (SDE_ROT_WB_OFFSET+0x29C)
#define ROT_WB_CSC_COMP2_POSTBIAS (SDE_ROT_WB_OFFSET+0x2A0)
#define ROT_WB_DST_ADDR_SW_STATUS (SDE_ROT_WB_OFFSET+0x2B0)
#define ROT_WB_CDP_CNTL (SDE_ROT_WB_OFFSET+0x2B4)
#define ROT_WB_STATUS (SDE_ROT_WB_OFFSET+0x2B8)
#define ROT_WB_UBWC_ERROR_STATUS (SDE_ROT_WB_OFFSET+0x2BC)
#define ROT_WB_OUT_IMG_SIZE (SDE_ROT_WB_OFFSET+0x2C0)
#define ROT_WB_OUT_XY (SDE_ROT_WB_OFFSET+0x2C4)
/* SDE_ROT_REGDMA_RAM:
* OFFSET=0x0A9600
*/
#define SDE_ROT_REGDMA_RAM_OFFSET 0x0A9600
#define REGDMA_RAM_REGDMA_CMD_RAM (SDE_ROT_REGDMA_RAM_OFFSET+0x00)
/* SDE_ROT_REGDMA_CSR:
* OFFSET=0x0AB600
*/
#define SDE_ROT_REGDMA_OFFSET 0x0AB600
#define REGDMA_CSR_REGDMA_VERSION (SDE_ROT_REGDMA_OFFSET+0x00)
#define REGDMA_CSR_REGDMA_OP_MODE (SDE_ROT_REGDMA_OFFSET+0x04)
#define REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT (SDE_ROT_REGDMA_OFFSET+0x10)
#define REGDMA_CSR_REGDMA_QUEUE_0_STATUS (SDE_ROT_REGDMA_OFFSET+0x14)
#define REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT (SDE_ROT_REGDMA_OFFSET+0x18)
#define REGDMA_CSR_REGDMA_QUEUE_1_STATUS (SDE_ROT_REGDMA_OFFSET+0x1C)
#define REGDMA_CSR_REGDMA_BLOCK_LO_0 (SDE_ROT_REGDMA_OFFSET+0x20)
#define REGDMA_CSR_REGDMA_BLOCK_HI_0 (SDE_ROT_REGDMA_OFFSET+0x24)
#define REGDMA_CSR_REGDMA_BLOCK_LO_1 (SDE_ROT_REGDMA_OFFSET+0x28)
#define REGDMA_CSR_REGDMA_BLOCK_HI_1 (SDE_ROT_REGDMA_OFFSET+0x2C)
#define REGDMA_CSR_REGDMA_BLOCK_LO_2 (SDE_ROT_REGDMA_OFFSET+0x30)
#define REGDMA_CSR_REGDMA_BLOCK_HI_2 (SDE_ROT_REGDMA_OFFSET+0x34)
#define REGDMA_CSR_REGDMA_BLOCK_LO_3 (SDE_ROT_REGDMA_OFFSET+0x38)
#define REGDMA_CSR_REGDMA_BLOCK_HI_3 (SDE_ROT_REGDMA_OFFSET+0x3C)
#define REGDMA_CSR_REGDMA_WD_TIMER_CTL (SDE_ROT_REGDMA_OFFSET+0x40)
#define REGDMA_CSR_REGDMA_WD_TIMER_CTL2 (SDE_ROT_REGDMA_OFFSET+0x44)
#define REGDMA_CSR_REGDMA_WD_TIMER_LOAD_VALUE (SDE_ROT_REGDMA_OFFSET+0x48)
#define REGDMA_CSR_REGDMA_WD_TIMER_STATUS_VALUE (SDE_ROT_REGDMA_OFFSET+0x4C)
#define REGDMA_CSR_REGDMA_INT_STATUS (SDE_ROT_REGDMA_OFFSET+0x50)
#define REGDMA_CSR_REGDMA_INT_EN (SDE_ROT_REGDMA_OFFSET+0x54)
#define REGDMA_CSR_REGDMA_INT_CLEAR (SDE_ROT_REGDMA_OFFSET+0x58)
#define REGDMA_CSR_REGDMA_BLOCK_STATUS (SDE_ROT_REGDMA_OFFSET+0x5C)
#define REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET (SDE_ROT_REGDMA_OFFSET+0x60)
#define REGDMA_CSR_REGDMA_FSM_STATE (SDE_ROT_REGDMA_OFFSET+0x64)
#define REGDMA_CSR_REGDMA_DEBUG_SEL (SDE_ROT_REGDMA_OFFSET+0x68)
/* SDE_ROT_QDSS:
* OFFSET=0x0AAF00
*/
#define ROT_QDSS_CONFIG 0x00
#define ROT_QDSS_ATB_DATA_ENABLE0 0x04
#define ROT_QDSS_ATB_DATA_ENABLE1 0x08
#define ROT_QDSS_ATB_DATA_ENABLE2 0x0C
#define ROT_QDSS_ATB_DATA_ENABLE3 0x10
#define ROT_QDSS_CLK_CTRL 0x14
#define ROT_QDSS_CLK_STATUS 0x18
#define ROT_QDSS_PULSE_TRIGGER 0x20
/*
* SDE_ROT_VBIF_NRT:
*/
#define SDE_ROT_VBIF_NRT_OFFSET 0
/* REGDMA OP Code */
#define REGDMA_OP_NOP (0 << 28)
#define REGDMA_OP_REGWRITE (1 << 28)
#define REGDMA_OP_REGMODIFY (2 << 28)
#define REGDMA_OP_BLKWRITE_SINGLE (3 << 28)
#define REGDMA_OP_BLKWRITE_INC (4 << 28)
#define REGDMA_OP_MASK 0xF0000000
/* REGDMA ADDR offset Mask */
#define REGDMA_ADDR_OFFSET_MASK 0xFFFFF
/* REGDMA command trigger select */
#define REGDMA_CMD_TRIG_SEL_SW_START (0 << 27)
#define REGDMA_CMD_TRIG_SEL_MDP_FLUSH (1 << 27)
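/*
 * Illustrative note (not part of the register map): judging from the masks
 * above, a REGDMA command word carries the opcode in bits [31:28] and the
 * target register offset in the low REGDMA_ADDR_OFFSET_MASK bits; block
 * writes are then followed by payload words. The macro below is a
 * hypothetical example of that packing, not a driver definition.
 */
#define REGDMA_EXAMPLE_REGWRITE(reg_off) \
	(REGDMA_OP_REGWRITE | ((reg_off) & REGDMA_ADDR_OFFSET_MASK))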
/* General defines */
#define ROT_DONE_MASK 0x1
#define ROT_DONE_CLEAR 0x1
#define ROT_BUSY_BIT BIT(0)
#define ROT_ERROR_BIT BIT(8)
#define ROT_STATUS_MASK (ROT_BUSY_BIT | ROT_ERROR_BIT)
#define REGDMA_BUSY BIT(0)
#define REGDMA_EN 0x1
#define REGDMA_SECURE_EN BIT(8)
#define REGDMA_HALT BIT(16)
#define REGDMA_WATCHDOG_INT BIT(19)
#define REGDMA_INVALID_DESCRIPTOR BIT(18)
#define REGDMA_INCOMPLETE_CMD BIT(17)
#define REGDMA_INVALID_CMD BIT(16)
#define REGDMA_QUEUE1_INT2 BIT(10)
#define REGDMA_QUEUE1_INT1 BIT(9)
#define REGDMA_QUEUE1_INT0 BIT(8)
#define REGDMA_QUEUE0_INT2 BIT(2)
#define REGDMA_QUEUE0_INT1 BIT(1)
#define REGDMA_QUEUE0_INT0 BIT(0)
#define REGDMA_INT_MASK 0x000F0707
#define REGDMA_INT_HIGH_MASK 0x00000007
#define REGDMA_INT_LOW_MASK 0x00000700
#define REGDMA_INT_ERR_MASK 0x000F0000
#define REGDMA_TIMESTAMP_REG ROT_SSPP_TPG_PATTERN_GEN_INIT_VAL
#define REGDMA_RESET_STATUS_REG ROT_SSPP_TPG_RGB_MAPPING
#define REGDMA_INT_0_MASK 0x101
#define REGDMA_INT_1_MASK 0x202
#define REGDMA_INT_2_MASK 0x404
#endif /*_SDE_ROTATOR_R3_HWIO_H */


@@ -0,0 +1,452 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _SDE_ROTATOR_R3_INTERNAL_H
#define _SDE_ROTATOR_R3_INTERNAL_H
#include "sde_rotator_core.h"
struct sde_hw_rotator;
struct sde_hw_rotator_context;
/**
* Flags
*/
#define SDE_ROT_FLAG_SECURE_OVERLAY_SESSION 0x1
#define SDE_ROT_FLAG_FLIP_LR 0x2
#define SDE_ROT_FLAG_FLIP_UD 0x4
#define SDE_ROT_FLAG_SOURCE_ROTATED_90 0x8
#define SDE_ROT_FLAG_ROT_90 0x10
#define SDE_ROT_FLAG_DEINTERLACE 0x20
#define SDE_ROT_FLAG_SECURE_CAMERA_SESSION 0x40
/**
* General defines
*/
#define SDE_HW_ROT_REGDMA_RAM_SIZE 1024
#define SDE_HW_ROT_REGDMA_TOTAL_CTX 8
#define SDE_HW_ROT_REGDMA_SEG_MASK (SDE_HW_ROT_REGDMA_TOTAL_CTX - 1)
#define SDE_HW_ROT_REGDMA_SEG_SIZE \
(SDE_HW_ROT_REGDMA_RAM_SIZE / SDE_HW_ROT_REGDMA_TOTAL_CTX)
#define SDE_REGDMA_SWTS_MASK 0x00000FFF
#define SDE_REGDMA_SWTS_SHIFT 12
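/*
 * Illustrative arithmetic (hypothetical helper, not a driver definition):
 * the 1024-word REGDMA RAM is split evenly across the 8 contexts, giving
 * SDE_HW_ROT_REGDMA_SEG_SIZE = 1024 / 8 = 128 words per segment, and a
 * context's slot is (timestamp & SDE_HW_ROT_REGDMA_SEG_MASK), i.e. the
 * timestamp modulo 8. The SWTS mask/shift above suggest the software
 * timestamp register packs two 12-bit values, one per priority queue.
 */
static inline u32 sde_hw_rot_example_segment_slot(u32 timestamp)
{
	return timestamp & SDE_HW_ROT_REGDMA_SEG_MASK;	/* 0..7 */
}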
enum sde_rot_queue_prio {
ROT_QUEUE_HIGH_PRIORITY,
ROT_QUEUE_LOW_PRIORITY,
ROT_QUEUE_MAX
};
enum sde_rot_angle {
ROT_ANGLE_0,
ROT_ANGLE_90,
ROT_ANGEL_MAX
};
enum sde_rotator_regdma_mode {
ROT_REGDMA_OFF,
ROT_REGDMA_ON,
ROT_REGDMA_MAX
};
/**
 * struct sde_hw_rot_sspp_cfg: Rotator SSPP configuration description
 * @fmt: source surface format information
 * @src_plane: source plane size information
 * @src_rect: src ROI; caller takes into account the different operations
 *            such as decimation, flip etc. to program this field
 * @data: source surface address information
 * @img_width: source image width in pixels
 * @img_height: source image height in pixels
 * @fps: frame rate of the session
 * @bw: requested bandwidth for the session
 */
struct sde_hw_rot_sspp_cfg {
struct sde_mdp_format_params *fmt;
struct sde_mdp_plane_sizes src_plane;
struct sde_rect *src_rect;
struct sde_mdp_data *data;
u32 img_width;
u32 img_height;
u32 fps;
u64 bw;
};
/**
 * struct sde_hw_rot_wb_cfg: Rotator WB configuration description
 * @fmt: destination surface format information
 * @dst_plane: destination plane size information
 * @dst_rect: dest ROI; caller takes into account the different operations
 *            such as decimation, flip etc. to program this field
 * @data: destination surface address information
 * @img_width: destination image width in pixels
 * @img_height: destination image height in pixels
 * @v_downscale_factor: vertical downscale factor
 * @h_downscale_factor: horizontal downscale factor
 * @fps: frame rate of the session
 * @bw: requested bandwidth for the session
 * @prefill_bw: prefill bandwidth in Bps
 */
struct sde_hw_rot_wb_cfg {
struct sde_mdp_format_params *fmt;
struct sde_mdp_plane_sizes dst_plane;
struct sde_rect *dst_rect;
struct sde_mdp_data *data;
u32 img_width;
u32 img_height;
u32 v_downscale_factor;
u32 h_downscale_factor;
u32 fps;
u64 bw;
u64 prefill_bw;
};
/**
*
* struct sde_hw_rotator_ops: Interface to the Rotator Hw driver functions
*
 * Prerequisites:
* - Caller must call the init function to get the rotator context
* - These functions will be called after clocks are enabled
*/
struct sde_hw_rotator_ops {
/**
* setup_rotator_fetchengine():
* Setup Source format
* Setup Source dimension/cropping rectangle (ROI)
* Setup Source surface base address and stride
* Setup fetch engine op mode (linear/tiled/compression/...)
* @ctx: Rotator context created in sde_hw_rotator_config
* @queue_id: Select either low / high priority queue
* @cfg: Rotator Fetch engine configuration parameters
* @danger_lut: Danger LUT setting
* @safe_lut: Safe LUT setting
* @dnsc_factor_w: Downscale factor for width
* @dnsc_factor_h: Downscale factor for height
* @flags: Specific config flag, see SDE_ROT_FLAG_ for details
*/
void (*setup_rotator_fetchengine)(
struct sde_hw_rotator_context *ctx,
enum sde_rot_queue_prio queue_id,
struct sde_hw_rot_sspp_cfg *cfg,
u32 danger_lut,
u32 safe_lut,
u32 dnsc_factor_w,
u32 dnsc_factor_h,
u32 flags);
/**
* setup_rotator_wbengine():
* Setup destination formats
* Setup destination dimension/cropping rectangle (ROI)
* Setup destination surface base address and strides
* Setup writeback engine op mode (linear/tiled/compression)
* @ctx: Rotator context created in sde_hw_rotator_config
* @queue_id: Select either low / high priority queue
* @cfg: Rotator WriteBack engine configuration parameters
* @flags: Specific config flag, see SDE_ROT_FLAG_ for details
*/
void (*setup_rotator_wbengine)(
struct sde_hw_rotator_context *ctx,
enum sde_rot_queue_prio queue_id,
struct sde_hw_rot_wb_cfg *cfg,
u32 flags);
/**
* start_rotator():
* Kick start rotator operation based on cached setup parameters
* REGDMA commands will get generated at this points
* @ctx: Rotator context
* @queue_id: Select either low / high priority queue
* Returns: unique job timestamp per submit. Used for tracking
* rotator finished job.
*/
u32 (*start_rotator)(
struct sde_hw_rotator_context *ctx,
enum sde_rot_queue_prio queue_id);
/**
* wait_rotator_done():
* Notify Rotator HAL layer previously submitted job finished.
* A job timestamp will return to caller.
* @ctx: Rotator context
* @flags: Reserved
* Returns: job timestamp for tracking purpose
*
*/
u32 (*wait_rotator_done)(
struct sde_hw_rotator_context *ctx,
enum sde_rot_queue_prio queue_id,
u32 flags);
/**
* get_pending_ts():
* Obtain current active timestamp from rotator hw
* @rot: HW Rotator structure
* @ctx: Rotator context
* @ts: current timestamp return from rot hw
* Returns: true if context has pending requests
*/
int (*get_pending_ts)(
struct sde_hw_rotator *rot,
struct sde_hw_rotator_context *ctx,
u32 *ts);
/**
* update_ts():
 * Update rotator timestamp with the given value
* @rot: HW Rotator structure
* @q_id: rotator queue id
* @ts: new timestamp for rotator
*/
void (*update_ts)(
struct sde_hw_rotator *rot,
u32 q_id,
u32 ts);
};
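/*
 * Illustrative call order (hypothetical sketch, not part of the driver): a
 * submit typically configures the fetch and writeback engines, kicks the
 * job, then waits for its timestamp. The cfg structures, the zeroed LUTs
 * and flags, and the 1:1 downscale factors here are placeholder
 * assumptions; error handling is omitted.
 */
static inline u32 sde_hw_rotator_example_submit(
		const struct sde_hw_rotator_ops *ops,
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio q,
		struct sde_hw_rot_sspp_cfg *src_cfg,
		struct sde_hw_rot_wb_cfg *dst_cfg)
{
	ops->setup_rotator_fetchengine(ctx, q, src_cfg, 0, 0, 1, 1, 0);
	ops->setup_rotator_wbengine(ctx, q, dst_cfg, 0);
	ops->start_rotator(ctx, q);
	return ops->wait_rotator_done(ctx, q, 0);
}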
/**
* struct sde_dbg_buf : Debug buffer used by debugfs
* @vaddr: VA address mapped from dma buffer
* @dmabuf: DMA buffer
* @buflen: Length of DMA buffer
* @width: pixel width of buffer
* @height: pixel height of buffer
*/
struct sde_dbg_buf {
void *vaddr;
struct dma_buf *dmabuf;
unsigned long buflen;
u32 width;
u32 height;
};
/**
* struct sde_hw_rotator_context : Each rotator context ties to each priority
 * queue. The maximum number of concurrent regdma contexts is limited by the
 * regdma RAM segment allocation. Each rotator context can be of any priority.
 * An incrementing timestamp is assigned to each context and used to identify it.
* @list: list of pending context
* @sequence_id: unique sequence identifier for rotation request
* @sbuf_mode: true if stream buffer is requested
* @start_ctrl: start control register update value
* @sys_cache_mode: sys cache mode register update value
* @op_mode: rot top op mode selection
* @last_entry: pointer to last configured entry (for debugging purposes)
*/
struct sde_hw_rotator_context {
struct list_head list;
struct sde_hw_rotator *rot;
struct sde_rot_hw_resource *hwres;
enum sde_rot_queue_prio q_id;
u32 session_id;
u32 sequence_id;
char __iomem *regdma_base;
char __iomem *regdma_wrptr;
u32 timestamp;
struct completion rot_comp;
wait_queue_head_t regdma_waitq;
struct sde_dbg_buf src_dbgbuf;
struct sde_dbg_buf dst_dbgbuf;
u32 last_regdma_isr_status;
u32 last_regdma_timestamp;
dma_addr_t ts_addr;
bool is_secure;
bool is_traffic_shaping;
bool sbuf_mode;
bool abort;
u32 start_ctrl;
u32 sys_cache_mode;
u32 op_mode;
struct sde_rot_entry *last_entry;
};
/**
* struct sde_hw_rotator_resource_info : Each rotator resource ties to each
* priority queue
*/
struct sde_hw_rotator_resource_info {
struct sde_hw_rotator *rot;
struct sde_rot_hw_resource hw;
};
/**
* struct sde_hw_rotator : Rotator description
* @hw: mdp register mapped offset
* @ops: pointer to operations possible for the rotator HW
* @highest_bank: highest bank size of memory
* @ubwc_malsize: ubwc minimum allowable length
* @ubwc_swizzle: ubwc swizzle enable
* @sbuf_headroom: stream buffer headroom in lines
* @solid_fill: true if solid fill is requested
* @constant_color: solid fill constant color
* @sbuf_ctx: list of active sbuf context in FIFO order
* @vid_trigger: video mode trigger select
* @cmd_trigger: command mode trigger select
* @inpixfmts: array of supported input pixel formats fourcc per mode
* @num_inpixfmt: size of the supported input pixel format array per mode
* @outpixfmts: array of supported output pixel formats in fourcc per mode
* @num_outpixfmt: size of the supported output pixel formats array per mode
* @downscale_caps: capability string of scaling
* @maxlinewidth: maximum line width supported
*/
struct sde_hw_rotator {
/* base */
char __iomem *mdss_base;
/* Platform device from upper manager */
struct platform_device *pdev;
/* Ops */
struct sde_hw_rotator_ops ops;
/* Cmd Queue */
u32 cmd_queue[SDE_HW_ROT_REGDMA_RAM_SIZE];
/* Cmd Queue Write Ptr */
char __iomem *cmd_wr_ptr[ROT_QUEUE_MAX][SDE_HW_ROT_REGDMA_TOTAL_CTX];
/* Rotator Context */
struct sde_hw_rotator_context
*rotCtx[ROT_QUEUE_MAX][SDE_HW_ROT_REGDMA_TOTAL_CTX];
/* Cmd timestamp sequence for different priority*/
atomic_t timestamp[ROT_QUEUE_MAX];
/* regdma mode */
enum sde_rotator_regdma_mode mode;
/* logical interrupt number */
int irq_num;
atomic_t irq_enabled;
/* internal ION memory for SW timestamp */
struct ion_client *iclient;
struct sde_mdp_img_data swts_buf;
void *swts_buffer;
u32 highest_bank;
u32 ubwc_malsize;
u32 ubwc_swizzle;
u32 sbuf_headroom;
u32 solid_fill;
u32 constant_color;
spinlock_t rotctx_lock;
spinlock_t rotisr_lock;
bool dbgmem;
bool reset_hw_ts;
u32 last_hwts[ROT_QUEUE_MAX];
u32 koff_timeout;
u32 vid_trigger;
u32 cmd_trigger;
struct list_head sbuf_ctx[ROT_QUEUE_MAX];
const u32 *inpixfmts[SDE_ROTATOR_MODE_MAX];
u32 num_inpixfmt[SDE_ROTATOR_MODE_MAX];
const u32 *outpixfmts[SDE_ROTATOR_MODE_MAX];
u32 num_outpixfmt[SDE_ROTATOR_MODE_MAX];
const char *downscale_caps;
u32 maxlinewidth;
};
/**
* sde_hw_rotator_get_regdma_ctxidx(): regdma segment index is based on
* timestamp. For non-regdma, just return 0 (i.e. first index)
* @ctx: Rotator Context
* return: regdma segment index
*/
static inline u32 sde_hw_rotator_get_regdma_ctxidx(
struct sde_hw_rotator_context *ctx)
{
if (ctx->rot->mode == ROT_REGDMA_OFF)
return 0;
else
return ctx->timestamp & SDE_HW_ROT_REGDMA_SEG_MASK;
}
/**
 * sde_hw_rotator_get_regdma_segment_base: return the base pointer of the
 * current regdma command buffer
* @ctx: Rotator Context
* return: base segment address
*/
static inline char __iomem *sde_hw_rotator_get_regdma_segment_base(
struct sde_hw_rotator_context *ctx)
{
SDEROT_DBG("regdma base @slot[%d]: %pK\n",
sde_hw_rotator_get_regdma_ctxidx(ctx),
ctx->regdma_base);
return ctx->regdma_base;
}
/**
* sde_hw_rotator_get_regdma_segment(): return current regdma command buffer
* pointer for current regdma segment.
* @ctx: Rotator Context
* return: segment address
*/
static inline char __iomem *sde_hw_rotator_get_regdma_segment(
struct sde_hw_rotator_context *ctx)
{
u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
char __iomem *addr = ctx->regdma_wrptr;
SDEROT_DBG("regdma slot[%d] ==> %pK\n", idx, addr);
return addr;
}
/**
* sde_hw_rotator_put_regdma_segment(): update current regdma command buffer
* pointer for current regdma segment
* @ctx: Rotator Context
* @wrptr: current regdma segment location
*/
static inline void sde_hw_rotator_put_regdma_segment(
struct sde_hw_rotator_context *ctx,
char __iomem *wrptr)
{
u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
ctx->regdma_wrptr = wrptr;
SDEROT_DBG("regdma slot[%d] <== %pK\n", idx, wrptr);
}
/**
* sde_hw_rotator_put_ctx(): Storing rotator context according to its
* timestamp.
*/
static inline void sde_hw_rotator_put_ctx(struct sde_hw_rotator_context *ctx)
{
struct sde_hw_rotator *rot = ctx->rot;
u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
unsigned long flags;
spin_lock_irqsave(&rot->rotisr_lock, flags);
rot->rotCtx[ctx->q_id][idx] = ctx;
if (ctx->sbuf_mode)
list_add_tail(&ctx->list, &rot->sbuf_ctx[ctx->q_id]);
spin_unlock_irqrestore(&rot->rotisr_lock, flags);
SDEROT_DBG("rotCtx[%d][%d] <== ctx:%pK | session-id:%d\n",
ctx->q_id, idx, ctx, ctx->session_id);
}
/**
* sde_hw_rotator_clr_ctx(): Clearing rotator context according to its
* timestamp.
*/
static inline void sde_hw_rotator_clr_ctx(struct sde_hw_rotator_context *ctx)
{
struct sde_hw_rotator *rot = ctx->rot;
u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
unsigned long flags;
spin_lock_irqsave(&rot->rotisr_lock, flags);
rot->rotCtx[ctx->q_id][idx] = NULL;
if (ctx->sbuf_mode)
list_del_init(&ctx->list);
spin_unlock_irqrestore(&rot->rotisr_lock, flags);
SDEROT_DBG("rotCtx[%d][%d] <== null | session-id:%d\n",
ctx->q_id, idx, ctx->session_id);
}
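/*
 * Illustrative sketch (hypothetical helper, not part of the driver): when
 * regdma is enabled, the ISR path can recover the owning context of a
 * finished job from its timestamp alone, since sde_hw_rotator_put_ctx()
 * stores it at rotCtx[q_id][timestamp & SDE_HW_ROT_REGDMA_SEG_MASK].
 * Callers are assumed to hold rotisr_lock, as in the helpers above.
 */
static inline struct sde_hw_rotator_context *sde_hw_rotator_example_lookup_ctx(
		struct sde_hw_rotator *rot, u32 q_id, u32 timestamp)
{
	return rot->rotCtx[q_id][timestamp & SDE_HW_ROT_REGDMA_SEG_MASK];
}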
#endif /*_SDE_ROTATOR_R3_INTERNAL_H */


@@ -0,0 +1,690 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/of_platform.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/qcom-iommu-util.h>
#include "soc/qcom/secure_buffer.h"
#include "sde_rotator_base.h"
#include "sde_rotator_util.h"
#include "sde_rotator_io_util.h"
#include "sde_rotator_smmu.h"
#include "sde_rotator_debug.h"
#define SMMU_SDE_ROT_SEC "qcom,smmu_sde_rot_sec"
#define SMMU_SDE_ROT_UNSEC "qcom,smmu_sde_rot_unsec"
struct sde_smmu_domain {
char *ctx_name;
int domain;
};
static inline bool sde_smmu_is_valid_domain_type(
struct sde_rot_data_type *mdata, int domain_type)
{
return true;
}
static inline bool sde_smmu_is_valid_domain_condition(
struct sde_rot_data_type *mdata,
int domain_type,
bool is_attach)
{
if (is_attach) {
if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
mdata->sde_caps_map) &&
(mdata->sec_cam_en &&
domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
return false;
else
return true;
} else {
if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
mdata->sde_caps_map) &&
(mdata->sec_cam_en &&
domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
return true;
else
return false;
}
}
struct sde_smmu_client *sde_smmu_get_cb(u32 domain)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
if (!sde_smmu_is_valid_domain_type(mdata, domain))
return NULL;
return (domain >= SDE_IOMMU_MAX_DOMAIN) ? NULL :
&mdata->sde_smmu[domain];
}
static int sde_smmu_util_parse_dt_clock(struct platform_device *pdev,
struct sde_module_power *mp)
{
u32 i = 0, rc = 0;
const char *clock_name;
u32 clock_rate;
int num_clk;
num_clk = of_property_count_strings(pdev->dev.of_node,
"clock-names");
if (num_clk < 0) {
SDEROT_DBG("clocks are not defined\n");
num_clk = 0;
}
mp->num_clk = num_clk;
mp->clk_config = devm_kzalloc(&pdev->dev,
sizeof(struct sde_clk) * mp->num_clk, GFP_KERNEL);
if (num_clk && !mp->clk_config) {
rc = -ENOMEM;
mp->num_clk = 0;
goto clk_err;
}
for (i = 0; i < mp->num_clk; i++) {
of_property_read_string_index(pdev->dev.of_node, "clock-names",
i, &clock_name);
strlcpy(mp->clk_config[i].clk_name, clock_name,
sizeof(mp->clk_config[i].clk_name));
of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
i, &clock_rate);
mp->clk_config[i].rate = clock_rate;
if (!clock_rate)
mp->clk_config[i].type = SDE_CLK_AHB;
else
mp->clk_config[i].type = SDE_CLK_PCLK;
}
clk_err:
return rc;
}
static int sde_smmu_clk_register(struct platform_device *pdev,
struct sde_module_power *mp)
{
int i, ret;
struct clk *clk;
ret = sde_smmu_util_parse_dt_clock(pdev, mp);
if (ret) {
SDEROT_ERR("unable to parse clocks\n");
return -EINVAL;
}
for (i = 0; i < mp->num_clk; i++) {
clk = devm_clk_get(&pdev->dev,
mp->clk_config[i].clk_name);
if (IS_ERR(clk)) {
SDEROT_ERR("unable to get clk: %s\n",
mp->clk_config[i].clk_name);
return PTR_ERR(clk);
}
mp->clk_config[i].clk = clk;
}
return 0;
}
static int sde_smmu_enable_power(struct sde_smmu_client *sde_smmu,
bool enable)
{
int rc = 0;
struct sde_module_power *mp;
if (!sde_smmu)
return -EINVAL;
mp = &sde_smmu->mp;
if (!mp->num_vreg && !mp->num_clk)
return 0;
if (enable) {
rc = sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, true);
if (rc) {
SDEROT_ERR("vreg enable failed - rc:%d\n", rc);
goto end;
}
sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
VOTE_INDEX_76_MHZ);
rc = sde_rot_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
SDEROT_ERR("clock enable failed - rc:%d\n", rc);
sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
VOTE_INDEX_DISABLE);
sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg,
false);
goto end;
}
} else {
sde_rot_enable_clk(mp->clk_config, mp->num_clk, false);
sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
VOTE_INDEX_DISABLE);
sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, false);
}
end:
return rc;
}
/*
* sde_smmu_attach()
*
* Associates each configured VA range with the corresponding smmu context
 * bank device. Enables the clks, since SMMU requires voting for them before use.
 * The iommu attach is done only once, during the initial attach, and is never
 * detached, as SMMU v2 uses a feature called 'retention'.
*/
int sde_smmu_attach(struct sde_rot_data_type *mdata)
{
struct sde_smmu_client *sde_smmu;
int i, rc = 0;
for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
if (!sde_smmu_is_valid_domain_type(mdata, i))
continue;
sde_smmu = sde_smmu_get_cb(i);
if (sde_smmu && sde_smmu->dev) {
rc = sde_smmu_enable_power(sde_smmu, true);
if (rc) {
SDEROT_ERR(
"power enable failed - domain:[%d] rc:%d\n",
i, rc);
goto err;
}
if (!sde_smmu->domain_attached &&
sde_smmu_is_valid_domain_condition(mdata,
i,
true)) {
rc = qcom_iommu_sid_switch(sde_smmu->dev, SID_ACQUIRE);
if (rc) {
SDEROT_ERR(
"iommu sid switch failed for domain[%d] with err:%d\n",
i, rc);
sde_smmu_enable_power(sde_smmu,
false);
goto err;
}
sde_smmu->domain_attached = true;
SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
}
} else {
SDEROT_DBG(
"iommu device not attached for domain[%d]\n",
i);
}
}
return 0;
err:
for (i--; i >= 0; i--) {
sde_smmu = sde_smmu_get_cb(i);
if (sde_smmu && sde_smmu->dev) {
iommu_detach_device(sde_smmu->rot_domain,
sde_smmu->dev);
sde_smmu_enable_power(sde_smmu, false);
sde_smmu->domain_attached = false;
}
}
return rc;
}
/*
* sde_smmu_detach()
*
 * Only disables the clks, since it is not required to detach the iommu-mapped
 * VA range from the SMMU device, as explained in sde_smmu_attach().
*/
int sde_smmu_detach(struct sde_rot_data_type *mdata)
{
struct sde_smmu_client *sde_smmu;
int i, rc;
for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
if (!sde_smmu_is_valid_domain_type(mdata, i))
continue;
sde_smmu = sde_smmu_get_cb(i);
if (sde_smmu && sde_smmu->dev) {
if (sde_smmu->domain_attached &&
sde_smmu_is_valid_domain_condition(mdata,
i, false)) {
rc = qcom_iommu_sid_switch(sde_smmu->dev, SID_RELEASE);
if (rc)
SDEROT_ERR("iommu sid switch failed (%d)\n", rc);
else {
SDEROT_DBG("iommu domain[%i] detached\n", i);
sde_smmu->domain_attached = false;
}
} else {
sde_smmu_enable_power(sde_smmu, false);
}
}
}
return 0;
}
int sde_smmu_get_domain_id(u32 type)
{
return type;
}
/*
* sde_smmu_dma_buf_attach()
*
 * Same as dma_buf_attach(), except that the device is taken from
 * the configured SMMU v2 context bank for the given domain.
*/
struct dma_buf_attachment *sde_smmu_dma_buf_attach(
struct dma_buf *dma_buf, struct device *dev, int domain)
{
struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
if (!sde_smmu) {
SDEROT_ERR("not able to get smmu context\n");
return NULL;
}
return dma_buf_attach(dma_buf, sde_smmu->dev);
}
/*
* sde_smmu_map_dma_buf()
*
 * Maps an existing buffer (described by a scatterlist table) into the SMMU
 * context bank device and returns the IOVA and size of the mapping.
 * msm_map_dma_buf is deprecated with SMMU v2; dma_map_sg is used instead.
*/
int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
struct sg_table *table, int domain, dma_addr_t *iova,
unsigned long *size, int dir)
{
int rc;
struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
unsigned long attrs = 0;
if (!sde_smmu) {
SDEROT_ERR("not able to get smmu context\n");
return -EINVAL;
}
rc = dma_map_sg_attrs(sde_smmu->dev, table->sgl, table->nents, dir,
attrs);
if (!rc) {
SDEROT_ERR("dma map sg failed\n");
return -ENOMEM;
}
*iova = table->sgl->dma_address;
*size = table->sgl->dma_length;
return 0;
}
void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
int dir, struct dma_buf *dma_buf)
{
struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
if (!sde_smmu) {
SDEROT_ERR("not able to get smmu context\n");
return;
}
dma_unmap_sg(sde_smmu->dev, table->sgl, table->nents, dir);
}
static DEFINE_MUTEX(sde_smmu_ref_cnt_lock);
int sde_smmu_ctrl(int enable)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
int rc = 0;
mutex_lock(&sde_smmu_ref_cnt_lock);
SDEROT_EVTLOG(__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
mdata->iommu_attached);
SDEROT_DBG("%pS: enable:%d ref_cnt:%d attach:%d\n",
__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
mdata->iommu_attached);
if (enable) {
if (!mdata->iommu_attached) {
rc = sde_smmu_attach(mdata);
if (!rc)
mdata->iommu_attached = true;
}
mdata->iommu_ref_cnt++;
} else {
if (mdata->iommu_ref_cnt) {
mdata->iommu_ref_cnt--;
if (mdata->iommu_ref_cnt == 0)
if (mdata->iommu_attached) {
rc = sde_smmu_detach(mdata);
if (!rc)
mdata->iommu_attached = false;
}
} else {
SDEROT_ERR("unbalanced iommu ref\n");
}
}
mutex_unlock(&sde_smmu_ref_cnt_lock);
if (rc < 0)
return rc;
else
return mdata->iommu_ref_cnt;
}
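/*
 * Illustrative usage (hypothetical, not part of the driver): sde_smmu_ctrl()
 * is reference counted, so every enable must be balanced by a disable; on
 * success it returns the updated reference count.
 */
static int __maybe_unused sde_smmu_example_balanced_access(void)
{
	int rc = sde_smmu_ctrl(1);	/* attach (if first user) and take a ref */

	if (rc < 0)
		return rc;

	/* ... issue SMMU-mapped rotator work here ... */

	sde_smmu_ctrl(0);		/* drop the ref; detach happens at zero */
	return 0;
}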
int sde_smmu_secure_ctrl(int enable)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
int rc = 0;
mutex_lock(&sde_smmu_ref_cnt_lock);
/*
	 * Attach/detach the secure context irrespective of the ref count;
	 * we come here only when secure camera is disabled
*/
if (enable) {
rc = sde_smmu_attach(mdata);
if (!rc)
mdata->iommu_attached = true;
} else {
rc = sde_smmu_detach(mdata);
/*
* keep iommu_attached equal to true,
		 * so that the driver does not attempt to attach
* while in secure state
*/
}
mutex_unlock(&sde_smmu_ref_cnt_lock);
return rc;
}
/*
* sde_smmu_device_create()
* @dev: sde_mdp device
*
 * For SMMU, each context bank is a separate child device of the SDE rotator.
 * Platform devices are created here for those SMMU-related child devices so
 * that their probes run, which is where the SMMU mapping and initialization
 * are handled.
*/
void sde_smmu_device_create(struct device *dev)
{
struct device_node *parent, *child;
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
parent = dev->of_node;
for_each_child_of_node(parent, child) {
if (of_device_is_compatible(child, SMMU_SDE_ROT_SEC)) {
of_platform_device_create(child, NULL, dev);
mdata->sde_smmu
[SDE_IOMMU_DOMAIN_ROT_SECURE].domain_attached = true;
} else if (of_device_is_compatible(child, SMMU_SDE_ROT_UNSEC)) {
of_platform_device_create(child, NULL, dev);
mdata->sde_smmu
[SDE_IOMMU_DOMAIN_ROT_UNSECURE].domain_attached = true;
}
}
}
int sde_smmu_init(struct device *dev)
{
sde_smmu_device_create(dev);
return 0;
}
static int sde_smmu_fault_handler(struct iommu_domain *domain,
struct device *dev, unsigned long iova,
int flags, void *token)
{
struct sde_smmu_client *sde_smmu;
int rc = -EINVAL;
if (!token) {
SDEROT_ERR("Error: token is NULL\n");
return -EINVAL;
}
sde_smmu = (struct sde_smmu_client *)token;
/* trigger rotator dump */
SDEROT_ERR("trigger rotator dump, iova=0x%08lx, flags=0x%x\n",
iova, flags);
SDEROT_ERR("SMMU device:%s", sde_smmu->dev->kobj.name);
/* generate dump, but no panic */
SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus", "vbif_dbg_bus");
/*
	 * return an error code to allow the smmu driver to dump out useful
	 * debug info.
*/
return rc;
}
static struct sde_smmu_domain sde_rot_unsec = {
"rot_0", SDE_IOMMU_DOMAIN_ROT_UNSECURE};
static struct sde_smmu_domain sde_rot_sec = {
"rot_1", SDE_IOMMU_DOMAIN_ROT_SECURE};
static const struct of_device_id sde_smmu_dt_match[] = {
{ .compatible = SMMU_SDE_ROT_UNSEC, .data = &sde_rot_unsec},
{ .compatible = SMMU_SDE_ROT_SEC, .data = &sde_rot_sec},
{}
};
/*
* sde_smmu_probe()
* @pdev: platform device
*
* Each smmu context acts as a separate device and the context banks are
* configured with a VA range.
 * Registers the clks, as each context bank has its own clks, for which voting
 * has to be done every time before using that context bank.
*/
int sde_smmu_probe(struct platform_device *pdev)
{
struct device *dev;
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
struct sde_smmu_client *sde_smmu;
int rc = 0;
struct sde_smmu_domain smmu_domain;
const struct of_device_id *match;
struct sde_module_power *mp;
char name[MAX_CLIENT_NAME_LEN];
u32 sid = 0;
if (!mdata) {
SDEROT_INFO(
"probe failed as mdata is not initializedi, probe defer\n");
return -EPROBE_DEFER;
}
match = of_match_device(sde_smmu_dt_match, &pdev->dev);
if (!match || !match->data) {
SDEROT_ERR("probe failed as match data is invalid\n");
return -EINVAL;
}
smmu_domain = *(struct sde_smmu_domain *) (match->data);
if (smmu_domain.domain >= SDE_IOMMU_MAX_DOMAIN) {
SDEROT_ERR("no matching device found\n");
return -EINVAL;
}
if (of_find_property(pdev->dev.of_node, "iommus", NULL)) {
dev = &pdev->dev;
rc = of_property_read_u32_index(pdev->dev.of_node, "iommus",
1, &sid);
if (rc)
SDEROT_DBG("SID not defined for domain:%d",
smmu_domain.domain);
} else {
SDEROT_ERR("Invalid SMMU ctx for domain:%d\n",
smmu_domain.domain);
return -EINVAL;
}
sde_smmu = &mdata->sde_smmu[smmu_domain.domain];
sde_smmu->domain = smmu_domain.domain;
sde_smmu->sid = sid;
mp = &sde_smmu->mp;
memset(mp, 0, sizeof(struct sde_module_power));
if (of_find_property(pdev->dev.of_node,
"gdsc-mdss-supply", NULL)) {
mp->vreg_config = devm_kzalloc(&pdev->dev,
sizeof(struct sde_vreg), GFP_KERNEL);
if (!mp->vreg_config)
return -ENOMEM;
strlcpy(mp->vreg_config->vreg_name, "gdsc-mdss",
sizeof(mp->vreg_config->vreg_name));
mp->num_vreg = 1;
}
if (mp->vreg_config) {
rc = sde_rot_config_vreg(&pdev->dev, mp->vreg_config,
mp->num_vreg, true);
if (rc) {
SDEROT_ERR("vreg config failed rc=%d\n", rc);
goto release_vreg;
}
}
rc = sde_smmu_clk_register(pdev, mp);
if (rc) {
SDEROT_ERR(
"smmu clk register failed for domain[%d] with err:%d\n",
smmu_domain.domain, rc);
goto disable_vreg;
}
snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
sde_smmu->reg_bus_clt = sde_reg_bus_vote_client_create(name);
if (IS_ERR_OR_NULL(sde_smmu->reg_bus_clt)) {
SDEROT_ERR("mdss bus client register failed\n");
rc = PTR_ERR(sde_smmu->reg_bus_clt);
sde_smmu->reg_bus_clt = NULL;
goto unregister_clk;
}
rc = sde_smmu_enable_power(sde_smmu, true);
if (rc) {
SDEROT_ERR("power enable failed - domain:[%d] rc:%d\n",
smmu_domain.domain, rc);
goto bus_client_destroy;
}
sde_smmu->dev = &pdev->dev;
sde_smmu->rot_domain = iommu_get_domain_for_dev(sde_smmu->dev);
if (!sde_smmu->rot_domain) {
dev_err(&pdev->dev, "iommu get domain failed\n");
return -EINVAL;
}
if (!dev->dma_parms)
dev->dma_parms = devm_kzalloc(dev,
sizeof(*dev->dma_parms), GFP_KERNEL);
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));
iommu_set_fault_handler(sde_smmu->rot_domain,
sde_smmu_fault_handler, (void *)sde_smmu);
sde_smmu_enable_power(sde_smmu, false);
SDEROT_INFO(
"iommu v2 domain[%d] mapping and clk register successful!\n",
smmu_domain.domain);
return 0;
bus_client_destroy:
sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
sde_smmu->reg_bus_clt = NULL;
unregister_clk:
disable_vreg:
sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
sde_smmu->mp.num_vreg, false);
release_vreg:
devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
sde_smmu->mp.vreg_config = NULL;
sde_smmu->mp.num_vreg = 0;
return rc;
}
int sde_smmu_remove(struct platform_device *pdev)
{
int i;
struct sde_smmu_client *sde_smmu;
for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
sde_smmu = sde_smmu_get_cb(i);
if (!sde_smmu || !sde_smmu->dev ||
(sde_smmu->dev != &pdev->dev))
continue;
sde_smmu->dev = NULL;
sde_smmu->rot_domain = NULL;
sde_smmu_enable_power(sde_smmu, false);
sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
sde_smmu->reg_bus_clt = NULL;
sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
sde_smmu->mp.num_vreg, false);
devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
sde_smmu->mp.vreg_config = NULL;
sde_smmu->mp.num_vreg = 0;
}
return 0;
}
static struct platform_driver sde_smmu_driver = {
.probe = sde_smmu_probe,
.remove = sde_smmu_remove,
.shutdown = NULL,
.driver = {
.name = "sde_smmu",
.of_match_table = sde_smmu_dt_match,
},
};
void sde_rotator_smmu_driver_register(void)
{
platform_driver_register(&sde_smmu_driver);
}
void sde_rotator_smmu_driver_unregister(void)
{
platform_driver_unregister(&sde_smmu_driver);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
MODULE_IMPORT_NS(DMA_BUF);
#endif


@@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#ifndef SDE_ROTATOR_SMMU_H
#define SDE_ROTATOR_SMMU_H
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include "sde_rotator_io_util.h"
enum sde_iommu_domain_type {
SDE_IOMMU_DOMAIN_ROT_UNSECURE,
SDE_IOMMU_DOMAIN_ROT_SECURE,
SDE_IOMMU_MAX_DOMAIN
};
int sde_smmu_init(struct device *dev);
static inline int sde_smmu_dma_data_direction(int dir)
{
return dir;
}
#if IS_ENABLED(CONFIG_MSM_SDE_ROTATOR)
int sde_smmu_ctrl(int enable);
#else
static inline int sde_smmu_ctrl(int enable)
{
return 0;
}
#endif /* CONFIG_MSM_SDE_ROTATOR */
struct dma_buf_attachment *sde_smmu_dma_buf_attach(
struct dma_buf *dma_buf, struct device *dev, int domain);
int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
struct sg_table *table, int domain, dma_addr_t *iova,
unsigned long *size, int dir);
void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
int dir, struct dma_buf *dma_buf);
int sde_smmu_secure_ctrl(int enable);
#endif /* SDE_ROTATOR_SMMU_H */


@@ -0,0 +1,435 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-fence.h>
#include <linux/sync_file.h>
#include "sde_rotator_util.h"
#include "sde_rotator_sync.h"
#define SDE_ROT_SYNC_NAME_SIZE 64
#define SDE_ROT_SYNC_DRIVER_NAME "sde_rot"
/**
* struct sde_rot_fence - sync fence context
* @base: base sync fence object
* @name: name of this sync fence
* @fence_list: linked list of outstanding sync fence
*/
struct sde_rot_fence {
struct dma_fence base;
char name[SDE_ROT_SYNC_NAME_SIZE];
struct list_head fence_list;
};
/**
* struct sde_rot_timeline - sync timeline context
* @kref: reference count of timeline
* @lock: serialization lock for timeline and fence update
* @name: name of timeline
* @fence_name: fence name prefix
* @next_value: next commit sequence number
* @curr_value: current retired sequence number
* @context: fence context identifier
* @fence_list_head: linked list of outstanding sync fence
*/
struct sde_rot_timeline {
struct kref kref;
spinlock_t lock;
char name[SDE_ROT_SYNC_NAME_SIZE];
char fence_name[SDE_ROT_SYNC_NAME_SIZE];
u32 next_value;
u32 curr_value;
u64 context;
struct list_head fence_list_head;
};
/*
* to_sde_rot_fence - get rotator fence from fence base object
* @fence: Pointer to fence base object
*/
static struct sde_rot_fence *to_sde_rot_fence(struct dma_fence *fence)
{
return container_of(fence, struct sde_rot_fence, base);
}
/*
* to_sde_rot_timeline - get rotator timeline from fence base object
* @fence: Pointer to fence base object
*/
static struct sde_rot_timeline *to_sde_rot_timeline(struct dma_fence *fence)
{
return container_of(fence->lock, struct sde_rot_timeline, lock);
}
/*
* sde_rotator_free_timeline - Free the given timeline object
* @kref: Pointer to timeline kref object.
*/
static void sde_rotator_free_timeline(struct kref *kref)
{
struct sde_rot_timeline *tl =
container_of(kref, struct sde_rot_timeline, kref);
kfree(tl);
}
/*
* sde_rotator_put_timeline - Put the given timeline object
* @tl: Pointer to timeline object.
*/
static void sde_rotator_put_timeline(struct sde_rot_timeline *tl)
{
if (!tl) {
SDEROT_ERR("invalid parameters\n");
return;
}
kref_put(&tl->kref, sde_rotator_free_timeline);
}
/*
* sde_rotator_get_timeline - Get the given timeline object
* @tl: Pointer to timeline object.
*/
static void sde_rotator_get_timeline(struct sde_rot_timeline *tl)
{
if (!tl) {
SDEROT_ERR("invalid parameters\n");
return;
}
kref_get(&tl->kref);
}
static const char *sde_rot_fence_get_driver_name(struct dma_fence *fence)
{
return SDE_ROT_SYNC_DRIVER_NAME;
}
static const char *sde_rot_fence_get_timeline_name(struct dma_fence *fence)
{
struct sde_rot_timeline *tl = to_sde_rot_timeline(fence);
return tl->name;
}
static bool sde_rot_fence_enable_signaling(struct dma_fence *fence)
{
return true;
}
static bool sde_rot_fence_signaled(struct dma_fence *fence)
{
struct sde_rot_timeline *tl = to_sde_rot_timeline(fence);
bool status;
status = ((s32) (tl->curr_value - fence->seqno)) >= 0;
SDEROT_DBG("status:%d fence seq:%llu and timeline:%d\n",
status, fence->seqno, tl->curr_value);
return status;
}
static void sde_rot_fence_release(struct dma_fence *fence)
{
struct sde_rot_fence *f = to_sde_rot_fence(fence);
unsigned long flags;
spin_lock_irqsave(fence->lock, flags);
if (!list_empty(&f->fence_list))
list_del(&f->fence_list);
spin_unlock_irqrestore(fence->lock, flags);
sde_rotator_put_timeline(to_sde_rot_timeline(fence));
kfree(f);
}
static void sde_rot_fence_value_str(struct dma_fence *fence, char *str,
int size)
{
snprintf(str, size, "%llu", fence->seqno);
}
static void sde_rot_fence_timeline_value_str(struct dma_fence *fence,
char *str, int size)
{
struct sde_rot_timeline *tl = to_sde_rot_timeline(fence);
snprintf(str, size, "%u", tl->curr_value);
}
static struct dma_fence_ops sde_rot_fence_ops = {
.get_driver_name = sde_rot_fence_get_driver_name,
.get_timeline_name = sde_rot_fence_get_timeline_name,
.enable_signaling = sde_rot_fence_enable_signaling,
.signaled = sde_rot_fence_signaled,
.wait = dma_fence_default_wait,
.release = sde_rot_fence_release,
.fence_value_str = sde_rot_fence_value_str,
.timeline_value_str = sde_rot_fence_timeline_value_str,
};
/*
* sde_rotator_create_timeline - Create timeline object with the given name
* @name: Pointer to name character string.
*/
struct sde_rot_timeline *sde_rotator_create_timeline(const char *name)
{
struct sde_rot_timeline *tl;
if (!name) {
SDEROT_ERR("invalid parameters\n");
return NULL;
}
tl = kzalloc(sizeof(struct sde_rot_timeline), GFP_KERNEL);
if (!tl)
return NULL;
kref_init(&tl->kref);
snprintf(tl->name, sizeof(tl->name), "rot_timeline_%s", name);
snprintf(tl->fence_name, sizeof(tl->fence_name), "rot_fence_%s", name);
spin_lock_init(&tl->lock);
tl->context = dma_fence_context_alloc(1);
INIT_LIST_HEAD(&tl->fence_list_head);
return tl;
}
/*
* sde_rotator_destroy_timeline - Destroy the given timeline object
* @tl: Pointer to timeline object.
*/
void sde_rotator_destroy_timeline(struct sde_rot_timeline *tl)
{
sde_rotator_put_timeline(tl);
}
/*
* sde_rotator_inc_timeline_locked - Increment timeline by given amount
* @tl: Pointer to timeline object.
* @increment: the amount to increase the timeline by.
*/
static int sde_rotator_inc_timeline_locked(struct sde_rot_timeline *tl,
int increment)
{
struct sde_rot_fence *f, *next;
tl->curr_value += increment;
list_for_each_entry_safe(f, next, &tl->fence_list_head, fence_list) {
if (dma_fence_is_signaled_locked(&f->base)) {
SDEROT_DBG("%s signaled\n", f->name);
list_del_init(&f->fence_list);
}
}
return 0;
}
/*
* sde_rotator_resync_timeline - Resync timeline to last committed value
* @tl: Pointer to timeline object.
*/
void sde_rotator_resync_timeline(struct sde_rot_timeline *tl)
{
unsigned long flags;
s32 val;
if (!tl) {
SDEROT_ERR("invalid parameters\n");
return;
}
spin_lock_irqsave(&tl->lock, flags);
val = tl->next_value - tl->curr_value;
if (val > 0) {
SDEROT_WARN("flush %s:%d\n", tl->name, val);
sde_rotator_inc_timeline_locked(tl, val);
}
spin_unlock_irqrestore(&tl->lock, flags);
}
/*
* sde_rotator_get_sync_fence - Create fence object from the given timeline
* @tl: Pointer to timeline object
* @fence_fd: Pointer to file descriptor associated with the returned fence.
* Null if not required.
* @timestamp: Pointer to timestamp of the returned fence. Null if not required.
*/
struct sde_rot_sync_fence *sde_rotator_get_sync_fence(
struct sde_rot_timeline *tl, int *fence_fd, u32 *timestamp)
{
struct sde_rot_fence *f;
unsigned long flags;
u32 val;
if (!tl) {
SDEROT_ERR("invalid parameters\n");
return NULL;
}
f = kzalloc(sizeof(struct sde_rot_fence), GFP_KERNEL);
if (!f)
return NULL;
INIT_LIST_HEAD(&f->fence_list);
spin_lock_irqsave(&tl->lock, flags);
val = ++(tl->next_value);
dma_fence_init(&f->base, &sde_rot_fence_ops, &tl->lock,
tl->context, val);
list_add_tail(&f->fence_list, &tl->fence_list_head);
sde_rotator_get_timeline(tl);
spin_unlock_irqrestore(&tl->lock, flags);
snprintf(f->name, sizeof(f->name), "%s_%u", tl->fence_name, val);
if (fence_fd)
*fence_fd = sde_rotator_get_sync_fence_fd(
(struct sde_rot_sync_fence *) &f->base);
if (timestamp)
*timestamp = val;
SDEROT_DBG("output sync fence created at val=%u\n", val);
return (struct sde_rot_sync_fence *) &f->base;
}
/*
* sde_rotator_inc_timeline - Increment timeline by given amount
* @tl: Pointer to timeline object.
* @increment: the amount to increase the timeline by.
*/
int sde_rotator_inc_timeline(struct sde_rot_timeline *tl, int increment)
{
unsigned long flags;
int rc;
if (!tl) {
SDEROT_ERR("invalid parameters\n");
return -EINVAL;
}
spin_lock_irqsave(&tl->lock, flags);
rc = sde_rotator_inc_timeline_locked(tl, increment);
spin_unlock_irqrestore(&tl->lock, flags);
return rc;
}
/*
* sde_rotator_get_timeline_commit_ts - Return commit tick of given timeline
* @tl: Pointer to timeline object.
*/
u32 sde_rotator_get_timeline_commit_ts(struct sde_rot_timeline *tl)
{
if (!tl) {
SDEROT_ERR("invalid parameters\n");
return 0;
}
return tl->next_value;
}
/*
* sde_rotator_get_timeline_retire_ts - Return retire tick of given timeline
* @tl: Pointer to timeline object.
*/
u32 sde_rotator_get_timeline_retire_ts(struct sde_rot_timeline *tl)
{
if (!tl) {
SDEROT_ERR("invalid parameters\n");
return 0;
}
return tl->curr_value;
}
/*
* sde_rotator_put_sync_fence - Destroy given fence object
* @fence: Pointer to fence object.
*/
void sde_rotator_put_sync_fence(struct sde_rot_sync_fence *fence)
{
if (!fence) {
SDEROT_ERR("invalid parameters\n");
return;
}
dma_fence_put((struct dma_fence *) fence);
}
/*
* sde_rotator_wait_sync_fence - Wait until fence signal or timeout
* @fence: Pointer to fence object.
* @timeout: maximum wait time, in msec, for fence to signal.
*/
int sde_rotator_wait_sync_fence(struct sde_rot_sync_fence *fence,
long timeout)
{
int rc;
if (!fence) {
SDEROT_ERR("invalid parameters\n");
return -EINVAL;
}
rc = dma_fence_wait_timeout((struct dma_fence *) fence, false,
msecs_to_jiffies(timeout));
if (rc > 0) {
SDEROT_DBG("fence signaled\n");
rc = 0;
} else if (rc == 0) {
SDEROT_DBG("fence timeout\n");
rc = -ETIMEDOUT;
}
return rc;
}
/*
 * sde_rotator_get_fd_sync_fence - Get fence object of given file descriptor
 * @fd: File descriptor of the fence object.
 */
struct sde_rot_sync_fence *sde_rotator_get_fd_sync_fence(int fd)
{
return (struct sde_rot_sync_fence *) sync_file_get_fence(fd);
}
/*
* sde_rotator_get_sync_fence_fd - Get file descriptor of given fence object
* @fence: Pointer to fence object.
*/
int sde_rotator_get_sync_fence_fd(struct sde_rot_sync_fence *fence)
{
int fd;
struct sync_file *sync_file;
if (!fence) {
SDEROT_ERR("invalid parameters\n");
return -EINVAL;
}
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
SDEROT_ERR("fail to get unused fd\n");
return fd;
}
sync_file = sync_file_create((struct dma_fence *) fence);
if (!sync_file) {
put_unused_fd(fd);
SDEROT_ERR("failed to create sync file\n");
return -ENOMEM;
}
fd_install(fd, sync_file->file);
return fd;
}
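/*
 * Illustrative usage sketch (not part of the original file, kept under
 * "#if 0" so it is never built): a minimal round trip through the
 * timeline/fence API defined above, assuming CONFIG_SYNC_FILE is
 * enabled. The caller name and error handling are hypothetical; only
 * the sde_rotator_* calls come from this file.
 */
#if 0
#include "sde_rotator_sync.h"

static int example_rot_fence_roundtrip(void)
{
	struct sde_rot_timeline *tl;
	struct sde_rot_sync_fence *fence;
	u32 ts = 0;
	int rc;

	tl = sde_rotator_create_timeline("example");
	if (!tl)
		return -ENOMEM;

	/* create an output fence at the next timeline value */
	fence = sde_rotator_get_sync_fence(tl, NULL, &ts);
	if (!fence) {
		sde_rotator_destroy_timeline(tl);
		return -ENOMEM;
	}

	/* advancing the timeline by one commit signals the fence */
	sde_rotator_inc_timeline(tl, 1);

	/* returns 0 immediately because the fence is already signaled */
	rc = sde_rotator_wait_sync_fence(fence, 100);

	sde_rotator_put_sync_fence(fence);
	sde_rotator_destroy_timeline(tl);
	return rc;
}
#endif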


@@ -0,0 +1,107 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef SDE_ROTATOR_SYNC_H
#define SDE_ROTATOR_SYNC_H
#include <linux/types.h>
#include <linux/errno.h>
struct sde_rot_sync_fence;
struct sde_rot_timeline;
#if defined(CONFIG_SYNC_FILE)
struct sde_rot_timeline *sde_rotator_create_timeline(const char *name);
void sde_rotator_destroy_timeline(struct sde_rot_timeline *tl);
struct sde_rot_sync_fence *sde_rotator_get_sync_fence(
struct sde_rot_timeline *tl, int *fence_fd, u32 *timestamp);
void sde_rotator_resync_timeline(struct sde_rot_timeline *tl);
u32 sde_rotator_get_timeline_commit_ts(struct sde_rot_timeline *tl);
u32 sde_rotator_get_timeline_retire_ts(struct sde_rot_timeline *tl);
int sde_rotator_inc_timeline(struct sde_rot_timeline *tl, int increment);
void sde_rotator_put_sync_fence(struct sde_rot_sync_fence *fence);
int sde_rotator_wait_sync_fence(struct sde_rot_sync_fence *fence,
long timeout);
struct sde_rot_sync_fence *sde_rotator_get_fd_sync_fence(int fd);
int sde_rotator_get_sync_fence_fd(struct sde_rot_sync_fence *fence);
#else
static inline
struct sde_rot_timeline *sde_rotator_create_timeline(const char *name)
{
return NULL;
}
static inline
void sde_rotator_destroy_timeline(struct sde_rot_timeline *tl)
{
}
static inline
struct sde_rot_sync_fence *sde_rotator_get_sync_fence(
struct sde_rot_timeline *tl, int *fence_fd, u32 *timestamp)
{
return NULL;
}
static inline
void sde_rotator_resync_timeline(struct sde_rot_timeline *tl)
{
}
static inline
int sde_rotator_inc_timeline(struct sde_rot_timeline *tl, int increment)
{
return 0;
}
static inline
u32 sde_rotator_get_timeline_commit_ts(struct sde_rot_timeline *tl)
{
return 0;
}
static inline
u32 sde_rotator_get_timeline_retire_ts(struct sde_rot_timeline *tl)
{
return 0;
}
static inline
void sde_rotator_put_sync_fence(struct sde_rot_sync_fence *fence)
{
}
static inline
int sde_rotator_wait_sync_fence(struct sde_rot_sync_fence *fence,
long timeout)
{
return 0;
}
static inline
struct sde_rot_sync_fence *sde_rotator_get_fd_sync_fence(int fd)
{
return NULL;
}
static inline
int sde_rotator_get_sync_fence_fd(struct sde_rot_sync_fence *fence)
{
return -EBADF;
}
#endif
#endif /* SDE_ROTATOR_SYNC_H */
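/*
 * Illustrative sketch (not part of the original header, guarded by
 * "#if 0"): because the stubs above return NULL timelines/fences and
 * no-op waits, callers can use the API unconditionally and degrade
 * gracefully when CONFIG_SYNC_FILE is disabled. The caller below is
 * hypothetical.
 */
#if 0
static void example_optional_fence_wait(struct sde_rot_timeline *tl)
{
	struct sde_rot_sync_fence *fence;

	fence = sde_rotator_get_sync_fence(tl, NULL, NULL);
	if (!fence)
		return;	/* stub build or allocation failure: nothing to wait on */

	sde_rotator_wait_sync_fence(fence, 100);
	sde_rotator_put_sync_fence(fence);
}
#endif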


@@ -0,0 +1,306 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*/
#if !defined(TRACE_SDE_ROTATOR_H) || defined(TRACE_HEADER_MULTI_READ)
#define TRACE_SDE_ROTATOR_H
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sde_rotator
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE sde_rotator_trace
#include <linux/tracepoint.h>
#include <sde_rotator_core.h>
DECLARE_EVENT_CLASS(rot_entry_template,
TP_PROTO(u32 ss_id, u32 sq_id, struct sde_rot_trace_entry *rot),
TP_ARGS(ss_id, sq_id, rot),
TP_STRUCT__entry(
__field(u32, ss_id)
__field(u32, sq_id)
__field(u32, pr_id)
__field(u32, flags)
__field(u32, src_fmt)
__field(u16, src_bw)
__field(u16, src_bh)
__field(u16, src_x)
__field(u16, src_y)
__field(u16, src_w)
__field(u16, src_h)
__field(u32, dst_fmt)
__field(u16, dst_bw)
__field(u16, dst_bh)
__field(u16, dst_x)
__field(u16, dst_y)
__field(u16, dst_w)
__field(u16, dst_h)
),
TP_fast_assign(
__entry->ss_id = ss_id;
__entry->sq_id = sq_id;
__entry->pr_id = rot->wb_idx;
__entry->flags = rot->flags;
__entry->src_fmt = rot->input_format;
__entry->src_bw = rot->input_width;
__entry->src_bh = rot->input_height;
__entry->src_x = rot->src_x;
__entry->src_y = rot->src_y;
__entry->src_w = rot->src_w;
__entry->src_h = rot->src_h;
__entry->dst_fmt = rot->output_format;
__entry->dst_bw = rot->output_width;
__entry->dst_bh = rot->output_height;
__entry->dst_x = rot->dst_x;
__entry->dst_y = rot->dst_y;
__entry->dst_w = rot->dst_w;
__entry->dst_h = rot->dst_h;
),
TP_printk("%d.%d|%d|%x|%x|%u,%u|%u,%u,%u,%u|%x|%u,%u|%u,%u,%u,%u|",
__entry->ss_id, __entry->sq_id, __entry->pr_id,
__entry->flags,
__entry->src_fmt, __entry->src_bw, __entry->src_bh,
__entry->src_x, __entry->src_y,
__entry->src_w, __entry->src_h,
__entry->dst_fmt, __entry->dst_bw, __entry->dst_bh,
__entry->dst_x, __entry->dst_y,
__entry->dst_w, __entry->dst_h)
);
DEFINE_EVENT(rot_entry_template, rot_entry_fence,
TP_PROTO(u32 ss_id, u32 sq_id, struct sde_rot_trace_entry *rot),
TP_ARGS(ss_id, sq_id, rot)
);
DEFINE_EVENT(rot_entry_template, rot_entry_commit,
TP_PROTO(u32 ss_id, u32 sq_id, struct sde_rot_trace_entry *rot),
TP_ARGS(ss_id, sq_id, rot)
);
DEFINE_EVENT(rot_entry_template, rot_entry_done,
TP_PROTO(u32 ss_id, u32 sq_id, struct sde_rot_trace_entry *rot),
TP_ARGS(ss_id, sq_id, rot)
);
TRACE_EVENT(rot_perf_set_qos_luts,
TP_PROTO(u32 pnum, u32 fmt, u32 lut, bool linear),
TP_ARGS(pnum, fmt, lut, linear),
TP_STRUCT__entry(
__field(u32, pnum)
__field(u32, fmt)
__field(u32, lut)
__field(bool, linear)
),
TP_fast_assign(
__entry->pnum = pnum;
__entry->fmt = fmt;
__entry->lut = lut;
__entry->linear = linear;
),
TP_printk("pnum=%d fmt=%d lut=0x%x lin:%d",
__entry->pnum, __entry->fmt,
__entry->lut, __entry->linear)
);
TRACE_EVENT(rot_perf_set_panic_luts,
TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 panic_lut,
u32 robust_lut),
TP_ARGS(pnum, fmt, mode, panic_lut, robust_lut),
TP_STRUCT__entry(
__field(u32, pnum)
__field(u32, fmt)
__field(u32, mode)
__field(u32, panic_lut)
__field(u32, robust_lut)
),
TP_fast_assign(
__entry->pnum = pnum;
__entry->fmt = fmt;
__entry->mode = mode;
__entry->panic_lut = panic_lut;
__entry->robust_lut = robust_lut;
),
TP_printk("pnum=%d fmt=%d mode=%d luts[0x%x, 0x%x]",
__entry->pnum, __entry->fmt,
__entry->mode, __entry->panic_lut,
__entry->robust_lut)
);
TRACE_EVENT(rot_perf_set_wm_levels,
TP_PROTO(u32 pnum, u32 use_space, u32 priority_bytes, u32 wm0, u32 wm1,
u32 wm2, u32 mb_cnt, u32 mb_size),
TP_ARGS(pnum, use_space, priority_bytes, wm0, wm1, wm2, mb_cnt,
mb_size),
TP_STRUCT__entry(
__field(u32, pnum)
__field(u32, use_space)
__field(u32, priority_bytes)
__field(u32, wm0)
__field(u32, wm1)
__field(u32, wm2)
__field(u32, mb_cnt)
__field(u32, mb_size)
),
TP_fast_assign(
__entry->pnum = pnum;
__entry->use_space = use_space;
__entry->priority_bytes = priority_bytes;
__entry->wm0 = wm0;
__entry->wm1 = wm1;
__entry->wm2 = wm2;
__entry->mb_cnt = mb_cnt;
__entry->mb_size = mb_size;
),
TP_printk(
"pnum:%d useable_space:%d priority_bytes:%d watermark:[%d | %d | %d] nmb=%d mb_size=%d",
__entry->pnum, __entry->use_space,
__entry->priority_bytes, __entry->wm0, __entry->wm1,
__entry->wm2, __entry->mb_cnt, __entry->mb_size)
);
TRACE_EVENT(rot_perf_set_ot,
TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim),
TP_ARGS(pnum, xin_id, rd_lim),
TP_STRUCT__entry(
__field(u32, pnum)
__field(u32, xin_id)
__field(u32, rd_lim)
),
TP_fast_assign(
__entry->pnum = pnum;
__entry->xin_id = xin_id;
__entry->rd_lim = rd_lim;
),
TP_printk("pnum:%d xin_id:%d ot:%d",
__entry->pnum, __entry->xin_id, __entry->rd_lim)
);
TRACE_EVENT(rot_perf_prefill_calc,
TP_PROTO(u32 pnum, u32 latency_buf, u32 ot, u32 y_buf, u32 y_scaler,
u32 pp_lines, u32 pp_bytes, u32 post_sc, u32 fbc_bytes,
u32 prefill_bytes),
TP_ARGS(pnum, latency_buf, ot, y_buf, y_scaler, pp_lines, pp_bytes,
post_sc, fbc_bytes, prefill_bytes),
TP_STRUCT__entry(
__field(u32, pnum)
__field(u32, latency_buf)
__field(u32, ot)
__field(u32, y_buf)
__field(u32, y_scaler)
__field(u32, pp_lines)
__field(u32, pp_bytes)
__field(u32, post_sc)
__field(u32, fbc_bytes)
__field(u32, prefill_bytes)
),
TP_fast_assign(
__entry->pnum = pnum;
__entry->latency_buf = latency_buf;
__entry->ot = ot;
__entry->y_buf = y_buf;
__entry->y_scaler = y_scaler;
__entry->pp_lines = pp_lines;
__entry->pp_bytes = pp_bytes;
__entry->post_sc = post_sc;
__entry->fbc_bytes = fbc_bytes;
__entry->prefill_bytes = prefill_bytes;
),
TP_printk(
"pnum:%d latency_buf:%d ot:%d y_buf:%d y_scaler:%d pp_lines:%d, pp_bytes=%d post_sc:%d fbc_bytes:%d prefill:%d",
__entry->pnum, __entry->latency_buf, __entry->ot,
__entry->y_buf, __entry->y_scaler, __entry->pp_lines,
__entry->pp_bytes, __entry->post_sc,
__entry->fbc_bytes, __entry->prefill_bytes)
);
TRACE_EVENT(rot_mark_write,
TP_PROTO(int pid, const char *name, bool trace_begin),
TP_ARGS(pid, name, trace_begin),
TP_STRUCT__entry(
__field(int, pid)
__string(trace_name, name)
__field(bool, trace_begin)
),
TP_fast_assign(
__entry->pid = pid;
__assign_str(trace_name, name);
__entry->trace_begin = trace_begin;
),
TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
__entry->pid, __get_str(trace_name))
);
TRACE_EVENT(rot_trace_counter,
TP_PROTO(int pid, char *name, s64 value),
TP_ARGS(pid, name, value),
TP_STRUCT__entry(
__field(int, pid)
__string(counter_name, name)
__field(s64, value)
),
TP_fast_assign(
__entry->pid = current->tgid;
__assign_str(counter_name, name);
__entry->value = value;
),
TP_printk("%d|%s|%lld", __entry->pid,
__get_str(counter_name), __entry->value)
);
TRACE_EVENT(rot_bw_ao_as_context,
TP_PROTO(u32 state),
TP_ARGS(state),
TP_STRUCT__entry(
__field(u32, state)
),
TP_fast_assign(
__entry->state = state;
),
TP_printk("Rotator bw context %s",
__entry->state ? "Active Only" : "Active+Sleep")
);
#define SDE_ROT_TRACE_EVTLOG_SIZE 15
TRACE_EVENT(sde_rot_evtlog,
TP_PROTO(const char *tag, u32 tag_id, u32 cnt, u32 *data),
TP_ARGS(tag, tag_id, cnt, data),
TP_STRUCT__entry(
__field(int, pid)
__string(evtlog_tag, tag)
__field(u32, tag_id)
__array(u32, data, SDE_ROT_TRACE_EVTLOG_SIZE)
),
TP_fast_assign(
__entry->pid = current->tgid;
__assign_str(evtlog_tag, tag);
__entry->tag_id = tag_id;
if (cnt > SDE_ROT_TRACE_EVTLOG_SIZE)
cnt = SDE_ROT_TRACE_EVTLOG_SIZE;
memcpy(__entry->data, data, cnt * sizeof(u32));
memset(&__entry->data[cnt], 0,
(SDE_ROT_TRACE_EVTLOG_SIZE - cnt) *
sizeof(u32));
),
TP_printk("%d|%s:%d|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x|%x",
__entry->pid, __get_str(evtlog_tag),
__entry->tag_id,
__entry->data[0], __entry->data[1],
__entry->data[2], __entry->data[3],
__entry->data[4], __entry->data[5],
__entry->data[6], __entry->data[7],
__entry->data[8], __entry->data[9],
__entry->data[10], __entry->data[11],
__entry->data[12], __entry->data[13],
__entry->data[14])
)
#endif /* if !defined(TRACE_SDE_ROTATOR_H) ||
* defined(TRACE_HEADER_MULTI_READ)
*/
/* This part must be outside protection */
#include <trace/define_trace.h>
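/*
 * Illustrative sketch (not part of the original header, guarded by
 * "#if 0"): emitting the tracepoints declared above from driver code.
 * The caller and the sde_rot_trace_entry instance are hypothetical;
 * the trace_<event>() wrappers themselves are generated by the
 * TRACE_EVENT()/DEFINE_EVENT() macros.
 */
#if 0
#include <linux/sched.h>
#include "sde_rotator_trace.h"

static void example_trace_commit(u32 session_id, u32 sequence_id,
		struct sde_rot_trace_entry *entry)
{
	/* begin marker, rendered as "B|<pid>|rot_commit" */
	trace_rot_mark_write(current->pid, "rot_commit", true);

	/* per-request geometry snapshot at commit time */
	trace_rot_entry_commit(session_id, sequence_id, entry);

	/* matching end marker, rendered as "E|<pid>|rot_commit" */
	trace_rot_mark_write(current->pid, "rot_commit", false);
}
#endif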

File diff is too large; omitted.


@@ -0,0 +1,199 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_ROTATOR_UTIL_H__
#define __SDE_ROTATOR_UTIL_H__
#include <linux/types.h>
#include <linux/file.h>
#include <linux/kref.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include "sde_rotator_hwio.h"
#include "sde_rotator_base.h"
#include "sde_rotator_sync.h"
#include "sde_rotator_io_util.h"
#include "sde_rotator_formats.h"
#define SDE_ROT_MAX_IMG_WIDTH 0x3FFF
#define SDE_ROT_MAX_IMG_HEIGHT 0x3FFF
#define SDEROT_DBG(fmt, ...) pr_debug("<SDEROT_DBG> " fmt, ##__VA_ARGS__)
#define SDEROT_INFO(fmt, ...) pr_info("<SDEROT_INFO> " fmt, ##__VA_ARGS__)
#define SDEROT_INFO_ONCE(fmt, ...) \
pr_info_once("<SDEROT_INFO> " fmt, ##__VA_ARGS__)
#define SDEROT_WARN(fmt, ...) pr_warn("<SDEROT_WARN> " fmt, ##__VA_ARGS__)
#define SDEROT_ERR(fmt, ...) pr_err("<SDEROT_ERR> " fmt, ##__VA_ARGS__)
#define SDEDEV_DBG(dev, fmt, ...) \
dev_dbg(dev, "<SDEROT_DBG> " fmt, ##__VA_ARGS__)
#define SDEDEV_INFO(dev, fmt, ...) \
dev_info(dev, "<SDEROT_INFO> " fmt, ##__VA_ARGS__)
#define SDEDEV_WARN(dev, fmt, ...) \
dev_warn(dev, "<SDEROT_WARN> " fmt, ##__VA_ARGS__)
#define SDEDEV_ERR(dev, fmt, ...) \
dev_err(dev, "<SDEROT_ERR> " fmt, ##__VA_ARGS__)
#define PHY_ADDR_4G (1ULL<<32)
struct sde_rect {
u16 x;
u16 y;
u16 w;
u16 h;
};
/* sde flag values */
#define SDE_ROT_NOP 0
#define SDE_FLIP_LR 0x1
#define SDE_FLIP_UD 0x2
#define SDE_ROT_90 0x4
#define SDE_ROT_180 (SDE_FLIP_UD|SDE_FLIP_LR)
#define SDE_ROT_270 (SDE_ROT_90|SDE_FLIP_UD|SDE_FLIP_LR)
#define SDE_DEINTERLACE 0x80000000
#define SDE_SOURCE_ROTATED_90 0x00100000
#define SDE_SECURE_OVERLAY_SESSION 0x00008000
#define SDE_ROT_EXT_DMA_BUF 0x00010000
#define SDE_SECURE_CAMERA_SESSION 0x00020000
#define SDE_ROT_EXT_IOVA 0x00040000
struct sde_rot_data_type;
struct sde_fb_data {
uint32_t offset;
struct dma_buf *buffer;
int memory_id;
int id;
uint32_t flags;
uint32_t priv;
dma_addr_t addr;
u32 len;
};
struct sde_layer_plane {
/* DMA buffer file descriptor information. */
int fd;
struct dma_buf *buffer;
/* i/o virtual address & length */
dma_addr_t addr;
u32 len;
/* Pixel offset in the dma buffer. */
uint32_t offset;
/* Number of bytes in one scan line including padding bytes. */
uint32_t stride;
};
struct sde_layer_buffer {
/* layer width in pixels. */
uint32_t width;
/* layer height in pixels. */
uint32_t height;
/*
 * layer format as a DRM-style fourcc; refer to drm_fourcc.h for
 * the standard formats
 */
uint32_t format;
/* plane to hold the fd, offset, etc for all color components */
struct sde_layer_plane planes[SDE_ROT_MAX_PLANES];
/* valid planes count in layer planes list */
uint32_t plane_count;
/* compression ratio factor, value depends on the pixel format */
struct sde_mult_factor comp_ratio;
/*
 * Sync fence associated with this buffer. It is used in two ways.
 *
 * 1. For primary and external display, the driver waits to consume
 * the buffer until the producer signals the fence.
 *
 * 2. The writeback device uses this buffer structure for its output
 * buffer, where the driver is the producer. Here the client sends the
 * fence with the buffer to indicate that the consumer is still using
 * the buffer and it is not yet ready for new content.
 */
struct sde_rot_sync_fence *fence;
/* indicate if this is a stream (inline) buffer */
bool sbuf;
/* specify the system cache id in stream buffer mode */
int scid;
/* indicate if system cache writeback is required */
bool writeback;
};
struct sde_mdp_plane_sizes {
u32 num_planes;
u32 plane_size[SDE_ROT_MAX_PLANES];
u32 total_size;
u32 ystride[SDE_ROT_MAX_PLANES];
u32 rau_cnt;
u32 rau_h[2];
};
struct sde_mdp_img_data {
dma_addr_t addr;
unsigned long len;
u32 offset;
u32 flags;
bool mapped;
bool skip_detach;
struct fd srcp_f;
struct dma_buf *srcp_dma_buf;
struct dma_buf_attachment *srcp_attachment;
struct sg_table *srcp_table;
};
struct sde_mdp_data {
u8 num_planes;
struct sde_mdp_img_data p[SDE_ROT_MAX_PLANES];
bool sbuf;
int scid;
bool writeback;
};
void sde_mdp_get_v_h_subsample_rate(u8 chroma_sample,
u8 *v_sample, u8 *h_sample);
static inline u32 sde_mdp_general_align(u32 data, u32 alignment)
{
return ((data + alignment - 1)/alignment) * alignment;
}
void sde_rot_data_calc_offset(struct sde_mdp_data *data, u16 x, u16 y,
struct sde_mdp_plane_sizes *ps, struct sde_mdp_format_params *fmt);
int sde_validate_offset_for_ubwc_format(
struct sde_mdp_format_params *fmt, u16 x, u16 y);
int sde_mdp_data_get_and_validate_size(struct sde_mdp_data *data,
struct sde_fb_data *planes, int num_planes, u32 flags,
struct device *dev, bool rotator, int dir,
struct sde_layer_buffer *buffer);
int sde_mdp_get_plane_sizes(struct sde_mdp_format_params *fmt, u32 w, u32 h,
struct sde_mdp_plane_sizes *ps, u32 bwc_mode,
bool rotation);
int sde_mdp_data_map(struct sde_mdp_data *data, bool rotator, int dir);
int sde_mdp_data_check(struct sde_mdp_data *data,
struct sde_mdp_plane_sizes *ps,
struct sde_mdp_format_params *fmt);
void sde_mdp_data_free(struct sde_mdp_data *data, bool rotator, int dir);
struct dma_buf *sde_rot_get_dmabuf(struct sde_mdp_img_data *data);
#endif /* __SDE_ROTATOR_UTIL_H__ */
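/*
 * Illustrative sketch (not part of the original header, guarded by
 * "#if 0"): composing the sde rotation/flip flags and rounding a
 * stride with sde_mdp_general_align(). The function and the numeric
 * values are hypothetical examples; only the macros and the inline
 * helper come from this header.
 */
#if 0
#include "sde_rotator_util.h"

static void example_flags_and_align(void)
{
	/* a 270-degree rotation is a 90-degree rotation plus both flips */
	u32 flags = SDE_ROT_90 | SDE_FLIP_UD | SDE_FLIP_LR;	/* == SDE_ROT_270 */

	/* round a 4320-byte stride up to the next 64-byte boundary: 4352 */
	u32 stride = sde_mdp_general_align(1080 * 4, 64);

	(void)flags;
	(void)stride;
}
#endif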


@@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_ROTATOR_VBIF_H__
#define __SDE_ROTATOR_VBIF_H__

struct platform_device;

void mdp_vbif_lock(struct platform_device *parent_pdev, bool enable);
#endif /* __SDE_ROTATOR_VBIF_H__ */