Merge branch 'display-kernel.lnx.5.10' into display-kernel.lnx.1.0
Change-Id: I5d2b08380b6b0eb09492b950fb38cd9a0b3196c1
This commit is contained in:
11
config/gki_neodisp.conf
Normal file
@@ -0,0 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2021, The Linux Foundation. All rights reserved.

export CONFIG_DRM_MSM=y
export CONFIG_DRM_MSM_SDE=y
export CONFIG_SYNC_FILE=y
export CONFIG_DRM_MSM_DSI=y
export CONFIG_DSI_PARSER=y
export CONFIG_QCOM_MDSS_PLL=y
export CONFIG_DRM_MSM_REGISTER_LOGGING=y
export CONFIG_DISPLAY_BUILD=m
14
config/gki_neodispconf.h
Normal file
@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 */

#define CONFIG_DRM_MSM 1
#define CONFIG_DRM_MSM_SDE 1
#define CONFIG_SYNC_FILE 1
#define CONFIG_DRM_MSM_DSI 1
#define CONFIG_DSI_PARSER 1
#define CONFIG_DRM_MSM_REGISTER_LOGGING 1
#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1
#define CONFIG_QCOM_MDSS_PLL 1
#define CONFIG_GKI_DISPLAY 1
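These new per-target config pairs mirror each other: the .conf file exports the flags into the Kbuild/make environment, while the matching ...dispconf.h header is force-included (see the "LINUX_INC += -include ..." lines added to msm/Kbuild below) so the same CONFIG_* symbols are visible to the C preprocessor when the display driver is built as an out-of-tree GKI module. A minimal standalone sketch of how driver code typically keys off such a macro; the hard-coded #define only stands in for the force-included header and is not part of this commit:

/* Sketch: in the real build CONFIG_DRM_MSM_DSI is injected via the
 * compiler command line (-include gki_neodispconf.h), not defined here. */
#include <stdio.h>

#define CONFIG_DRM_MSM_DSI 1   /* stand-in for the force-included header */

int main(void)
{
#if defined(CONFIG_DRM_MSM_DSI)
	printf("DSI support compiled in\n");
#else
	printf("DSI support compiled out\n");
#endif
	return 0;
}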
11
config/gki_parrotdisp.conf
Normal file
@@ -0,0 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only

export CONFIG_DRM_MSM=y
export CONFIG_DRM_MSM_SDE=y
export CONFIG_SYNC_FILE=y
export CONFIG_DRM_MSM_DSI=y
export CONFIG_DSI_PARSER=y
export CONFIG_DRM_SDE_WB=y
export CONFIG_QCOM_MDSS_PLL=y
export CONFIG_DRM_MSM_REGISTER_LOGGING=y
export CONFIG_DISPLAY_BUILD=m
16
config/gki_parrotdispconf.h
Normal file
@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define CONFIG_DRM_MSM 1
#define CONFIG_DRM_MSM_SDE 1
#define CONFIG_SYNC_FILE 1
#define CONFIG_DRM_MSM_DSI 1
#define CONFIG_DSI_PARSER 1
#define CONFIG_DRM_SDE_WB 1
#define CONFIG_DRM_MSM_REGISTER_LOGGING 1
#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1
#define CONFIG_QCOM_MDSS_PLL 1
#define CONFIG_GKI_DISPLAY 1
@@ -681,6 +681,8 @@ struct drm_msm_rc_mask_cfg {
|
||||
__u64 cfg_param_07;
|
||||
__u32 cfg_param_08;
|
||||
__u64 cfg_param_09[RC_DATA_SIZE_MAX];
|
||||
__u32 height;
|
||||
__u32 width;
|
||||
};
|
||||
|
||||
#define FP16_SUPPORTED
|
||||
@@ -730,6 +732,8 @@ struct drm_msm_backlight_info {
|
||||
__u32 bl_scale_sv;
|
||||
__u32 status;
|
||||
__u32 min_bl;
|
||||
__u32 bl_scale_max;
|
||||
__u32 bl_scale_sv_max;
|
||||
};
|
||||
|
||||
#define DIMMING_BL_LUT_LEN 8192
|
||||
|
@@ -903,6 +903,7 @@ struct sde_drm_dnsc_blur_cfg {
|
||||
#define DRM_EVENT_MMRM_CB 0X8000000B
|
||||
#define DRM_EVENT_FRAME_DATA 0x8000000C
|
||||
#define DRM_EVENT_DIMMING_BL 0X8000000D
|
||||
#define DRM_EVENT_VM_RELEASE 0X8000000E
|
||||
|
||||
#ifndef DRM_MODE_FLAG_VID_MODE_PANEL
|
||||
#define DRM_MODE_FLAG_VID_MODE_PANEL 0x01
|
||||
|
13
msm/Kbuild
@@ -12,6 +12,16 @@ else
endif
endif

ifeq ($(CONFIG_ARCH_NEO), y)
include $(DISPLAY_ROOT)/config/gki_neodisp.conf
LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_neodispconf.h
endif

ifeq ($(CONFIG_ARCH_PARROT), y)
include $(DISPLAY_ROOT)/config/gki_parrotdisp.conf
LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_parrotdispconf.h
endif

#ifeq ($(CONFIG_ARCH_KALAMA), y)
include $(DISPLAY_ROOT)/config/gki_kalamadisp.conf
LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_kalamadispconf.h
@@ -117,7 +127,8 @@ msm_drm-$(CONFIG_DRM_MSM_DP) += dp/dp_altmode.o \
sde_hdcp_1x.o \
sde_hdcp_2x.o \
dp/dp_pll.o \
dp/dp_pll_5nm.o
dp/dp_pll_5nm.o \
dp/dp_pll_4nm.o

msm_drm-$(CONFIG_DRM_MSM_DP_MST) += dp/dp_mst_drm.o
|
||||
|
||||
|
@@ -124,6 +124,7 @@ struct dp_catalog_private {
|
||||
|
||||
char exe_mode[SZ_4];
|
||||
u32 dp_core_version;
|
||||
u32 dp_phy_version;
|
||||
};
|
||||
|
||||
static u32 dp_read_sw(struct dp_catalog_private *catalog,
|
||||
@@ -452,12 +453,20 @@ static void dp_catalog_aux_get_irq(struct dp_catalog_aux *aux, bool cmd_busy)
|
||||
static bool dp_catalog_ctrl_wait_for_phy_ready(
|
||||
struct dp_catalog_private *catalog)
|
||||
{
|
||||
u32 reg = DP_PHY_STATUS, state;
|
||||
u32 phy_version;
|
||||
u32 reg, state;
|
||||
void __iomem *base = catalog->io.dp_phy->io.base;
|
||||
bool success = true;
|
||||
u32 const poll_sleep_us = 500;
|
||||
u32 const pll_timeout_us = 10000;
|
||||
|
||||
phy_version = dp_catalog_get_dp_phy_version(&catalog->dp_catalog);
|
||||
if (phy_version >= 0x60000000) {
|
||||
reg = DP_PHY_STATUS_V600;
|
||||
} else {
|
||||
reg = DP_PHY_STATUS;
|
||||
}
|
||||
|
||||
if (readl_poll_timeout_atomic((base + reg), state,
|
||||
((state & DP_PHY_READY) > 0),
|
||||
poll_sleep_us, pll_timeout_us)) {
|
||||
@@ -1684,9 +1693,17 @@ static void dp_catalog_ctrl_enable_irq(struct dp_catalog_ctrl *ctrl,
|
||||
dp_write(DP_INTR_STATUS2, DP_INTR_MASK2);
|
||||
dp_write(DP_INTR_STATUS5, DP_INTR_MASK5);
|
||||
} else {
|
||||
/* disable interrupts */
|
||||
dp_write(DP_INTR_STATUS, 0x00);
|
||||
dp_write(DP_INTR_STATUS2, 0x00);
|
||||
dp_write(DP_INTR_STATUS5, 0x00);
|
||||
wmb();
|
||||
|
||||
/* clear all pending interrupts */
|
||||
dp_write(DP_INTR_STATUS, DP_INTERRUPT_STATUS1 << 1);
|
||||
dp_write(DP_INTR_STATUS2, DP_INTERRUPT_STATUS2 << 1);
|
||||
dp_write(DP_INTR_STATUS5, DP_INTERRUPT_STATUS5 << 1);
|
||||
wmb();
|
||||
}
|
||||
}
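The reworked disable path now also clears any pending interrupts after masking them. As the existing "DP_INTERRUPT_STATUS1 << 1" writes suggest, the acknowledge bit for each source appears to sit one position above its status bit in these DP_INTR_STATUS registers, so shifting the status mask left by one yields the matching ack mask. A tiny standalone sketch of that assumed bit layout, with made-up bit positions rather than the real register map:

/* Sketch only: ack bits assumed to sit one position above status bits. */
#include <stdint.h>
#include <stdio.h>

#define HPD_STATUS      (1u << 0)   /* hypothetical status bit */
#define AUX_DONE_STATUS (1u << 3)   /* hypothetical status bit */

int main(void)
{
	uint32_t status_mask = HPD_STATUS | AUX_DONE_STATUS;
	uint32_t ack_mask = status_mask << 1;   /* mirrors DP_INTERRUPT_STATUS1 << 1 */

	printf("status mask 0x%08x -> ack mask 0x%08x\n", status_mask, ack_mask);
	return 0;
}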
|
||||
|
||||
@@ -1967,6 +1984,29 @@ u32 dp_catalog_get_dp_core_version(struct dp_catalog *dp_catalog)
|
||||
return dp_read(DP_HW_VERSION);
|
||||
}
|
||||
|
||||
u32 dp_catalog_get_dp_phy_version(struct dp_catalog *dp_catalog)
|
||||
{
|
||||
struct dp_catalog_private *catalog;
|
||||
struct dp_io_data *io_data;
|
||||
|
||||
if (!dp_catalog) {
|
||||
DP_ERR("invalid input\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog);
|
||||
if (catalog->dp_phy_version)
|
||||
return catalog->dp_phy_version;
|
||||
|
||||
io_data = catalog->io.dp_phy;
|
||||
catalog->dp_phy_version = (dp_read(DP_PHY_REVISION_ID3) << 24) |
|
||||
(dp_read(DP_PHY_REVISION_ID2) << 16) |
|
||||
(dp_read(DP_PHY_REVISION_ID1) << 8) |
|
||||
dp_read(DP_PHY_REVISION_ID0);
|
||||
|
||||
return catalog->dp_phy_version;
|
||||
}
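The new dp_catalog_get_dp_phy_version() packs the four byte-wide PHY revision-ID registers into one 32-bit value (ID3 in the most significant byte) and caches it, so callers can compare against thresholds such as 0x60000000 to select version-specific register offsets. A small standalone sketch of the packing and comparison; the register bytes here are sample values, not real hardware reads:

/* Sketch of the revision packing used above; register reads are replaced
 * by hard-coded sample bytes. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_phy_version(uint8_t id3, uint8_t id2, uint8_t id1, uint8_t id0)
{
	return ((uint32_t)id3 << 24) | ((uint32_t)id2 << 16) |
	       ((uint32_t)id1 << 8) | id0;
}

int main(void)
{
	uint32_t v = pack_phy_version(0x60, 0x00, 0x00, 0x00); /* sample v6.0.0.0 */

	if (v >= 0x60000000)
		printf("0x%08x: use *_V600 register offsets\n", v);
	else
		printf("0x%08x: use legacy register offsets\n", v);
	return 0;
}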
|
||||
|
||||
static int dp_catalog_reg_dump(struct dp_catalog *dp_catalog,
|
||||
char *name, u8 **out_buf, u32 *out_buf_len)
|
||||
{
|
||||
|
@@ -337,4 +337,5 @@ struct dp_catalog_sub *dp_catalog_get_v200(struct device *dev,
struct dp_catalog *catalog, struct dp_catalog_io *io);

u32 dp_catalog_get_dp_core_version(struct dp_catalog *dp_catalog);
u32 dp_catalog_get_dp_phy_version(struct dp_catalog *dp_catalog);
#endif /* _DP_CATALOG_H_ */
|
||||
|
@@ -91,7 +91,7 @@ static void dp_catalog_aux_setup_v420(struct dp_catalog_aux *aux,
|
||||
struct dp_catalog_private_v420 *catalog;
|
||||
struct dp_io_data *io_data;
|
||||
int i = 0;
|
||||
|
||||
u32 phy_version;
|
||||
if (!aux || !cfg) {
|
||||
DP_ERR("invalid input\n");
|
||||
return;
|
||||
@@ -103,10 +103,18 @@ static void dp_catalog_aux_setup_v420(struct dp_catalog_aux *aux,
|
||||
dp_write(DP_PHY_PD_CTL, 0x67);
|
||||
wmb(); /* make sure PD programming happened */
|
||||
|
||||
phy_version = dp_catalog_get_dp_phy_version(catalog->dpc);
|
||||
if (phy_version >= 0x60000000) {
|
||||
/* Turn on BIAS current for PHY/PLL */
|
||||
io_data = catalog->io->dp_pll;
|
||||
dp_write(QSERDES_COM_BIAS_EN_CLKBUFLR_EN_V600, 0x1D);
|
||||
wmb(); /* make sure BIAS programming happened */
|
||||
} else {
|
||||
/* Turn on BIAS current for PHY/PLL */
|
||||
io_data = catalog->io->dp_pll;
|
||||
dp_write(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x17);
|
||||
wmb(); /* make sure BIAS programming happened */
|
||||
}
|
||||
|
||||
io_data = catalog->io->dp_phy;
|
||||
/* DP AUX CFG register programming */
|
||||
@@ -126,15 +134,18 @@ static void dp_catalog_aux_clear_hw_int_v420(struct dp_catalog_aux *aux)
|
||||
struct dp_catalog_private_v420 *catalog;
|
||||
struct dp_io_data *io_data;
|
||||
u32 data = 0;
|
||||
|
||||
u32 phy_version;
|
||||
if (!aux) {
|
||||
DP_ERR("invalid input\n");
|
||||
return;
|
||||
}
|
||||
|
||||
catalog = dp_catalog_get_priv_v420(aux);
|
||||
phy_version = dp_catalog_get_dp_phy_version(catalog->dpc);
|
||||
io_data = catalog->io->dp_phy;
|
||||
|
||||
if (phy_version >= 0x60000000)
|
||||
data = dp_read(DP_PHY_AUX_INTERRUPT_STATUS_V600);
|
||||
else
|
||||
data = dp_read(DP_PHY_AUX_INTERRUPT_STATUS_V420);
|
||||
|
||||
dp_write(DP_PHY_AUX_INTERRUPT_CLEAR_V420, 0x1f);
|
||||
|
@@ -956,6 +956,26 @@ static void dp_ctrl_fec_setup(struct dp_ctrl_private *ctrl)
|
||||
DP_WARN("failed to enable sink fec\n");
|
||||
}
|
||||
|
||||
static int dp_ctrl_mst_send_act(struct dp_ctrl_private *ctrl)
|
||||
{
|
||||
bool act_complete;
|
||||
|
||||
if (!ctrl->mst_mode)
|
||||
return 0;
|
||||
|
||||
ctrl->catalog->trigger_act(ctrl->catalog);
|
||||
msleep(20); /* needs 1 frame time */
|
||||
|
||||
ctrl->catalog->read_act_complete_sts(ctrl->catalog, &act_complete);
|
||||
|
||||
if (!act_complete)
|
||||
DP_ERR("mst act trigger complete failed\n");
|
||||
else
|
||||
DP_MST_DEBUG("mst ACT trigger complete SUCCESS\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dp_ctrl_link_maintenance(struct dp_ctrl *dp_ctrl)
|
||||
{
|
||||
int ret = 0;
|
||||
@@ -993,6 +1013,7 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl *dp_ctrl)
|
||||
|
||||
if (ctrl->stream_count) {
|
||||
dp_ctrl_send_video(ctrl);
|
||||
dp_ctrl_mst_send_act(ctrl);
|
||||
dp_ctrl_wait4video_ready(ctrl);
|
||||
dp_ctrl_fec_setup(ctrl);
|
||||
}
|
||||
@@ -1181,26 +1202,6 @@ static void dp_ctrl_mst_calculate_rg(struct dp_ctrl_private *ctrl,
|
||||
DP_DEBUG("x_int: %d, y_frac_enum: %d\n", x_int, y_frac_enum);
|
||||
}
|
||||
|
||||
static int dp_ctrl_mst_send_act(struct dp_ctrl_private *ctrl)
|
||||
{
|
||||
bool act_complete;
|
||||
|
||||
if (!ctrl->mst_mode)
|
||||
return 0;
|
||||
|
||||
ctrl->catalog->trigger_act(ctrl->catalog);
|
||||
msleep(20); /* needs 1 frame time */
|
||||
|
||||
ctrl->catalog->read_act_complete_sts(ctrl->catalog, &act_complete);
|
||||
|
||||
if (!act_complete)
|
||||
DP_ERR("mst act trigger complete failed\n");
|
||||
else
|
||||
DP_MST_DEBUG("mst ACT trigger complete SUCCESS\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dp_ctrl_mst_stream_setup(struct dp_ctrl_private *ctrl,
|
||||
struct dp_panel *panel)
|
||||
{
|
||||
|
@@ -13,6 +13,7 @@
|
||||
#include <linux/soc/qcom/fsa4480-i2c.h>
|
||||
#include <linux/usb/phy.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/pm_qos.h>
|
||||
|
||||
#include "sde_connector.h"
|
||||
|
||||
@@ -201,6 +202,8 @@ struct dp_display_private {
|
||||
u32 tot_dsc_blks_in_use;
|
||||
|
||||
bool process_hpd_connect;
|
||||
struct dev_pm_qos_request pm_qos_req[NR_CPUS];
|
||||
bool pm_qos_requested;
|
||||
|
||||
struct notifier_block usb_nb;
|
||||
};
|
||||
@@ -285,6 +288,36 @@ static void dp_audio_enable(struct dp_display_private *dp, bool enable)
|
||||
}
|
||||
}
|
||||
|
||||
static void dp_display_qos_request(struct dp_display_private *dp, bool add_vote)
|
||||
{
|
||||
struct device *cpu_dev;
|
||||
int cpu = 0;
|
||||
struct cpumask *cpu_mask;
|
||||
u32 latency = dp->parser->qos_cpu_latency;
|
||||
unsigned long mask = dp->parser->qos_cpu_mask;
|
||||
|
||||
if (!dp->parser->qos_cpu_mask || (dp->pm_qos_requested == add_vote))
|
||||
return;
|
||||
|
||||
cpu_mask = to_cpumask(&mask);
|
||||
for_each_cpu(cpu, cpu_mask) {
|
||||
cpu_dev = get_cpu_device(cpu);
|
||||
if (!cpu_dev) {
|
||||
SDE_DEBUG("%s: failed to get cpu%d device\n", __func__, cpu);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (add_vote)
|
||||
dev_pm_qos_add_request(cpu_dev, &dp->pm_qos_req[cpu],
|
||||
DEV_PM_QOS_RESUME_LATENCY, latency);
|
||||
else
|
||||
dev_pm_qos_remove_request(&dp->pm_qos_req[cpu]);
|
||||
}
|
||||
|
||||
SDE_EVT32_EXTERNAL(add_vote, mask, latency);
|
||||
dp->pm_qos_requested = add_vote;
|
||||
}
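dp_display_qos_request() walks the CPUs named by the devicetree-provided qos_cpu_mask and adds (or removes) a per-CPU resume-latency QoS request, so HDCP authentication is not stalled by deep CPU idle states. A plain-C sketch of iterating a CPU bitmask the same way; the kernel helpers to_cpumask()/for_each_cpu() are replaced by explicit bit tests, and the mask and latency values are examples, not from DT:

/* Sketch: iterate the set bits of a CPU mask, as dp_display_qos_request()
 * does with for_each_cpu(). */
#include <stdio.h>

int main(void)
{
	unsigned long qos_cpu_mask = 0xf0;      /* hypothetical: CPUs 4-7 */
	unsigned int latency_us = 300;          /* hypothetical latency vote */

	for (int cpu = 0; cpu < 8 * (int)sizeof(qos_cpu_mask); cpu++) {
		if (!(qos_cpu_mask & (1ul << cpu)))
			continue;
		printf("cpu%d: add resume-latency request of %u us\n",
		       cpu, latency_us);
	}
	return 0;
}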
|
||||
|
||||
static void dp_display_update_hdcp_status(struct dp_display_private *dp,
|
||||
bool reset)
|
||||
{
|
||||
@@ -403,7 +436,7 @@ static void dp_display_hdcp_register_streams(struct dp_display_private *dp)
|
||||
static void dp_display_hdcp_deregister_stream(struct dp_display_private *dp,
|
||||
enum dp_stream_id stream_id)
|
||||
{
|
||||
if (dp->hdcp.ops->deregister_streams) {
|
||||
if (dp->hdcp.ops->deregister_streams && dp->active_panels[stream_id]) {
|
||||
struct stream_info stream = {stream_id,
|
||||
dp->active_panels[stream_id]->vcpi};
|
||||
|
||||
@@ -499,6 +532,11 @@ static void dp_display_hdcp_process_state(struct dp_display_private *dp)
|
||||
dp->debug->force_encryption && ops && ops->force_encryption)
|
||||
ops->force_encryption(data, dp->debug->force_encryption);
|
||||
|
||||
if (status->hdcp_state == HDCP_STATE_AUTHENTICATED)
|
||||
dp_display_qos_request(dp, false);
|
||||
else
|
||||
dp_display_qos_request(dp, true);
|
||||
|
||||
switch (status->hdcp_state) {
|
||||
case HDCP_STATE_INACTIVE:
|
||||
dp_display_hdcp_register_streams(dp);
|
||||
@@ -907,7 +945,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp)
|
||||
if (!dp->mst.cbs.hpd)
|
||||
goto skip_wait;
|
||||
|
||||
dp->mst.cbs.hpd(&dp->dp_display, true);
|
||||
dp->mst.cbs.hpd(&dp->dp_display, hpd);
|
||||
}
|
||||
|
||||
if (hpd) {
|
||||
@@ -1289,21 +1327,18 @@ static void dp_display_process_mst_hpd_low(struct dp_display_private *dp)
|
||||
|
||||
/*
|
||||
* HPD unplug callflow:
|
||||
* 1. send hpd unplug event with status=disconnected
|
||||
* 2. send hpd unplug on base connector so usermode can disable
|
||||
* 1. send hpd unplug on base connector so usermode can disable
|
||||
* all external displays.
|
||||
* 3. unset mst state in the topology mgr so the branch device
|
||||
* 2. unset mst state in the topology mgr so the branch device
|
||||
* can be cleaned up.
|
||||
*/
|
||||
if (dp->mst.cbs.hpd)
|
||||
dp->mst.cbs.hpd(&dp->dp_display, false);
|
||||
|
||||
if ((dp_display_state_is(DP_STATE_CONNECT_NOTIFIED) ||
|
||||
dp_display_state_is(DP_STATE_ENABLED)))
|
||||
rc = dp_display_send_hpd_notification(dp);
|
||||
|
||||
dp_display_update_mst_state(dp, false);
|
||||
dp_display_set_mst_mgr_state(dp, false);
|
||||
dp_display_update_mst_state(dp, false);
|
||||
}
|
||||
|
||||
DP_MST_DEBUG("mst_hpd_low. mst_active:%d\n", dp->mst.mst_active);
|
||||
|
@@ -316,6 +316,7 @@ static int dp_hdcp2p2_authenticate(void *input)
|
||||
ctrl->sink_status = SINK_CONNECTED;
|
||||
atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATING);
|
||||
|
||||
if (kthread_should_park())
|
||||
kthread_park(ctrl->thread);
|
||||
kfifo_reset(&ctrl->cmd_q);
|
||||
kthread_unpark(ctrl->thread);
|
||||
@@ -679,7 +680,7 @@ static int dp_hdcp2p2_cp_irq(void *input)
|
||||
|
||||
if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL ||
|
||||
atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
|
||||
DP_ERR("invalid hdcp state\n");
|
||||
DP_DEBUG("invalid hdcp state\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@@ -317,7 +317,7 @@ static int dp_parser_get_vreg(struct dp_parser *parser,
|
||||
pm_supply_name = dp_parser_supply_node_name(module);
|
||||
supply_root_node = of_get_child_by_name(of_node, pm_supply_name);
|
||||
if (!supply_root_node) {
|
||||
DP_WARN("no supply entry present: %s\n", pm_supply_name);
|
||||
DP_DEBUG("no supply entry present: %s\n", pm_supply_name);
|
||||
goto novreg;
|
||||
}
|
||||
|
||||
@@ -742,6 +742,26 @@ static void dp_parser_dsc(struct dp_parser *parser)
|
||||
parser->dsc_continuous_pps);
|
||||
}
|
||||
|
||||
static void dp_parser_qos(struct dp_parser *parser)
|
||||
{
|
||||
struct device *dev = &parser->pdev->dev;
|
||||
u32 mask, latency;
|
||||
int rc;
|
||||
|
||||
rc = of_property_read_u32(dev->of_node, "qcom,qos-cpu-latency-us", &latency);
|
||||
if (rc)
|
||||
return;
|
||||
|
||||
rc = of_property_read_u32(dev->of_node, "qcom,qos-cpu-mask", &mask);
|
||||
if (rc)
|
||||
return;
|
||||
|
||||
parser->qos_cpu_mask = mask;
|
||||
parser->qos_cpu_latency = latency;
|
||||
|
||||
DP_DEBUG("qos parsing successful. mask:%x latency:%ld\n", mask, latency);
|
||||
}
|
||||
|
||||
static void dp_parser_fec(struct dp_parser *parser)
|
||||
{
|
||||
struct device *dev = &parser->pdev->dev;
|
||||
@@ -817,6 +837,7 @@ static int dp_parser_parse(struct dp_parser *parser)
|
||||
dp_parser_dsc(parser);
|
||||
dp_parser_fec(parser);
|
||||
dp_parser_widebus(parser);
|
||||
dp_parser_qos(parser);
|
||||
err:
|
||||
return rc;
|
||||
}
|
||||
|
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
 */

#ifndef _DP_PARSER_H_
@@ -198,6 +198,8 @@ static inline char *dp_phy_aux_config_type_to_string(u32 cfg_type)
 * @dsc_continuous_pps: PPS sent every frame by HW
 * @has_widebus: widebus (2PPC) feature enable status
 * @mst_fixed_port: mst port_num reserved for fixed topology
 * @qos_cpu_mask: CPU mask for QOS
 * @qos_cpu_latency: CPU Latency setting for QOS
 * @parse: function to be called by client to parse device tree.
 * @get_io: function to be called by client to get io data.
 * @get_io_buf: function to be called by client to get io buffers.
@@ -227,6 +229,8 @@ struct dp_parser {
bool gpio_aux_switch;
bool lphw_hpd;
u32 mst_fixed_port[MAX_DP_MST_STREAMS];
u32 qos_cpu_mask;
unsigned long qos_cpu_latency;

int (*parse)(struct dp_parser *parser);
struct dp_io_data *(*get_io)(struct dp_parser *parser, char *name);
@@ -54,6 +54,9 @@ static int dp_pll_clock_register(struct dp_pll *pll)
|
||||
case DP_PLL_5NM_V2:
|
||||
rc = dp_pll_clock_register_5nm(pll);
|
||||
break;
|
||||
case DP_PLL_4NM_V1:
|
||||
rc = dp_pll_clock_register_4nm(pll);
|
||||
break;
|
||||
default:
|
||||
rc = -ENOTSUPP;
|
||||
break;
|
||||
@@ -69,6 +72,9 @@ static void dp_pll_clock_unregister(struct dp_pll *pll)
|
||||
case DP_PLL_5NM_V2:
|
||||
dp_pll_clock_unregister_5nm(pll);
|
||||
break;
|
||||
case DP_PLL_4NM_V1:
|
||||
dp_pll_clock_unregister_4nm(pll);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -131,6 +137,8 @@ struct dp_pll *dp_pll_get(struct dp_pll_in *in)
|
||||
pll->revision = DP_PLL_5NM_V1;
|
||||
} else if (!strcmp(label, "5nm-v2")) {
|
||||
pll->revision = DP_PLL_5NM_V2;
|
||||
} else if (!strcmp(label, "4nm-v1")) {
|
||||
pll->revision = DP_PLL_4NM_V1;
|
||||
} else {
|
||||
DP_ERR("Unsupported pll revision\n");
|
||||
rc = -ENOTSUPP;
|
||||
|
@@ -34,6 +34,7 @@ enum dp_pll_revision {
|
||||
DP_PLL_UNKNOWN,
|
||||
DP_PLL_5NM_V1,
|
||||
DP_PLL_5NM_V2,
|
||||
DP_PLL_4NM_V1,
|
||||
};
|
||||
|
||||
static inline const char *dp_pll_get_revision(enum dp_pll_revision rev)
|
||||
@@ -42,6 +43,7 @@ static inline const char *dp_pll_get_revision(enum dp_pll_revision rev)
|
||||
case DP_PLL_UNKNOWN: return "DP_PLL_UNKNOWN";
|
||||
case DP_PLL_5NM_V1: return "DP_PLL_5NM_V1";
|
||||
case DP_PLL_5NM_V2: return "DP_PLL_5NM_V2";
|
||||
case DP_PLL_4NM_V1: return "DP_PLL_4NM_V1";
|
||||
default: return "???";
|
||||
}
|
||||
}
|
||||
@@ -107,7 +109,9 @@ struct dp_pll_db {
|
||||
u32 lock_cmp_en;
|
||||
u32 ssc_step_size1_mode0;
|
||||
u32 ssc_step_size2_mode0;
|
||||
|
||||
u32 ssc_per1;
|
||||
u32 cmp_code1_mode0;
|
||||
u32 cmp_code2_mode0;
|
||||
/* PHY vco divider */
|
||||
u32 phy_vco_div;
|
||||
};
|
||||
@@ -124,6 +128,8 @@ static inline bool is_gdsc_disabled(struct dp_pll *pll)
|
||||
|
||||
int dp_pll_clock_register_5nm(struct dp_pll *pll);
|
||||
void dp_pll_clock_unregister_5nm(struct dp_pll *pll);
|
||||
int dp_pll_clock_register_4nm(struct dp_pll *pll);
|
||||
void dp_pll_clock_unregister_4nm(struct dp_pll *pll);
|
||||
|
||||
struct dp_pll_in {
|
||||
struct platform_device *pdev;
|
||||
|
930
msm/dp/dp_pll_4nm.c
Normal file
@@ -0,0 +1,930 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Display Port PLL driver block diagram for branch clocks
|
||||
*
|
||||
* +------------------------+ +------------------------+
|
||||
* | dp_phy_pll_link_clk | | dp_phy_pll_vco_div_clk |
|
||||
* +------------------------+ +------------------------+
|
||||
* | |
|
||||
* | |
|
||||
* V V
|
||||
* dp_link_clk dp_pixel_clk
|
||||
*
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/regmap.h>
|
||||
#include "clk-regmap-mux.h"
|
||||
#include "dp_hpd.h"
|
||||
#include "dp_debug.h"
|
||||
#include "dp_pll.h"
|
||||
|
||||
#define DP_PHY_CFG 0x0010
|
||||
#define DP_PHY_CFG_1 0x0014
|
||||
#define DP_PHY_PD_CTL 0x0018
|
||||
#define DP_PHY_MODE 0x001C
|
||||
|
||||
#define DP_PHY_AUX_CFG1 0x0024
|
||||
#define DP_PHY_AUX_CFG2 0x0028
|
||||
|
||||
#define DP_PHY_VCO_DIV 0x0070
|
||||
#define DP_PHY_TX0_TX1_LANE_CTL 0x0078
|
||||
#define DP_PHY_TX2_TX3_LANE_CTL 0x009C
|
||||
|
||||
#define DP_PHY_SPARE0 0x00C8
|
||||
#define DP_PHY_STATUS 0x00E4
|
||||
|
||||
/* Tx registers */
|
||||
#define TXn_CLKBUF_ENABLE 0x0008
|
||||
#define TXn_TX_EMP_POST1_LVL 0x000C
|
||||
|
||||
#define TXn_TX_DRV_LVL 0x0014
|
||||
|
||||
#define TXn_RESET_TSYNC_EN 0x001C
|
||||
#define TXn_PRE_STALL_LDO_BOOST_EN 0x0020
|
||||
#define TXn_TX_BAND 0x0024
|
||||
#define TXn_INTERFACE_SELECT 0x002C
|
||||
|
||||
#define TXn_RES_CODE_LANE_OFFSET_TX 0x003C
|
||||
#define TXn_RES_CODE_LANE_OFFSET_RX 0x0040
|
||||
|
||||
#define TXn_TRANSCEIVER_BIAS_EN 0x0054
|
||||
#define TXn_HIGHZ_DRVR_EN 0x0058
|
||||
#define TXn_TX_POL_INV 0x005C
|
||||
#define TXn_PARRATE_REC_DETECT_IDLE_EN 0x0060
|
||||
|
||||
/* PLL register offset */
|
||||
#define QSERDES_COM_BG_TIMER 0x00BC
|
||||
#define QSERDES_COM_SSC_EN_CENTER 0x00C0
|
||||
#define QSERDES_COM_SSC_ADJ_PER1 0x00C4
|
||||
#define QSERDES_COM_SSC_PER1 0x00CC
|
||||
#define QSERDES_COM_SSC_PER2 0x00D0
|
||||
#define QSERDES_COM_SSC_STEP_SIZE1_MODE0 0x0060
|
||||
#define QSERDES_COM_SSC_STEP_SIZE2_MODE0 0X0064
|
||||
#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x00DC
|
||||
#define QSERDES_COM_CLK_ENABLE1 0x00E0
|
||||
#define QSERDES_COM_SYS_CLK_CTRL 0x00E4
|
||||
#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x00E8
|
||||
#define QSERDES_COM_PLL_IVCO 0x00F4
|
||||
|
||||
#define QSERDES_COM_CP_CTRL_MODE0 0x0070
|
||||
#define QSERDES_COM_PLL_RCTRL_MODE0 0x0074
|
||||
#define QSERDES_COM_PLL_CCTRL_MODE0 0x0078
|
||||
#define QSERDES_COM_SYSCLK_EN_SEL 0x0110
|
||||
#define QSERDES_COM_RESETSM_CNTRL 0x0118
|
||||
#define QSERDES_COM_LOCK_CMP_EN 0x0120
|
||||
#define QSERDES_COM_LOCK_CMP1_MODE0 0x0080
|
||||
#define QSERDES_COM_LOCK_CMP2_MODE0 0x0084
|
||||
|
||||
#define QSERDES_COM_DEC_START_MODE0 0x0088
|
||||
#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0090
|
||||
#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0094
|
||||
#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0098
|
||||
#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00A0
|
||||
#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x00A4
|
||||
#define QSERDES_COM_VCO_TUNE_CTRL 0x013C
|
||||
#define QSERDES_COM_VCO_TUNE_MAP 0x0140
|
||||
|
||||
#define QSERDES_COM_CMN_STATUS 0x01D0
|
||||
#define QSERDES_COM_CLK_SEL 0x0164
|
||||
#define QSERDES_COM_HSCLK_SEL_1 0x003C
|
||||
|
||||
#define QSERDES_COM_CORECLK_DIV_MODE0 0x007C
|
||||
|
||||
#define QSERDES_COM_CORE_CLK_EN 0x0170
|
||||
#define QSERDES_COM_C_READY_STATUS 0x01F8
|
||||
#define QSERDES_COM_CMN_CONFIG_1 0x0174
|
||||
|
||||
#define QSERDES_COM_SVS_MODE_CLK_SEL 0x017C
|
||||
#define QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x0058
|
||||
#define QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x005C
|
||||
/* Tx tran offsets */
|
||||
#define DP_TRAN_DRVR_EMP_EN 0x00C0
|
||||
#define DP_TX_INTERFACE_MODE 0x00C4
|
||||
|
||||
/* Tx VMODE offsets */
|
||||
#define DP_VMODE_CTRL1 0x00C8
|
||||
|
||||
#define DP_PHY_PLL_POLL_SLEEP_US 500
|
||||
#define DP_PHY_PLL_POLL_TIMEOUT_US 10000
|
||||
|
||||
#define DP_VCO_RATE_8100MHZDIV1000 8100000UL
|
||||
#define DP_VCO_RATE_9720MHZDIV1000 9720000UL
|
||||
#define DP_VCO_RATE_10800MHZDIV1000 10800000UL
|
||||
|
||||
#define DP_PLL_NUM_CLKS 2
|
||||
|
||||
#define DP_4NM_C_READY BIT(0)
|
||||
#define DP_4NM_FREQ_DONE BIT(0)
|
||||
#define DP_4NM_PLL_LOCKED BIT(1)
|
||||
#define DP_4NM_PHY_READY BIT(1)
|
||||
#define DP_4NM_TSYNC_DONE BIT(0)
|
||||
|
||||
static int dp_vco_clk_set_div(struct dp_pll *pll, unsigned int div)
|
||||
{
|
||||
u32 val = 0;
|
||||
|
||||
if (!pll) {
|
||||
DP_ERR("invalid input parameters\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (is_gdsc_disabled(pll))
|
||||
return -EINVAL;
|
||||
|
||||
val = dp_pll_read(dp_phy, DP_PHY_VCO_DIV);
|
||||
val &= ~0x03;
|
||||
|
||||
switch (div) {
|
||||
case 2:
|
||||
val |= 1;
|
||||
break;
|
||||
case 4:
|
||||
val |= 2;
|
||||
break;
|
||||
case 6:
|
||||
/* When div = 6, val is 0, so do nothing here */
|
||||
;
|
||||
break;
|
||||
case 8:
|
||||
val |= 3;
|
||||
break;
|
||||
default:
|
||||
DP_DEBUG("unsupported div value %d\n", div);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dp_pll_write(dp_phy, DP_PHY_VCO_DIV, val);
|
||||
/* Make sure the PHY registers writes are done */
|
||||
wmb();
|
||||
|
||||
DP_DEBUG("val=%d div=%x\n", val, div);
|
||||
return 0;
|
||||
}
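dp_vco_clk_set_div() encodes the requested divider into the low two bits of DP_PHY_VCO_DIV with a read-modify-write (6 maps to 0, 2 to 1, 4 to 2, 8 to 3). A standalone sketch of just that encoding step; the starting register value is made up:

/* Sketch of the DP_PHY_VCO_DIV field encoding used by dp_vco_clk_set_div(). */
#include <stdint.h>
#include <stdio.h>

static int vco_div_to_field(unsigned int div, uint32_t *field)
{
	switch (div) {
	case 6: *field = 0; return 0;
	case 2: *field = 1; return 0;
	case 4: *field = 2; return 0;
	case 8: *field = 3; return 0;
	default: return -1;             /* unsupported divider */
	}
}

int main(void)
{
	uint32_t reg = 0xA8;            /* pretend current register value */
	uint32_t field;

	if (vco_div_to_field(4, &field) == 0) {
		reg = (reg & ~0x03u) | field;   /* read-modify-write low two bits */
		printf("DP_PHY_VCO_DIV -> 0x%02x\n", reg);
	}
	return 0;
}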
|
||||
|
||||
static int set_vco_div(struct dp_pll *pll, unsigned long rate)
|
||||
{
|
||||
int div;
|
||||
int rc = 0;
|
||||
|
||||
if (rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000)
|
||||
div = 6;
|
||||
else if (rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
|
||||
div = 4;
|
||||
else
|
||||
div = 2;
|
||||
|
||||
rc = dp_vco_clk_set_div(pll, div);
|
||||
if (rc < 0) {
|
||||
DP_DEBUG("set vco div failed\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dp_vco_pll_init_db_4nm(struct dp_pll_db *pdb,
|
||||
unsigned long rate)
|
||||
{
|
||||
struct dp_pll *pll = pdb->pll;
|
||||
u32 spare_value = 0;
|
||||
|
||||
spare_value = dp_pll_read(dp_phy, DP_PHY_SPARE0);
|
||||
pdb->lane_cnt = spare_value & 0x0F;
|
||||
pdb->orientation = (spare_value & 0xF0) >> 4;
|
||||
|
||||
DP_DEBUG("spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
|
||||
spare_value, pdb->lane_cnt, pdb->orientation);
|
||||
|
||||
pdb->div_frac_start1_mode0 = 0x00;
|
||||
pdb->integloop_gain0_mode0 = 0x3f;
|
||||
pdb->integloop_gain1_mode0 = 0x00;
|
||||
|
||||
switch (rate) {
|
||||
case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
|
||||
DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_9720MHZDIV1000);
|
||||
pdb->hsclk_sel = 0x05;
|
||||
pdb->dec_start_mode0 = 0x69;
|
||||
pdb->div_frac_start2_mode0 = 0x80;
|
||||
pdb->div_frac_start3_mode0 = 0x07;
|
||||
pdb->lock_cmp1_mode0 = 0x6f;
|
||||
pdb->lock_cmp2_mode0 = 0x08;
|
||||
pdb->phy_vco_div = 0x1;
|
||||
pdb->lock_cmp_en = 0x04;
|
||||
pdb->ssc_step_size1_mode0 = 0x45;
|
||||
pdb->ssc_step_size2_mode0 = 0x06;
|
||||
pdb->ssc_per1 = 0x36;
|
||||
pdb->cmp_code1_mode0 = 0xE2;
|
||||
pdb->cmp_code2_mode0 = 0x18;
|
||||
break;
|
||||
case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
|
||||
DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000);
|
||||
pdb->hsclk_sel = 0x03;
|
||||
pdb->dec_start_mode0 = 0x69;
|
||||
pdb->div_frac_start2_mode0 = 0x80;
|
||||
pdb->div_frac_start3_mode0 = 0x07;
|
||||
pdb->lock_cmp1_mode0 = 0x0f;
|
||||
pdb->lock_cmp2_mode0 = 0x0e;
|
||||
pdb->phy_vco_div = 0x1;
|
||||
pdb->lock_cmp_en = 0x08;
|
||||
pdb->ssc_step_size1_mode0 = 0x13;
|
||||
pdb->ssc_step_size2_mode0 = 0x06;
|
||||
pdb->ssc_per1 = 0x40;
|
||||
pdb->cmp_code1_mode0 = 0xE2;
|
||||
pdb->cmp_code2_mode0 = 0x18;
|
||||
break;
|
||||
case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
|
||||
DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000);
|
||||
pdb->hsclk_sel = 0x01;
|
||||
pdb->dec_start_mode0 = 0x8c;
|
||||
pdb->div_frac_start2_mode0 = 0x00;
|
||||
pdb->div_frac_start3_mode0 = 0x0a;
|
||||
pdb->lock_cmp1_mode0 = 0x1f;
|
||||
pdb->lock_cmp2_mode0 = 0x1c;
|
||||
pdb->phy_vco_div = 0x2;
|
||||
pdb->lock_cmp_en = 0x08;
|
||||
pdb->ssc_step_size1_mode0 = 0x1a;
|
||||
pdb->ssc_step_size2_mode0 = 0x08;
|
||||
pdb->ssc_per1 = 0x40;
|
||||
pdb->cmp_code1_mode0 = 0x2E;
|
||||
pdb->cmp_code2_mode0 = 0x21;
|
||||
break;
|
||||
case DP_VCO_HSCLK_RATE_8100MHZDIV1000:
|
||||
DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_8100MHZDIV1000);
|
||||
pdb->hsclk_sel = 0x00;
|
||||
pdb->dec_start_mode0 = 0x69;
|
||||
pdb->div_frac_start2_mode0 = 0x80;
|
||||
pdb->div_frac_start3_mode0 = 0x07;
|
||||
pdb->lock_cmp1_mode0 = 0x2f;
|
||||
pdb->lock_cmp2_mode0 = 0x2a;
|
||||
pdb->phy_vco_div = 0x0;
|
||||
pdb->lock_cmp_en = 0x08;
|
||||
pdb->ssc_step_size1_mode0 = 0x13;
|
||||
pdb->ssc_step_size2_mode0 = 0x06;
|
||||
pdb->ssc_per1 = 0x40;
|
||||
pdb->cmp_code1_mode0 = 0xE2;
|
||||
pdb->cmp_code2_mode0 = 0x18;
|
||||
break;
|
||||
default:
|
||||
DP_ERR("unsupported rate %ld\n", rate);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dp_config_vco_rate_4nm(struct dp_pll *pll,
|
||||
unsigned long rate)
|
||||
{
|
||||
int rc = 0;
|
||||
struct dp_pll_db *pdb = (struct dp_pll_db *)pll->priv;
|
||||
|
||||
rc = dp_vco_pll_init_db_4nm(pdb, rate);
|
||||
if (rc < 0) {
|
||||
DP_ERR("VCO Init DB failed\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
dp_pll_write(dp_phy, DP_PHY_CFG_1, 0x0F);
|
||||
|
||||
if (pdb->lane_cnt != 4) {
|
||||
if (pdb->orientation == ORIENTATION_CC2)
|
||||
dp_pll_write(dp_phy, DP_PHY_PD_CTL, 0x6d);
|
||||
else
|
||||
dp_pll_write(dp_phy, DP_PHY_PD_CTL, 0x75);
|
||||
} else {
|
||||
dp_pll_write(dp_phy, DP_PHY_PD_CTL, 0x7d);
|
||||
}
|
||||
|
||||
/* Make sure the PHY register writes are done */
|
||||
wmb();
|
||||
|
||||
dp_pll_write(dp_pll, QSERDES_COM_SVS_MODE_CLK_SEL, 0x15);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_SYSCLK_EN_SEL, 0x3b);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_SYS_CLK_CTRL, 0x02);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_CLK_ENABLE1, 0x0c);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_CLK_SEL, 0x30);
|
||||
/* Make sure the PHY register writes are done */
|
||||
wmb();
|
||||
|
||||
/* PLL Optimization */
|
||||
dp_pll_write(dp_pll, QSERDES_COM_PLL_IVCO, 0x0f);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_CP_CTRL_MODE0, 0x06);
|
||||
/* Make sure the PLL register writes are done */
|
||||
wmb();
|
||||
|
||||
/* link rate dependent params */
|
||||
dp_pll_write(dp_pll, QSERDES_COM_HSCLK_SEL_1, pdb->hsclk_sel);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_DEC_START_MODE0, pdb->dec_start_mode0);
|
||||
dp_pll_write(dp_pll,
|
||||
QSERDES_COM_DIV_FRAC_START1_MODE0, pdb->div_frac_start1_mode0);
|
||||
dp_pll_write(dp_pll,
|
||||
QSERDES_COM_DIV_FRAC_START2_MODE0, pdb->div_frac_start2_mode0);
|
||||
dp_pll_write(dp_pll,
|
||||
QSERDES_COM_DIV_FRAC_START3_MODE0, pdb->div_frac_start3_mode0);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP1_MODE0, pdb->lock_cmp1_mode0);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP2_MODE0, pdb->lock_cmp2_mode0);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP_EN, pdb->lock_cmp_en);
|
||||
dp_pll_write(dp_phy, DP_PHY_VCO_DIV, pdb->phy_vco_div);
|
||||
/* Make sure the PLL register writes are done */
|
||||
wmb();
|
||||
|
||||
dp_pll_write(dp_pll, QSERDES_COM_CMN_CONFIG_1, 0x12);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_VCO_TUNE_MAP, 0x00);
|
||||
/* Make sure the PHY register writes are done */
|
||||
wmb();
|
||||
|
||||
dp_pll_write(dp_pll, QSERDES_COM_BG_TIMER, 0x0e);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_CORECLK_DIV_MODE0, 0x14);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_VCO_TUNE_CTRL, 0x00);
|
||||
|
||||
if (pll->bonding_en)
|
||||
dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1f);
|
||||
else
|
||||
dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1D);
|
||||
|
||||
dp_pll_write(dp_pll, QSERDES_COM_CORE_CLK_EN, 0x1f);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0, pdb->cmp_code1_mode0);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0, pdb->cmp_code2_mode0);
|
||||
/* Make sure the PHY register writes are done */
|
||||
wmb();
|
||||
|
||||
if (pll->ssc_en) {
|
||||
dp_pll_write(dp_pll, QSERDES_COM_SSC_EN_CENTER, 0x01);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_SSC_ADJ_PER1, 0x00);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_SSC_PER1, pdb->ssc_per1);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_SSC_PER2, 0x01);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_SSC_STEP_SIZE1_MODE0,
|
||||
pdb->ssc_step_size1_mode0);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_SSC_STEP_SIZE2_MODE0,
|
||||
pdb->ssc_step_size2_mode0);
|
||||
}
|
||||
|
||||
if (pdb->orientation == ORIENTATION_CC2)
|
||||
dp_pll_write(dp_phy, DP_PHY_MODE, 0x4c);
|
||||
else
|
||||
dp_pll_write(dp_phy, DP_PHY_MODE, 0x5c);
|
||||
|
||||
dp_pll_write(dp_phy, DP_PHY_AUX_CFG1, 0x13);
|
||||
dp_pll_write(dp_phy, DP_PHY_AUX_CFG2, 0xA4);
|
||||
/* Make sure the PLL register writes are done */
|
||||
wmb();
|
||||
|
||||
/* TX-0 register configuration */
|
||||
dp_pll_write(dp_phy, DP_PHY_TX0_TX1_LANE_CTL, 0x05);
|
||||
dp_pll_write(dp_ln_tx0, DP_VMODE_CTRL1, 0x40);
|
||||
dp_pll_write(dp_ln_tx0, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
|
||||
dp_pll_write(dp_ln_tx0, TXn_INTERFACE_SELECT, 0x3b);
|
||||
dp_pll_write(dp_ln_tx0, TXn_CLKBUF_ENABLE, 0x0f);
|
||||
dp_pll_write(dp_ln_tx0, TXn_RESET_TSYNC_EN, 0x03);
|
||||
dp_pll_write(dp_ln_tx0, DP_TRAN_DRVR_EMP_EN, 0xf);
|
||||
dp_pll_write(dp_ln_tx0, TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
|
||||
dp_pll_write(dp_ln_tx0, DP_TX_INTERFACE_MODE, 0x00);
|
||||
dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_TX, 0x0A);
|
||||
dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_RX, 0x11);
|
||||
dp_pll_write(dp_ln_tx0, TXn_TX_BAND, 0x04);
|
||||
/* Make sure the PLL register writes are done */
|
||||
wmb();
|
||||
|
||||
/* TX-1 register configuration */
|
||||
dp_pll_write(dp_phy, DP_PHY_TX2_TX3_LANE_CTL, 0x05);
|
||||
dp_pll_write(dp_ln_tx1, DP_VMODE_CTRL1, 0x40);
|
||||
dp_pll_write(dp_ln_tx1, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
|
||||
dp_pll_write(dp_ln_tx1, TXn_INTERFACE_SELECT, 0x3b);
|
||||
dp_pll_write(dp_ln_tx1, TXn_CLKBUF_ENABLE, 0x0f);
|
||||
dp_pll_write(dp_ln_tx1, TXn_RESET_TSYNC_EN, 0x03);
|
||||
dp_pll_write(dp_ln_tx1, DP_TRAN_DRVR_EMP_EN, 0xf);
|
||||
dp_pll_write(dp_ln_tx1, TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
|
||||
dp_pll_write(dp_ln_tx1, DP_TX_INTERFACE_MODE, 0x00);
|
||||
dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_TX, 0x0A);
|
||||
dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_RX, 0x11);
|
||||
dp_pll_write(dp_ln_tx1, TXn_TX_BAND, 0x04);
|
||||
/* Make sure the PHY register writes are done */
|
||||
wmb();
|
||||
|
||||
return set_vco_div(pll, rate);
|
||||
}
|
||||
|
||||
enum dp_4nm_pll_status {
|
||||
C_READY,
|
||||
FREQ_DONE,
|
||||
PLL_LOCKED,
|
||||
PHY_READY,
|
||||
TSYNC_DONE,
|
||||
};
|
||||
|
||||
char *dp_4nm_pll_get_status_name(enum dp_4nm_pll_status status)
|
||||
{
|
||||
switch (status) {
|
||||
case C_READY:
|
||||
return "C_READY";
|
||||
case FREQ_DONE:
|
||||
return "FREQ_DONE";
|
||||
case PLL_LOCKED:
|
||||
return "PLL_LOCKED";
|
||||
case PHY_READY:
|
||||
return "PHY_READY";
|
||||
case TSYNC_DONE:
|
||||
return "TSYNC_DONE";
|
||||
default:
|
||||
return "unknown";
|
||||
}
|
||||
}
|
||||
|
||||
static bool dp_4nm_pll_get_status(struct dp_pll *pll,
|
||||
enum dp_4nm_pll_status status)
|
||||
{
|
||||
u32 reg, state, bit;
|
||||
void __iomem *base;
|
||||
bool success = true;
|
||||
|
||||
switch (status) {
|
||||
case C_READY:
|
||||
base = dp_pll_get_base(dp_pll);
|
||||
reg = QSERDES_COM_C_READY_STATUS;
|
||||
bit = DP_4NM_C_READY;
|
||||
break;
|
||||
case FREQ_DONE:
|
||||
base = dp_pll_get_base(dp_pll);
|
||||
reg = QSERDES_COM_CMN_STATUS;
|
||||
bit = DP_4NM_FREQ_DONE;
|
||||
break;
|
||||
case PLL_LOCKED:
|
||||
base = dp_pll_get_base(dp_pll);
|
||||
reg = QSERDES_COM_CMN_STATUS;
|
||||
bit = DP_4NM_PLL_LOCKED;
|
||||
break;
|
||||
case PHY_READY:
|
||||
base = dp_pll_get_base(dp_phy);
|
||||
reg = DP_PHY_STATUS;
|
||||
bit = DP_4NM_PHY_READY;
|
||||
break;
|
||||
case TSYNC_DONE:
|
||||
base = dp_pll_get_base(dp_phy);
|
||||
reg = DP_PHY_STATUS;
|
||||
bit = DP_4NM_TSYNC_DONE;
|
||||
break;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
if (readl_poll_timeout_atomic((base + reg), state,
|
||||
((state & bit) > 0),
|
||||
DP_PHY_PLL_POLL_SLEEP_US,
|
||||
DP_PHY_PLL_POLL_TIMEOUT_US)) {
|
||||
DP_ERR("%s failed, status=%x\n",
|
||||
dp_4nm_pll_get_status_name(status), state);
|
||||
|
||||
success = false;
|
||||
}
|
||||
|
||||
return success;
|
||||
}
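dp_4nm_pll_get_status() reduces every lock/ready check to "poll one register until a bit is set or a timeout expires", delegating to readl_poll_timeout_atomic() with a 500 us poll interval and 10 ms timeout. A userspace-style sketch of an equivalent poll loop; the MMIO read is simulated:

/* Sketch of a poll-bit-with-timeout loop equivalent to what
 * readl_poll_timeout_atomic() provides; the "register" is simulated. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t simulated_read(int attempt)
{
	return attempt >= 3 ? 0x2 : 0x0;        /* bit 1 sets on the 4th read */
}

static bool poll_bit(uint32_t bit, unsigned int sleep_us, unsigned int timeout_us)
{
	unsigned int waited = 0;
	int attempt = 0;

	while (waited <= timeout_us) {
		if (simulated_read(attempt++) & bit)
			return true;
		waited += sleep_us;             /* stand-in for udelay(sleep_us) */
	}
	return false;
}

int main(void)
{
	printf("PLL_LOCKED: %s\n", poll_bit(0x2, 500, 10000) ? "ok" : "timeout");
	return 0;
}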
|
||||
|
||||
static int dp_pll_enable_4nm(struct dp_pll *pll)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
pll->aux->state &= ~DP_STATE_PLL_LOCKED;
|
||||
|
||||
dp_pll_write(dp_phy, DP_PHY_CFG, 0x01);
|
||||
dp_pll_write(dp_phy, DP_PHY_CFG, 0x05);
|
||||
dp_pll_write(dp_phy, DP_PHY_CFG, 0x01);
|
||||
dp_pll_write(dp_phy, DP_PHY_CFG, 0x09);
|
||||
dp_pll_write(dp_pll, QSERDES_COM_RESETSM_CNTRL, 0x20);
|
||||
wmb(); /* Make sure the PLL register writes are done */
|
||||
|
||||
if (!dp_4nm_pll_get_status(pll, C_READY)) {
|
||||
rc = -EINVAL;
|
||||
goto lock_err;
|
||||
}
|
||||
|
||||
if (!dp_4nm_pll_get_status(pll, FREQ_DONE)) {
|
||||
rc = -EINVAL;
|
||||
goto lock_err;
|
||||
}
|
||||
|
||||
if (!dp_4nm_pll_get_status(pll, PLL_LOCKED)) {
|
||||
rc = -EINVAL;
|
||||
goto lock_err;
|
||||
}
|
||||
|
||||
dp_pll_write(dp_phy, DP_PHY_CFG, 0x19);
|
||||
/* Make sure the PHY register writes are done */
|
||||
wmb();
|
||||
|
||||
if (!dp_4nm_pll_get_status(pll, TSYNC_DONE)) {
|
||||
rc = -EINVAL;
|
||||
goto lock_err;
|
||||
}
|
||||
|
||||
if (!dp_4nm_pll_get_status(pll, PHY_READY)) {
|
||||
rc = -EINVAL;
|
||||
goto lock_err;
|
||||
}
|
||||
|
||||
pll->aux->state |= DP_STATE_PLL_LOCKED;
|
||||
DP_DEBUG("PLL is locked\n");
|
||||
|
||||
lock_err:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void dp_pll_disable_4nm(struct dp_pll *pll)
|
||||
{
|
||||
/* Assert DP PHY power down */
|
||||
dp_pll_write(dp_phy, DP_PHY_PD_CTL, 0x2);
|
||||
/*
|
||||
* Make sure all the register writes to disable PLL are
|
||||
* completed before doing any other operation
|
||||
*/
|
||||
wmb();
|
||||
}
|
||||
|
||||
static int dp_vco_set_rate_4nm(struct dp_pll *pll, unsigned long rate)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (!pll) {
|
||||
DP_ERR("invalid input parameters\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DP_DEBUG("DP lane CLK rate=%ld\n", rate);
|
||||
|
||||
rc = dp_config_vco_rate_4nm(pll, rate);
|
||||
if (rc < 0) {
|
||||
DP_ERR("Failed to set clk rate\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int dp_regulator_enable_4nm(struct dp_parser *parser,
|
||||
enum dp_pm_type pm_type, bool enable)
|
||||
{
|
||||
int rc = 0;
|
||||
struct dss_module_power mp;
|
||||
|
||||
if (pm_type < DP_CORE_PM || pm_type >= DP_MAX_PM) {
|
||||
DP_ERR("invalid resource: %d %s\n", pm_type,
|
||||
dp_parser_pm_name(pm_type));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mp = parser->mp[pm_type];
|
||||
rc = msm_dss_enable_vreg(mp.vreg_config, mp.num_vreg, enable);
|
||||
if (rc) {
|
||||
DP_ERR("failed to '%s' vregs for %s\n",
|
||||
enable ? "enable" : "disable",
|
||||
dp_parser_pm_name(pm_type));
|
||||
return rc;
|
||||
}
|
||||
|
||||
DP_DEBUG("success: '%s' vregs for %s\n", enable ? "enable" : "disable",
|
||||
dp_parser_pm_name(pm_type));
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int dp_pll_configure(struct dp_pll *pll, unsigned long rate)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (!pll || !rate) {
|
||||
DP_ERR("invalid input parameters rate = %lu\n", rate);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rate = rate * 10;
|
||||
|
||||
if (rate <= DP_VCO_HSCLK_RATE_1620MHZDIV1000)
|
||||
rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
|
||||
else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
|
||||
rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
|
||||
else if (rate <= DP_VCO_HSCLK_RATE_5400MHZDIV1000)
|
||||
rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
|
||||
else
|
||||
rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000;
|
||||
|
||||
rc = dp_vco_set_rate_4nm(pll, rate);
|
||||
if (rc < 0) {
|
||||
DP_ERR("pll rate %s set failed\n", rate);
|
||||
return rc;
|
||||
}
|
||||
|
||||
pll->vco_rate = rate;
|
||||
DP_DEBUG("pll rate %lu set success\n", rate);
|
||||
return rc;
|
||||
}
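dp_pll_configure() takes the link clock rate, scales it by 10 to get the lane (VCO-domain) rate, and snaps it up to the nearest supported HSCLK rate (1.62, 2.7, 5.4 or 8.1 GHz in the driver's *MHZDIV1000 units). A small sketch of that rounding with the thresholds written out as plain numbers; kHz-style units are assumed here to match the constant names:

/* Sketch of the link-rate rounding in dp_pll_configure(); units assumed to
 * be kHz (MHz * 1000), matching the DP_VCO_HSCLK_RATE_*MHZDIV1000 names. */
#include <stdio.h>

static unsigned long snap_to_hsclk_rate(unsigned long link_rate)
{
	unsigned long rate = link_rate * 10;    /* link clock -> lane rate */

	if (rate <= 1620000UL)
		return 1620000UL;
	else if (rate <= 2700000UL)
		return 2700000UL;
	else if (rate <= 5400000UL)
		return 5400000UL;
	else
		return 8100000UL;
}

int main(void)
{
	printf("%lu -> %lu\n", 270000UL, snap_to_hsclk_rate(270000UL));
	printf("%lu -> %lu\n", 540000UL, snap_to_hsclk_rate(540000UL));
	return 0;
}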
|
||||
|
||||
static int dp_pll_prepare(struct dp_pll *pll)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (!pll) {
|
||||
DP_ERR("invalid input parameters\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable DP_PM_PLL regulator if the PLL revision is 4nm-V1 and the
|
||||
* link rate is 8.1Gbps. This will result in voting to place Mx rail in
|
||||
* turbo as required for V1 hardware PLL functionality.
|
||||
*/
|
||||
if (pll->revision == DP_PLL_4NM_V1 &&
|
||||
pll->vco_rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000) {
|
||||
rc = dp_regulator_enable_4nm(pll->parser, DP_PLL_PM, true);
|
||||
if (rc < 0) {
|
||||
DP_ERR("enable pll power failed\n");
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
rc = dp_pll_enable_4nm(pll);
|
||||
if (rc < 0)
|
||||
DP_ERR("ndx=%d failed to enable dp pll\n", pll->index);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int dp_pll_unprepare(struct dp_pll *pll)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (!pll) {
|
||||
DP_ERR("invalid input parameter\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (pll->revision == DP_PLL_4NM_V1 &&
|
||||
pll->vco_rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000) {
|
||||
rc = dp_regulator_enable_4nm(pll->parser, DP_PLL_PM, false);
|
||||
if (rc < 0) {
|
||||
DP_ERR("disable pll power failed\n");
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
dp_pll_disable_4nm(pll);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
unsigned long dp_vco_recalc_rate_4nm(struct dp_pll *pll)
|
||||
{
|
||||
u32 hsclk_sel, link_clk_divsel, hsclk_div, link_clk_div = 0;
|
||||
unsigned long vco_rate = 0;
|
||||
|
||||
if (!pll) {
|
||||
DP_ERR("invalid input parameters\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (is_gdsc_disabled(pll))
|
||||
return 0;
|
||||
|
||||
hsclk_sel = dp_pll_read(dp_pll, QSERDES_COM_HSCLK_SEL_1);
|
||||
hsclk_sel &= 0x0f;
|
||||
|
||||
switch (hsclk_sel) {
|
||||
case 5:
|
||||
hsclk_div = 5;
|
||||
break;
|
||||
case 3:
|
||||
hsclk_div = 3;
|
||||
break;
|
||||
case 1:
|
||||
hsclk_div = 2;
|
||||
break;
|
||||
case 0:
|
||||
hsclk_div = 1;
|
||||
break;
|
||||
default:
|
||||
DP_DEBUG("unknown divider. forcing to default\n");
|
||||
hsclk_div = 5;
|
||||
break;
|
||||
}
|
||||
|
||||
link_clk_divsel = dp_pll_read(dp_phy, DP_PHY_AUX_CFG2);
|
||||
link_clk_divsel >>= 2;
|
||||
link_clk_divsel &= 0x3;
|
||||
|
||||
if (link_clk_divsel == 0)
|
||||
link_clk_div = 5;
|
||||
else if (link_clk_divsel == 1)
|
||||
link_clk_div = 10;
|
||||
else if (link_clk_divsel == 2)
|
||||
link_clk_div = 20;
|
||||
else
|
||||
DP_ERR("unsupported div. Phy_mode: %d\n", link_clk_divsel);
|
||||
|
||||
if (link_clk_div == 20) {
|
||||
vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
|
||||
} else {
|
||||
if (hsclk_div == 5)
|
||||
vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
|
||||
else if (hsclk_div == 3)
|
||||
vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
|
||||
else if (hsclk_div == 2)
|
||||
vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
|
||||
else
|
||||
vco_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000;
|
||||
}
|
||||
|
||||
DP_DEBUG("hsclk: sel=0x%x, div=0x%x; lclk: sel=%u, div=%u, rate=%lu\n",
|
||||
hsclk_sel, hsclk_div, link_clk_divsel, link_clk_div, vco_rate);
|
||||
|
||||
return vco_rate;
|
||||
}
|
||||
|
||||
static unsigned long dp_pll_link_clk_recalc_rate(struct clk_hw *hw,
|
||||
unsigned long parent_rate)
|
||||
{
|
||||
struct dp_pll *pll = NULL;
|
||||
struct dp_pll_vco_clk *pll_link = NULL;
|
||||
unsigned long rate = 0;
|
||||
|
||||
if (!hw) {
|
||||
DP_ERR("invalid input parameters\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pll_link = to_dp_vco_hw(hw);
|
||||
pll = pll_link->priv;
|
||||
|
||||
rate = pll->vco_rate;
|
||||
rate = pll->vco_rate / 10;
|
||||
|
||||
return rate;
|
||||
}
|
||||
|
||||
static long dp_pll_link_clk_round(struct clk_hw *hw, unsigned long rate,
|
||||
unsigned long *parent_rate)
|
||||
{
|
||||
struct dp_pll *pll = NULL;
|
||||
struct dp_pll_vco_clk *pll_link = NULL;
|
||||
|
||||
if (!hw) {
|
||||
DP_ERR("invalid input parameters\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pll_link = to_dp_vco_hw(hw);
|
||||
pll = pll_link->priv;
|
||||
|
||||
rate = pll->vco_rate / 10;
|
||||
|
||||
return rate;
|
||||
}
|
||||
|
||||
static unsigned long dp_pll_vco_div_clk_get_rate(struct dp_pll *pll)
|
||||
{
|
||||
if (pll->vco_rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000)
|
||||
return (pll->vco_rate / 6);
|
||||
else if (pll->vco_rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
|
||||
return (pll->vco_rate / 4);
|
||||
else
|
||||
return (pll->vco_rate / 2);
|
||||
}
|
||||
|
||||
static unsigned long dp_pll_vco_div_clk_recalc_rate(struct clk_hw *hw,
|
||||
unsigned long parent_rate)
|
||||
{
|
||||
struct dp_pll *pll = NULL;
|
||||
struct dp_pll_vco_clk *pll_link = NULL;
|
||||
|
||||
if (!hw) {
|
||||
DP_ERR("invalid input parameters\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pll_link = to_dp_vco_hw(hw);
|
||||
pll = pll_link->priv;
|
||||
|
||||
return dp_pll_vco_div_clk_get_rate(pll);
|
||||
}
|
||||
|
||||
static long dp_pll_vco_div_clk_round(struct clk_hw *hw, unsigned long rate,
|
||||
unsigned long *parent_rate)
|
||||
{
|
||||
return dp_pll_vco_div_clk_recalc_rate(hw, *parent_rate);
|
||||
}
|
||||
|
||||
static const struct clk_ops pll_link_clk_ops = {
|
||||
.recalc_rate = dp_pll_link_clk_recalc_rate,
|
||||
.round_rate = dp_pll_link_clk_round,
|
||||
};
|
||||
|
||||
static const struct clk_ops pll_vco_div_clk_ops = {
|
||||
.recalc_rate = dp_pll_vco_div_clk_recalc_rate,
|
||||
.round_rate = dp_pll_vco_div_clk_round,
|
||||
};
|
||||
|
||||
static struct dp_pll_vco_clk dp0_phy_pll_clks[DP_PLL_NUM_CLKS] = {
|
||||
{
|
||||
.hw.init = &(struct clk_init_data) {
|
||||
.name = "dp0_phy_pll_link_clk",
|
||||
.ops = &pll_link_clk_ops,
|
||||
},
|
||||
},
|
||||
{
|
||||
.hw.init = &(struct clk_init_data) {
|
||||
.name = "dp0_phy_pll_vco_div_clk",
|
||||
.ops = &pll_vco_div_clk_ops,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
static struct dp_pll_vco_clk dp_phy_pll_clks[DP_PLL_NUM_CLKS] = {
|
||||
{
|
||||
.hw.init = &(struct clk_init_data) {
|
||||
.name = "dp_phy_pll_link_clk",
|
||||
.ops = &pll_link_clk_ops,
|
||||
},
|
||||
},
|
||||
{
|
||||
.hw.init = &(struct clk_init_data) {
|
||||
.name = "dp_phy_pll_vco_div_clk",
|
||||
.ops = &pll_vco_div_clk_ops,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
static struct dp_pll_db dp_pdb;
|
||||
|
||||
int dp_pll_clock_register_4nm(struct dp_pll *pll)
|
||||
{
|
||||
int rc = 0;
|
||||
struct platform_device *pdev;
|
||||
struct dp_pll_vco_clk *pll_clks;
|
||||
|
||||
if (!pll) {
|
||||
DP_ERR("pll data not initialized\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pdev = pll->pdev;
|
||||
|
||||
pll->clk_data = kzalloc(sizeof(*pll->clk_data), GFP_KERNEL);
|
||||
if (!pll->clk_data)
|
||||
return -ENOMEM;
|
||||
|
||||
pll->clk_data->clks = kcalloc(DP_PLL_NUM_CLKS, sizeof(struct clk *),
|
||||
GFP_KERNEL);
|
||||
if (!pll->clk_data->clks) {
|
||||
kfree(pll->clk_data);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pll->clk_data->clk_num = DP_PLL_NUM_CLKS;
|
||||
pll->priv = &dp_pdb;
|
||||
dp_pdb.pll = pll;
|
||||
|
||||
pll->pll_cfg = dp_pll_configure;
|
||||
pll->pll_prepare = dp_pll_prepare;
|
||||
pll->pll_unprepare = dp_pll_unprepare;
|
||||
|
||||
if (pll->dp_core_revision >= 0x10040000)
|
||||
pll_clks = dp0_phy_pll_clks;
|
||||
else
|
||||
pll_clks = dp_phy_pll_clks;
|
||||
|
||||
rc = dp_pll_clock_register_helper(pll, pll_clks, DP_PLL_NUM_CLKS);
|
||||
if (rc) {
|
||||
DP_ERR("Clock register failed rc=%d\n", rc);
|
||||
goto clk_reg_fail;
|
||||
}
|
||||
|
||||
rc = of_clk_add_provider(pdev->dev.of_node,
|
||||
of_clk_src_onecell_get, pll->clk_data);
|
||||
if (rc) {
|
||||
DP_ERR("Clock add provider failed rc=%d\n", rc);
|
||||
goto clk_reg_fail;
|
||||
}
|
||||
|
||||
DP_DEBUG("success\n");
|
||||
return rc;
|
||||
|
||||
clk_reg_fail:
|
||||
dp_pll_clock_unregister_4nm(pll);
|
||||
return rc;
|
||||
}
|
||||
|
||||
void dp_pll_clock_unregister_4nm(struct dp_pll *pll)
|
||||
{
|
||||
kfree(pll->clk_data->clks);
|
||||
kfree(pll->clk_data);
|
||||
}
|
@@ -129,6 +129,69 @@
|
||||
#define DP_5NM_PHY_READY BIT(1)
|
||||
#define DP_5NM_TSYNC_DONE BIT(0)
|
||||
|
||||
static int dp_vco_clk_set_div(struct dp_pll *pll, unsigned int div)
|
||||
{
|
||||
u32 val = 0;
|
||||
|
||||
if (!pll) {
|
||||
DP_ERR("invalid input parameters\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (is_gdsc_disabled(pll))
|
||||
return -EINVAL;
|
||||
|
||||
val = dp_pll_read(dp_phy, DP_PHY_VCO_DIV);
|
||||
val &= ~0x03;
|
||||
|
||||
switch (div) {
|
||||
case 2:
|
||||
val |= 1;
|
||||
break;
|
||||
case 4:
|
||||
val |= 2;
|
||||
break;
|
||||
case 6:
|
||||
/* When div = 6, val is 0, so do nothing here */
|
||||
;
|
||||
break;
|
||||
case 8:
|
||||
val |= 3;
|
||||
break;
|
||||
default:
|
||||
DP_DEBUG("unsupported div value %d\n", div);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dp_pll_write(dp_phy, DP_PHY_VCO_DIV, val);
|
||||
/* Make sure the PHY registers writes are done */
|
||||
wmb();
|
||||
|
||||
DP_DEBUG("val=%d div=%x\n", val, div);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_vco_div(struct dp_pll *pll, unsigned long rate)
|
||||
{
|
||||
int div;
|
||||
int rc = 0;
|
||||
|
||||
if (rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000)
|
||||
div = 6;
|
||||
else if (rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
|
||||
div = 4;
|
||||
else
|
||||
div = 2;
|
||||
|
||||
rc = dp_vco_clk_set_div(pll, div);
|
||||
if (rc < 0) {
|
||||
DP_DEBUG("set vco div failed\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dp_vco_pll_init_db_5nm(struct dp_pll_db *pdb,
|
||||
unsigned long rate)
|
||||
{
|
||||
@@ -336,7 +399,7 @@ static int dp_config_vco_rate_5nm(struct dp_pll *pll,
|
||||
/* Make sure the PHY register writes are done */
|
||||
wmb();
|
||||
|
||||
return rc;
|
||||
return set_vco_div(pll, rate);
|
||||
}
|
||||
|
||||
enum dp_5nm_pll_status {
|
||||
@@ -475,48 +538,6 @@ static void dp_pll_disable_5nm(struct dp_pll *pll)
|
||||
wmb();
|
||||
}
|
||||
|
||||
static int dp_vco_clk_set_div(struct dp_pll *pll, unsigned int div)
|
||||
{
|
||||
u32 val = 0;
|
||||
|
||||
if (!pll) {
|
||||
DP_ERR("invalid input parameters\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (is_gdsc_disabled(pll))
|
||||
return -EINVAL;
|
||||
|
||||
val = dp_pll_read(dp_phy, DP_PHY_VCO_DIV);
|
||||
val &= ~0x03;
|
||||
|
||||
switch (div) {
|
||||
case 2:
|
||||
val |= 1;
|
||||
break;
|
||||
case 4:
|
||||
val |= 2;
|
||||
break;
|
||||
case 6:
|
||||
/* When div = 6, val is 0, so do nothing here */
|
||||
;
|
||||
break;
|
||||
case 8:
|
||||
val |= 3;
|
||||
break;
|
||||
default:
|
||||
DP_DEBUG("unsupported div value %d\n", div);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dp_pll_write(dp_phy, DP_PHY_VCO_DIV, val);
|
||||
/* Make sure the PHY registers writes are done */
|
||||
wmb();
|
||||
|
||||
DP_DEBUG("val=%d div=%x\n", val, div);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dp_vco_set_rate_5nm(struct dp_pll *pll, unsigned long rate)
|
||||
{
|
||||
int rc = 0;
|
||||
@@ -787,36 +808,6 @@ static long dp_pll_vco_div_clk_round(struct clk_hw *hw, unsigned long rate,
|
||||
return dp_pll_vco_div_clk_recalc_rate(hw, *parent_rate);
|
||||
}
|
||||
|
||||
static int dp_pll_vco_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
|
||||
unsigned long parent_rate)
|
||||
{
|
||||
struct dp_pll *pll = NULL;
|
||||
struct dp_pll_vco_clk *pll_link = NULL;
|
||||
int rc = 0;
|
||||
|
||||
if (!hw) {
|
||||
DP_ERR("invalid input parameters\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pll_link = to_dp_vco_hw(hw);
|
||||
pll = pll_link->priv;
|
||||
|
||||
if (rate != dp_pll_vco_div_clk_get_rate(pll)) {
|
||||
DP_ERR("unsupported rate %lu failed\n", rate);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = dp_vco_clk_set_div(pll, pll->vco_rate / rate);
|
||||
if (rc < 0) {
|
||||
DP_DEBUG("set rate %lu failed\n", rate);
|
||||
return rc;
|
||||
}
|
||||
|
||||
DP_DEBUG("set rate %lu success\n", rate);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct clk_ops pll_link_clk_ops = {
|
||||
.recalc_rate = dp_pll_link_clk_recalc_rate,
|
||||
.round_rate = dp_pll_link_clk_round,
|
||||
@@ -825,7 +816,6 @@ static const struct clk_ops pll_link_clk_ops = {
|
||||
static const struct clk_ops pll_vco_div_clk_ops = {
|
||||
.recalc_rate = dp_pll_vco_div_clk_recalc_rate,
|
||||
.round_rate = dp_pll_vco_div_clk_round,
|
||||
.set_rate = dp_pll_vco_div_clk_set_rate,
|
||||
};
|
||||
|
||||
static struct dp_pll_vco_clk dp0_phy_pll_clks[DP_PLL_NUM_CLKS] = {
|
||||
|
@@ -11,6 +11,7 @@
|
||||
#include "dp_pll.h"
|
||||
|
||||
#define DP_CLIENT_NAME_SIZE 20
|
||||
#define XO_CLK_KHZ 19200
|
||||
|
||||
struct dp_power_private {
|
||||
struct dp_parser *parser;
|
||||
@@ -19,6 +20,7 @@ struct dp_power_private {
|
||||
struct clk *pixel_clk_rcg;
|
||||
struct clk *pixel_parent;
|
||||
struct clk *pixel1_clk_rcg;
|
||||
struct clk *xo_clk;
|
||||
|
||||
struct dp_power dp_power;
|
||||
|
||||
@@ -26,6 +28,8 @@ struct dp_power_private {
|
||||
bool link_clks_on;
|
||||
bool strm0_clks_on;
|
||||
bool strm1_clks_on;
|
||||
bool strm0_clks_parked;
|
||||
bool strm1_clks_parked;
|
||||
};
|
||||
|
||||
static int dp_power_regulator_init(struct dp_power_private *power)
|
||||
@@ -220,6 +224,14 @@ static int dp_power_clk_init(struct dp_power_private *power, bool enable)
|
||||
goto err_pixel_parent;
|
||||
}
|
||||
|
||||
power->xo_clk = clk_get(dev, "rpmh_cxo_clk");
|
||||
if (IS_ERR(power->xo_clk)) {
|
||||
DP_ERR("Unable to get XO clk: %d\n", PTR_ERR(power->xo_clk));
|
||||
rc = PTR_ERR(power->xo_clk);
|
||||
power->xo_clk = NULL;
|
||||
goto err_xo_clk;
|
||||
}
|
||||
|
||||
if (power->parser->has_mst) {
|
||||
power->pixel1_clk_rcg = clk_get(dev, "pixel1_clk_rcg");
|
||||
if (IS_ERR(power->pixel1_clk_rcg)) {
|
||||
@@ -244,8 +256,9 @@ static int dp_power_clk_init(struct dp_power_private *power, bool enable)
|
||||
}
|
||||
|
||||
return rc;
|
||||
|
||||
err_pixel1_clk_rcg:
|
||||
clk_put(power->xo_clk);
|
||||
err_xo_clk:
|
||||
clk_put(power->pixel_parent);
|
||||
err_pixel_parent:
|
||||
clk_put(power->pixel_clk_rcg);
|
||||
@@ -255,6 +268,59 @@ exit:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int dp_power_park_module(struct dp_power_private *power, enum dp_pm_type module)
|
||||
{
|
||||
struct dss_module_power *mp;
|
||||
struct clk *clk = NULL;
|
||||
int rc = 0;
|
||||
bool *parked;
|
||||
|
||||
mp = &power->parser->mp[module];
|
||||
|
||||
if (module == DP_STREAM0_PM) {
|
||||
clk = power->pixel_clk_rcg;
|
||||
parked = &power->strm0_clks_parked;
|
||||
} else if (module == DP_STREAM1_PM) {
|
||||
clk = power->pixel1_clk_rcg;
|
||||
parked = &power->strm1_clks_parked;
|
||||
} else {
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (!clk) {
|
||||
DP_WARN("clk type %d not supported\n", module);
|
||||
rc = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (!power->xo_clk) {
|
||||
rc = -EINVAL;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (*parked)
|
||||
goto exit;
|
||||
|
||||
rc = clk_set_parent(clk, power->xo_clk);
|
||||
if (rc) {
|
||||
DP_ERR("unable to set xo parent on clk %d\n", module);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
mp->clk_config->rate = XO_CLK_KHZ;
|
||||
rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
|
||||
if (rc) {
|
||||
DP_ERR("failed to set clk rate.\n");
|
||||
goto exit;
|
||||
}
|
||||
|
||||
*parked = true;
|
||||
|
||||
exit:
|
||||
return rc;
|
||||
}
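dp_power_park_module() keeps a per-stream *_clks_parked flag so a pixel RCG is reparented to the always-on XO (19.2 MHz, XO_CLK_KHZ) at most once after its stream clocks are disabled; enabling the clocks again clears the flag in dp_power_clk_enable(). A small sketch of that park/unpark bookkeeping, with the clock-framework calls replaced by prints:

/* Sketch of the strm*_clks_parked bookkeeping; clk_set_parent() and
 * msm_dss_clk_set_rate() are replaced by printfs. */
#include <stdbool.h>
#include <stdio.h>

#define XO_CLK_KHZ 19200

struct stream_clk {
	const char *name;
	bool parked;
};

static void park(struct stream_clk *c)
{
	if (c->parked)
		return;                         /* already parked, nothing to do */
	printf("%s: set parent to XO, rate %d kHz\n", c->name, XO_CLK_KHZ);
	c->parked = true;
}

static void enable(struct stream_clk *c)
{
	printf("%s: enable at panel rate\n", c->name);
	c->parked = false;                      /* mirrors dp_power_clk_enable() */
}

int main(void)
{
	struct stream_clk strm0 = { "pixel_clk_rcg", false };

	park(&strm0);
	park(&strm0);                           /* second call is a no-op */
	enable(&strm0);
	return 0;
}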
|
||||
|
||||
|
||||
static int dp_power_clk_set_rate(struct dp_power_private *power,
|
||||
enum dp_pm_type module, bool enable)
|
||||
{
|
||||
@@ -287,6 +353,8 @@ static int dp_power_clk_set_rate(struct dp_power_private *power,
|
||||
DP_ERR("failed to disable clks\n");
|
||||
goto exit;
|
||||
}
|
||||
|
||||
dp_power_park_module(power, module);
|
||||
}
|
||||
exit:
|
||||
return rc;
|
||||
@@ -367,6 +435,11 @@ static int dp_power_clk_enable(struct dp_power *dp_power,
|
||||
else if (pm_type == DP_LINK_PM)
|
||||
power->link_clks_on = enable;
|
||||
|
||||
if (pm_type == DP_STREAM0_PM)
|
||||
power->strm0_clks_parked = false;
|
||||
if (pm_type == DP_STREAM1_PM)
|
||||
power->strm1_clks_parked = false;
|
||||
|
||||
/*
|
||||
* This log is printed only when user connects or disconnects
|
||||
* a DP cable. As this is a user-action and not a frequent
|
||||
@@ -581,6 +654,34 @@ static void dp_power_client_deinit(struct dp_power *dp_power)
|
||||
dp_power_regulator_deinit(power);
|
||||
}
|
||||
|
||||
static int dp_power_park_clocks(struct dp_power *dp_power)
|
||||
{
|
||||
int rc = 0;
|
||||
struct dp_power_private *power;
|
||||
|
||||
if (!dp_power) {
|
||||
DP_ERR("invalid power data\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
power = container_of(dp_power, struct dp_power_private, dp_power);
|
||||
|
||||
rc = dp_power_park_module(power, DP_STREAM0_PM);
|
||||
if (rc) {
|
||||
DP_ERR("failed to park stream 0. err=%d\n", rc);
|
||||
goto error;
|
||||
}
|
||||
|
||||
rc = dp_power_park_module(power, DP_STREAM1_PM);
|
||||
if (rc) {
|
||||
DP_ERR("failed to park stream 1. err=%d\n", rc);
|
||||
goto error;
|
||||
}
|
||||
|
||||
error:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int dp_power_set_pixel_clk_parent(struct dp_power *dp_power, u32 strm_id)
|
||||
{
|
||||
int rc = 0;
|
||||
@@ -764,6 +865,7 @@ struct dp_power *dp_power_get(struct dp_parser *parser, struct dp_pll *pll)
|
||||
dp_power->clk_enable = dp_power_clk_enable;
|
||||
dp_power->clk_status = dp_power_clk_status;
|
||||
dp_power->set_pixel_clk_parent = dp_power_set_pixel_clk_parent;
|
||||
dp_power->park_clocks = dp_power_park_clocks;
|
||||
dp_power->clk_get_rate = dp_power_clk_get_rate;
|
||||
dp_power->power_client_init = dp_power_client_init;
|
||||
dp_power->power_client_deinit = dp_power_client_deinit;
|
||||
|
@@ -18,6 +18,7 @@
 * @clk_enable: enable/disable the DP clocks
 * @clk_status: check for clock status
 * @set_pixel_clk_parent: set the parent of DP pixel clock
 * @park_clocks: park all clocks driven by PLL
 * @clk_get_rate: get the current rate for provided clk_name
 * @power_client_init: configures clocks and regulators
 * @power_client_deinit: frees clock and regulator resources
@@ -32,6 +33,7 @@ struct dp_power {
            bool enable);
    bool (*clk_status)(struct dp_power *power, enum dp_pm_type pm_type);
    int (*set_pixel_clk_parent)(struct dp_power *power, u32 stream_id);
    int (*park_clocks)(struct dp_power *power);
    u64 (*clk_get_rate)(struct dp_power *power, char *clk_name);
    int (*power_client_init)(struct dp_power *power,
            struct sde_power_handle *phandle,
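The park_clocks op declared above backs the dp_power_park_module() change earlier in this
file's diff: when a DP stream clock is turned off, its RCG is reparented to the always-on
19.2 MHz XO so the clock controller keeps a live source. A minimal sketch of that idea is
below; the function and parameter names are illustrative only, and only the clk framework
calls (clk_set_parent(), clk_set_rate()) are real kernel APIs. The driver itself sets the
rate through msm_dss_clk_set_rate() in kHz, which this sketch simplifies.

#include <linux/clk.h>
#include <linux/errno.h>

/* Hypothetical helper: park one RCG on the XO source at 19.2 MHz. */
static int example_park_rcg_on_xo(struct clk *rcg, struct clk *xo_clk, bool *parked)
{
    int rc;

    if (!rcg || !xo_clk)
        return -EINVAL;

    if (*parked)            /* already parked, nothing to do */
        return 0;

    rc = clk_set_parent(rcg, xo_clk);   /* switch the source away from the DP PLL */
    if (rc)
        return rc;

    rc = clk_set_rate(rcg, 19200 * 1000);   /* XO_CLK_KHZ expressed in Hz */
    if (rc)
        return rc;

    *parked = true;
    return 0;
}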
@@ -378,9 +378,11 @@
#define TXn_HIGHZ_DRVR_EN                   (0x0060)

#define DP_PHY_STATUS                       (0x00DC)
#define DP_PHY_STATUS_V600                  (0x00E4)
#define DP_PHY_AUX_INTERRUPT_MASK_V420      (0x0054)
#define DP_PHY_AUX_INTERRUPT_CLEAR_V420     (0x0058)
#define DP_PHY_AUX_INTERRUPT_STATUS_V420    (0x00D8)
#define DP_PHY_AUX_INTERRUPT_STATUS_V600    (0x00E0)
#define DP_PHY_SPARE0_V420                  (0x00C8)
#define TXn_TX_DRV_LVL_V420                 (0x0014)
#define TXn_TRANSCEIVER_BIAS_EN_V420        (0x0054)
@@ -388,6 +390,7 @@
#define TXn_TX_POL_INV_V420                 (0x005C)

#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN         (0x044)
#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN_V600    (0x0DC)

/* DP MMSS_CC registers */
#define MMSS_DP_PIXEL_M                     (0x01B4)
@@ -365,13 +365,6 @@ static void dsi_ctrl_dma_cmd_wait_for_done(struct dsi_ctrl *dsi_ctrl)
|
||||
dsi_hw_ops = dsi_ctrl->hw.ops;
|
||||
SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
|
||||
|
||||
/*
|
||||
* This atomic state will be set if ISR has been triggered,
|
||||
* so the wait is not needed.
|
||||
*/
|
||||
if (atomic_read(&dsi_ctrl->dma_irq_trig))
|
||||
return;
|
||||
|
||||
ret = wait_for_completion_timeout(
|
||||
&dsi_ctrl->irq_info.cmd_dma_done,
|
||||
msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
|
||||
@@ -392,6 +385,7 @@ static void dsi_ctrl_dma_cmd_wait_for_done(struct dsi_ctrl *dsi_ctrl)
|
||||
dsi_ctrl_disable_status_interrupt(dsi_ctrl,
|
||||
DSI_SINT_CMD_MODE_DMA_DONE);
|
||||
}
|
||||
SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_EXIT);
|
||||
|
||||
}
|
||||
|
||||
@@ -434,7 +428,8 @@ static void dsi_ctrl_post_cmd_transfer(struct dsi_ctrl *dsi_ctrl)
|
||||
if ((dsi_ctrl->pending_cmd_flags & DSI_CTRL_CMD_BROADCAST) &&
|
||||
!(dsi_ctrl->pending_cmd_flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
|
||||
dsi_ctrl_clear_dma_status(dsi_ctrl);
|
||||
} else {
|
||||
} else if (!(dsi_ctrl->pending_cmd_flags & DSI_CTRL_CMD_READ)) {
|
||||
/* Wait for read command transfer to complete is done in dsi_message_rx. */
|
||||
dsi_ctrl_dma_cmd_wait_for_done(dsi_ctrl);
|
||||
}
|
||||
|
||||
@@ -443,9 +438,7 @@ static void dsi_ctrl_post_cmd_transfer(struct dsi_ctrl *dsi_ctrl)
|
||||
if (rc)
|
||||
DSI_CTRL_ERR(dsi_ctrl, "failed to disable command engine\n");
|
||||
|
||||
if (dsi_ctrl->pending_cmd_flags & DSI_CTRL_CMD_READ)
|
||||
mask |= BIT(DSI_FIFO_UNDERFLOW);
|
||||
|
||||
if (!(dsi_ctrl->pending_cmd_flags & DSI_CTRL_CMD_READ))
|
||||
dsi_ctrl_mask_error_status_interrupts(dsi_ctrl, mask, false);
|
||||
|
||||
mutex_unlock(&dsi_ctrl->ctrl_lock);
|
||||
@@ -1505,7 +1498,7 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl, struct dsi_cmd_desc *cmd_de
|
||||
goto error;
|
||||
}
|
||||
|
||||
SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, *flags);
|
||||
SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, *flags, dsi_ctrl->cmd_len);
|
||||
|
||||
if (*flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
|
||||
cmd_mem.offset = dsi_ctrl->cmd_buffer_iova;
|
||||
@@ -1905,16 +1898,18 @@ static int dsi_disable_ulps(struct dsi_ctrl *dsi_ctrl)
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void dsi_ctrl_enable_error_interrupts(struct dsi_ctrl *dsi_ctrl)
|
||||
void dsi_ctrl_toggle_error_interrupt_status(struct dsi_ctrl *dsi_ctrl, bool enable)
|
||||
{
|
||||
if (!enable) {
|
||||
dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0);
|
||||
} else {
|
||||
if (dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE &&
|
||||
!dsi_ctrl->host_config.u.video_engine.bllp_lp11_en &&
|
||||
!dsi_ctrl->host_config.u.video_engine.eof_bllp_lp11_en)
|
||||
dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw,
|
||||
0xFF00A0);
|
||||
dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0xFF00A0);
|
||||
else
|
||||
dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw,
|
||||
0xFF00E0);
|
||||
dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0xFF00E0);
|
||||
}
|
||||
}
|
||||
|
||||
static int dsi_ctrl_drv_state_init(struct dsi_ctrl *dsi_ctrl)
|
||||
@@ -2593,7 +2588,7 @@ int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
|
||||
&dsi_ctrl->host_config.common_config);
|
||||
|
||||
dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
|
||||
dsi_ctrl_enable_error_interrupts(dsi_ctrl);
|
||||
dsi_ctrl_toggle_error_interrupt_status(dsi_ctrl, true);
|
||||
|
||||
dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, true);
|
||||
|
||||
@@ -2953,7 +2948,10 @@ void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
|
||||
intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
|
||||
return;
|
||||
|
||||
SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, intr_idx);
|
||||
SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, intr_idx,
|
||||
dsi_ctrl->irq_info.irq_num, dsi_ctrl->irq_info.irq_stat_mask,
|
||||
dsi_ctrl->irq_info.irq_stat_refcount[intr_idx]);
|
||||
|
||||
spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
|
||||
|
||||
if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx] == 0) {
|
||||
@@ -2986,7 +2984,10 @@ void dsi_ctrl_disable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
|
||||
if (!dsi_ctrl || intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
|
||||
return;
|
||||
|
||||
SDE_EVT32_IRQ(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, intr_idx);
|
||||
SDE_EVT32_IRQ(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, intr_idx,
|
||||
dsi_ctrl->irq_info.irq_num, dsi_ctrl->irq_info.irq_stat_mask,
|
||||
dsi_ctrl->irq_info.irq_stat_refcount[intr_idx]);
|
||||
|
||||
spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
|
||||
|
||||
if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx])
|
||||
@@ -3127,7 +3128,7 @@ int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool skip_op)
|
||||
}
|
||||
|
||||
dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
|
||||
dsi_ctrl_enable_error_interrupts(dsi_ctrl);
|
||||
dsi_ctrl_toggle_error_interrupt_status(dsi_ctrl, true);
|
||||
|
||||
DSI_CTRL_DEBUG(dsi_ctrl, "Host initialization complete, skip op: %d\n",
|
||||
skip_op);
|
||||
@@ -3413,9 +3414,7 @@ int dsi_ctrl_transfer_prepare(struct dsi_ctrl *dsi_ctrl, u32 flags)
|
||||
|
||||
mutex_lock(&dsi_ctrl->ctrl_lock);
|
||||
|
||||
if (flags & DSI_CTRL_CMD_READ)
|
||||
mask |= BIT(DSI_FIFO_UNDERFLOW);
|
||||
|
||||
if (!(flags & DSI_CTRL_CMD_READ))
|
||||
dsi_ctrl_mask_error_status_interrupts(dsi_ctrl, mask, true);
|
||||
|
||||
rc = dsi_ctrl_set_cmd_engine_state(dsi_ctrl, DSI_CTRL_ENGINE_ON, false);
|
||||
|
@@ -920,4 +920,8 @@ int dsi_ctrl_wait4dynamic_refresh_done(struct dsi_ctrl *ctrl);
 */
int dsi_ctrl_get_io_resources(struct msm_io_res *io_res);

/**
 * dsi_ctrl_toggle_error_interrupt_status() - Toggles error interrupt status
 */
void dsi_ctrl_toggle_error_interrupt_status(struct dsi_ctrl *dsi_ctrl, bool enable);
#endif /* _DSI_CTRL_H_ */
@@ -299,6 +299,14 @@ void dsi_ctrl_hw_22_configure_splitlink(struct dsi_ctrl_hw *ctrl,
    else
        reg |= (BIT(12) | BIT(13));

    /**
     * Avoid dma trigger on sublink1 for read commands. This can be
     * enabled in future if panel supports sending read command on sublink1.
     */
    if (flags & DSI_CTRL_CMD_READ) {
        reg = reg & ~BIT(13);
    }

    DSI_W32(ctrl, DSI_SPLIT_LINK, reg);

    /* Make sure the split link config is updated */
@@ -882,6 +882,8 @@ void dsi_ctrl_hw_cmn_kickoff_command(struct dsi_ctrl_hw *ctrl,
|
||||
|
||||
if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
|
||||
DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
|
||||
|
||||
SDE_EVT32(ctrl->index, cmd->length, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1411,6 +1413,22 @@ void dsi_ctrl_hw_cmn_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
|
||||
{
|
||||
u32 int_ctrl = 0;
|
||||
u32 int_mask0 = 0x7FFF3BFF;
|
||||
u32 dln0_phy_err = 0x11111;
|
||||
u32 fifo_status = 0xCCCC0789;
|
||||
u32 ack_error = 0x1193BFFF;
|
||||
u32 timeout_status = 0x11111111;
|
||||
u32 clk_status = 0x10000;
|
||||
u32 dsi_status_error = 0x80000000;
|
||||
u32 reg = 0;
|
||||
|
||||
DSI_W32(ctrl, DSI_DLN0_PHY_ERR, dln0_phy_err);
|
||||
DSI_W32(ctrl, DSI_FIFO_STATUS, fifo_status);
|
||||
DSI_W32(ctrl, DSI_TIMEOUT_STATUS, timeout_status);
|
||||
DSI_W32(ctrl, DSI_ACK_ERR_STATUS, ack_error);
|
||||
reg = DSI_R32(ctrl, DSI_CLK_STATUS);
|
||||
DSI_W32(ctrl, DSI_CLK_STATUS, reg | clk_status);
|
||||
reg = DSI_R32(ctrl, DSI_STATUS);
|
||||
DSI_W32(ctrl, DSI_STATUS, reg | dsi_status_error);
|
||||
|
||||
int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
|
||||
if (errors)
|
||||
|
@@ -771,23 +771,6 @@ static inline int dsi_pixel_format_to_bpp(enum dsi_pixel_format fmt)
    return 24;
}

/* return number of DSI data lanes */
static inline int dsi_get_num_of_data_lanes(enum dsi_data_lanes dlanes)
{
    int num_of_lanes = 0;

    if (dlanes & DSI_DATA_LANE_0)
        num_of_lanes++;
    if (dlanes & DSI_DATA_LANE_1)
        num_of_lanes++;
    if (dlanes & DSI_DATA_LANE_2)
        num_of_lanes++;
    if (dlanes & DSI_DATA_LANE_3)
        num_of_lanes++;

    return num_of_lanes;
}

static inline u64 dsi_h_active_dce(struct dsi_mode_info *mode)
{
    u64 h_active = 0;
@@ -922,6 +922,19 @@ static int dsi_display_status_check_te(struct dsi_display *display,
|
||||
return rc;
|
||||
}
|
||||
|
||||
void dsi_display_toggle_error_interrupt_status(struct dsi_display * display, bool enable)
|
||||
{
|
||||
int i = 0;
|
||||
struct dsi_display_ctrl *ctrl;
|
||||
|
||||
display_for_each_ctrl(i, display) {
|
||||
ctrl = &display->ctrl[i];
|
||||
if (!ctrl->ctrl)
|
||||
continue;
|
||||
dsi_ctrl_toggle_error_interrupt_status(ctrl->ctrl, enable);
|
||||
}
|
||||
}
|
||||
|
||||
int dsi_display_check_status(struct drm_connector *connector, void *display,
|
||||
bool te_check_override)
|
||||
{
|
||||
@@ -967,6 +980,11 @@ int dsi_display_check_status(struct drm_connector *connector, void *display,
|
||||
|
||||
dsi_display_set_ctrl_esd_check_flag(dsi_display, true);
|
||||
|
||||
dsi_display_clk_ctrl(dsi_display->dsi_clk_handle, DSI_ALL_CLKS, DSI_CLK_ON);
|
||||
|
||||
/* Disable error interrupts while doing an ESD check */
|
||||
dsi_display_toggle_error_interrupt_status(dsi_display, false);
|
||||
|
||||
if (status_mode == ESD_MODE_REG_READ) {
|
||||
rc = dsi_display_status_reg_read(dsi_display);
|
||||
} else if (status_mode == ESD_MODE_SW_BTA) {
|
||||
@@ -991,7 +1009,11 @@ int dsi_display_check_status(struct drm_connector *connector, void *display,
|
||||
/* Handle Panel failures during display disable sequence */
|
||||
if (rc <=0)
|
||||
atomic_set(&panel->esd_recovery_pending, 1);
|
||||
else
|
||||
/* Enable error interrupts post an ESD success */
|
||||
dsi_display_toggle_error_interrupt_status(dsi_display, true);
|
||||
|
||||
dsi_display_clk_ctrl(dsi_display->dsi_clk_handle, DSI_ALL_CLKS, DSI_CLK_OFF);
|
||||
release_panel_lock:
|
||||
dsi_panel_release_panel_lock(panel);
|
||||
SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, rc);
|
||||
@@ -1041,18 +1063,23 @@ static int dsi_display_cmd_rx(struct dsi_display *display,
|
||||
|
||||
flags = DSI_CTRL_CMD_READ;
|
||||
|
||||
dsi_display_clk_ctrl(display->dsi_clk_handle, DSI_ALL_CLKS, DSI_CLK_ON);
|
||||
dsi_display_toggle_error_interrupt_status(display, false);
|
||||
cmd->ctrl_flags = flags;
|
||||
dsi_display_set_cmd_tx_ctrl_flags(display, cmd);
|
||||
rc = dsi_ctrl_transfer_prepare(m_ctrl->ctrl, cmd->ctrl_flags);
|
||||
if (rc) {
|
||||
DSI_ERR("prepare for rx cmd transfer failed rc = %d\n", rc);
|
||||
goto release_panel_lock;
|
||||
goto enable_error_interrupts;
|
||||
}
|
||||
rc = dsi_ctrl_cmd_transfer(m_ctrl->ctrl, cmd);
|
||||
if (rc <= 0)
|
||||
DSI_ERR("rx cmd transfer failed rc = %d\n", rc);
|
||||
dsi_ctrl_transfer_unprepare(m_ctrl->ctrl, cmd->ctrl_flags);
|
||||
|
||||
enable_error_interrupts:
|
||||
dsi_display_toggle_error_interrupt_status(display, true);
|
||||
dsi_display_clk_ctrl(display->dsi_clk_handle, DSI_ALL_CLKS, DSI_CLK_OFF);
|
||||
release_panel_lock:
|
||||
dsi_panel_release_panel_lock(display->panel);
|
||||
return rc;
|
||||
@@ -1098,6 +1125,8 @@ int dsi_display_cmd_transfer(struct drm_connector *connector,
|
||||
goto end;
|
||||
}
|
||||
|
||||
SDE_EVT32(dsi_display->tx_cmd_buf_ndx, cmd_buf_len);
|
||||
|
||||
/*
|
||||
* Reset the dbgfs buffer if the commands sent exceed the available
|
||||
* buffer size. For video mode, limiting the buffer size to 2K to
|
||||
@@ -1230,6 +1259,8 @@ int dsi_display_cmd_receive(void *display, const char *cmd_buf,
|
||||
goto end;
|
||||
}
|
||||
|
||||
SDE_EVT32(cmd_buf_len, recv_buf_len);
|
||||
|
||||
rc = dsi_display_cmd_rx(dsi_display, &cmd);
|
||||
if (rc <= 0)
|
||||
DSI_ERR("[DSI] Display command receive failed, rc=%d\n", rc);
|
||||
@@ -3554,6 +3585,21 @@ static void dsi_display_ctrl_isr_configure(struct dsi_display *display, bool en)
|
||||
}
|
||||
}
|
||||
|
||||
static void dsi_display_cleanup_post_esd_failure(struct dsi_display *display)
|
||||
{
|
||||
int i = 0;
|
||||
struct dsi_display_ctrl *ctrl;
|
||||
|
||||
display_for_each_ctrl(i, display) {
|
||||
ctrl = &display->ctrl[i];
|
||||
if (!ctrl->ctrl)
|
||||
continue;
|
||||
|
||||
dsi_phy_lane_reset(ctrl->phy);
|
||||
dsi_ctrl_soft_reset(ctrl->ctrl);
|
||||
}
|
||||
}
|
||||
|
||||
int dsi_pre_clkoff_cb(void *priv,
|
||||
enum dsi_clk_type clk,
|
||||
enum dsi_lclk_type l_type,
|
||||
@@ -3565,6 +3611,14 @@ int dsi_pre_clkoff_cb(void *priv,
|
||||
|
||||
if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF) &&
|
||||
(l_type & DSI_LINK_LP_CLK)) {
|
||||
|
||||
/*
|
||||
* Clean up the DSI controller on a previous ESD failure. This requires a DSI
|
||||
* controller soft reset. Also reset PHY lanes before resetting controller.
|
||||
*/
|
||||
if (atomic_read(&display->panel->esd_recovery_pending))
|
||||
dsi_display_cleanup_post_esd_failure(display);
|
||||
|
||||
/*
|
||||
* If continuous clock is enabled then disable it
|
||||
* before entering into ULPS Mode.
|
||||
@@ -3758,6 +3812,13 @@ int dsi_post_clkoff_cb(void *priv,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Reset PHY to clear the PHY status once the HS clocks are turned off */
|
||||
if ((clk_type & DSI_LINK_CLK) && (curr_state == DSI_CLK_OFF)
|
||||
&& (l_type == DSI_LINK_HS_CLK)) {
|
||||
if (atomic_read(&display->panel->esd_recovery_pending))
|
||||
dsi_display_phy_sw_reset(display);
|
||||
}
|
||||
|
||||
if ((clk_type & DSI_CORE_CLK) &&
|
||||
(curr_state == DSI_CLK_OFF)) {
|
||||
rc = dsi_display_phy_power_off(display);
|
||||
@@ -4064,7 +4125,8 @@ error:
|
||||
static bool dsi_display_validate_panel_resources(struct dsi_display *display)
|
||||
{
|
||||
if (!is_sim_panel(display)) {
|
||||
if (!gpio_is_valid(display->panel->reset_config.reset_gpio)) {
|
||||
if (!display->panel->host_config.ext_bridge_mode &&
|
||||
!gpio_is_valid(display->panel->reset_config.reset_gpio)) {
|
||||
DSI_ERR("invalid reset gpio for the panel\n");
|
||||
return false;
|
||||
}
|
||||
@@ -6796,6 +6858,57 @@ static void _dsi_display_populate_bit_clks(struct dsi_display *display, int star
|
||||
}
|
||||
}
|
||||
|
||||
static int dsi_display_mode_dyn_clk_cpy(struct dsi_display *display,
|
||||
struct dsi_display_mode *src, struct dsi_display_mode *dst)
|
||||
{
|
||||
int rc = 0;
|
||||
u32 count = 0;
|
||||
struct dsi_dyn_clk_caps *dyn_clk_caps;
|
||||
struct msm_dyn_clk_list *bit_clk_list;
|
||||
|
||||
dyn_clk_caps = &(display->panel->dyn_clk_caps);
|
||||
if (!dyn_clk_caps->dyn_clk_support)
|
||||
return rc;
|
||||
|
||||
count = dst->priv_info->bit_clk_list.count;
|
||||
bit_clk_list = &dst->priv_info->bit_clk_list;
|
||||
bit_clk_list->front_porches =
|
||||
kcalloc(count, sizeof(u32), GFP_KERNEL);
|
||||
if (!bit_clk_list->front_porches) {
|
||||
DSI_ERR("failed to allocate space for front porch list\n");
|
||||
rc = -ENOMEM;
|
||||
goto error;
|
||||
}
|
||||
|
||||
bit_clk_list->rates =
|
||||
kcalloc(count, sizeof(u32), GFP_KERNEL);
|
||||
if (!bit_clk_list->rates) {
|
||||
DSI_ERR("failed to allocate space for rates list\n");
|
||||
rc = -ENOMEM;
|
||||
goto error;
|
||||
}
|
||||
|
||||
memcpy(bit_clk_list->rates, src->priv_info->bit_clk_list.rates,
|
||||
count*sizeof(u32));
|
||||
|
||||
bit_clk_list->pixel_clks_khz =
|
||||
kcalloc(count, sizeof(u32), GFP_KERNEL);
|
||||
if (!bit_clk_list->pixel_clks_khz) {
|
||||
DSI_ERR("failed to allocate space for pixel clocks list\n");
|
||||
rc = -ENOMEM;
|
||||
goto error;
|
||||
}
|
||||
|
||||
return rc;
|
||||
|
||||
error:
|
||||
kfree(bit_clk_list->rates);
|
||||
kfree(bit_clk_list->front_porches);
|
||||
kfree(bit_clk_list->pixel_clks_khz);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int dsi_display_restore_bit_clk(struct dsi_display *display, struct dsi_display_mode *mode)
|
||||
{
|
||||
int i;
|
||||
@@ -6806,6 +6919,10 @@ int dsi_display_restore_bit_clk(struct dsi_display *display, struct dsi_display_
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* avoid updating bit_clk for dyn clk feature disabled usecase */
|
||||
if (!display->panel->dyn_clk_caps.dyn_clk_support)
|
||||
return 0;
|
||||
|
||||
clk_rate_hz = display->cached_clk_rate;
|
||||
|
||||
if (mode->priv_info->bit_clk_list.count) {
|
||||
@@ -6891,6 +7008,7 @@ int dsi_display_get_modes(struct dsi_display *display,
|
||||
int topology_override = NO_OVERRIDE;
|
||||
bool is_preferred = false;
|
||||
u32 frame_threshold_us = ctrl->ctrl->frame_threshold_time_us;
|
||||
struct msm_dyn_clk_list *bit_clk_list;
|
||||
|
||||
memset(&display_mode, 0, sizeof(display_mode));
|
||||
|
||||
@@ -6984,10 +7102,24 @@ int dsi_display_get_modes(struct dsi_display *display,
|
||||
* Qsync min fps for the mode will be populated in the timing info
|
||||
* in dsi_panel_get_mode function.
|
||||
*/
|
||||
sub_mode->priv_info->qsync_min_fps = sub_mode->timing.qsync_min_fps;
|
||||
display_mode.priv_info->qsync_min_fps = sub_mode->timing.qsync_min_fps;
|
||||
if (!dfps_caps.dfps_support || !support_video_mode)
|
||||
continue;
|
||||
|
||||
sub_mode->priv_info = kmemdup(display_mode.priv_info,
|
||||
sizeof(*sub_mode->priv_info), GFP_KERNEL);
|
||||
if (!sub_mode->priv_info) {
|
||||
rc = -ENOMEM;
|
||||
goto error;
|
||||
}
|
||||
|
||||
rc = dsi_display_mode_dyn_clk_cpy(display,
|
||||
&display_mode, sub_mode);
|
||||
if (rc) {
|
||||
DSI_ERR("unable to copy dyn clock list\n");
|
||||
goto error;
|
||||
}
|
||||
|
||||
sub_mode->mode_idx += (array_idx - 1);
|
||||
curr_refresh_rate = sub_mode->timing.refresh_rate;
|
||||
sub_mode->timing.refresh_rate = dfps_caps.dfps_list[i];
|
||||
@@ -7010,6 +7142,17 @@ int dsi_display_get_modes(struct dsi_display *display,
|
||||
/* Set first timing sub mode as preferred mode */
|
||||
display->modes[start].is_preferred = true;
|
||||
}
|
||||
|
||||
bit_clk_list = &display_mode.priv_info->bit_clk_list;
|
||||
if (support_video_mode && dfps_caps.dfps_support) {
|
||||
if (dyn_clk_caps->dyn_clk_support) {
|
||||
kfree(bit_clk_list->rates);
|
||||
kfree(bit_clk_list->front_porches);
|
||||
kfree(bit_clk_list->pixel_clks_khz);
|
||||
}
|
||||
|
||||
kfree(display_mode.priv_info);
|
||||
}
|
||||
}
|
||||
|
||||
if (dsc_modes && nondsc_modes)
|
||||
@@ -7206,7 +7349,8 @@ int dsi_display_find_mode(struct dsi_display *display,
|
||||
return rc;
|
||||
}
|
||||
|
||||
priv_info = kzalloc(sizeof(struct dsi_display_mode_priv_info), GFP_KERNEL);
|
||||
priv_info = kvzalloc(sizeof(struct dsi_display_mode_priv_info),
|
||||
GFP_KERNEL);
|
||||
if (ZERO_OR_NULL_PTR(priv_info))
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -7241,7 +7385,7 @@ int dsi_display_find_mode(struct dsi_display *display,
|
||||
cmp->priv_info = NULL;
|
||||
|
||||
mutex_unlock(&display->display_lock);
|
||||
kfree(priv_info);
|
||||
kvfree(priv_info);
|
||||
|
||||
if (!*out_mode) {
|
||||
DSI_ERR("[%s] failed to find mode for v_active %u h_active %u fps %u pclk %u\n",
|
||||
|
@@ -25,6 +25,7 @@
#define MAX_BL_LEVEL 4096
#define MAX_BL_SCALE_LEVEL 1024
#define MAX_SV_BL_SCALE_LEVEL 65535
#define SV_BL_SCALE_CAP (MAX_SV_BL_SCALE_LEVEL * 4)
#define DSI_CMD_PPS_SIZE 135

#define DSI_CMD_PPS_HDR_SIZE 7
@@ -930,6 +930,7 @@ void *dsi_parser_get_head_node(void *in,
    buf = parser->buf;

    memcpy(buf, data, size);
    buf[size] = '\0';

    strreplace(buf, '\n', ' ');
    strreplace(buf, '\t', '*');
@@ -774,6 +774,40 @@ error:
    return rc;
}

/**
 * dsi_phy_get_data_lanes_count() - Count the data lines need to be configured
 * @dsi_phy: DSI PHY handle.
 *
 * Return: Count of data lanes being used
 */
static inline int dsi_phy_get_data_lanes_count(struct msm_dsi_phy *phy)
{
    int num_of_lanes = 0;
    enum dsi_data_lanes dlanes;

    dlanes = phy->data_lanes;

    /**
     * For split link use case effective data lines need to be used
     * rather than total lanes on PHY for clock calculation and hence we
     * fall back pll->lanes to lanes_per_sublink rather than total
     * lanes.
     */
    if (phy->cfg.split_link.enabled)
        return phy->cfg.split_link.lanes_per_sublink;

    if (dlanes & DSI_DATA_LANE_0)
        num_of_lanes++;
    if (dlanes & DSI_DATA_LANE_1)
        num_of_lanes++;
    if (dlanes & DSI_DATA_LANE_2)
        num_of_lanes++;
    if (dlanes & DSI_DATA_LANE_3)
        num_of_lanes++;

    return num_of_lanes;
}

/**
 * dsi_phy_configure() - Configure DSI PHY PLL
 * @dsi_phy: DSI PHY handle.
@@ -789,7 +823,8 @@ int dsi_phy_configure(struct msm_dsi_phy *phy, bool commit)

    phy->pll->type = phy->cfg.phy_type;
    phy->pll->bpp = dsi_pixel_format_to_bpp(phy->dst_format);
    phy->pll->lanes = dsi_get_num_of_data_lanes(phy->data_lanes);
    phy->pll->lanes = dsi_phy_get_data_lanes_count(phy);

    if (phy->hw.ops.configure)
        rc = phy->hw.ops.configure(phy->pll, commit);
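The helper above supersedes the generic dsi_get_num_of_data_lanes() so that split-link
panels report lanes per sublink instead of the full PHY lane count when sizing the PLL. A
standalone sketch of the same counting logic follows; the enum mirrors the DSI_DATA_LANE_*
bitmask style, but every name here is illustrative, not the driver's definition.

#include <stdio.h>

enum example_data_lanes {           /* one bit per physical data lane */
    EX_DATA_LANE_0 = 1 << 0,
    EX_DATA_LANE_1 = 1 << 1,
    EX_DATA_LANE_2 = 1 << 2,
    EX_DATA_LANE_3 = 1 << 3,
};

static int example_lane_count(unsigned int lane_mask, int split_link_enabled,
                  int lanes_per_sublink)
{
    /* split link: only the lanes of one sublink feed the clock calculation */
    if (split_link_enabled)
        return lanes_per_sublink;

    return __builtin_popcount(lane_mask & 0xF); /* count the set lane bits */
}

int main(void)
{
    unsigned int mask = EX_DATA_LANE_0 | EX_DATA_LANE_1 | EX_DATA_LANE_2 | EX_DATA_LANE_3;

    printf("lanes = %d\n", example_lane_count(mask, 0, 0));         /* 4 */
    printf("lanes (split link) = %d\n", example_lane_count(mask, 1, 2));    /* 2 */
    return 0;
}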
@@ -1,17 +1,19 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include "msm_cooling_device.h"

#define BRIGHTNESS_CDEV_MAX 255

static int sde_cdev_get_max_brightness(struct thermal_cooling_device *cdev,
            unsigned long *state)
{
    struct sde_cdev *disp_cdev = (struct sde_cdev *)cdev->devdata;

    *state = disp_cdev->bd->props.max_brightness;
    *state = disp_cdev->bd->props.max_brightness / disp_cdev->cdev_sf;

    return 0;
}
@@ -21,7 +23,8 @@ static int sde_cdev_get_cur_brightness(struct thermal_cooling_device *cdev,
{
    struct sde_cdev *disp_cdev = (struct sde_cdev *)cdev->devdata;

    *state = disp_cdev->bd->props.max_brightness - disp_cdev->thermal_state;
    *state = ((disp_cdev->bd->props.max_brightness -
            disp_cdev->thermal_state) / disp_cdev->cdev_sf);

    return 0;
}
@@ -32,10 +35,11 @@ static int sde_cdev_set_cur_brightness(struct thermal_cooling_device *cdev,
    struct sde_cdev *disp_cdev = (struct sde_cdev *)cdev->devdata;
    unsigned long brightness_lvl = 0;

    if (state > disp_cdev->bd->props.max_brightness)
    if (state > disp_cdev->bd->props.max_brightness / disp_cdev->cdev_sf)
        return -EINVAL;

    brightness_lvl = disp_cdev->bd->props.max_brightness - state;
    brightness_lvl = disp_cdev->bd->props.max_brightness -
                (state * disp_cdev->cdev_sf);
    if (brightness_lvl == disp_cdev->thermal_state)
        return 0;
    disp_cdev->thermal_state = brightness_lvl;
@@ -67,6 +71,13 @@ struct sde_cdev *backlight_cdev_register(struct device *dev,
        return ERR_PTR(-ENOMEM);
    disp_cdev->thermal_state = 0;
    disp_cdev->bd = bd;

    if (bd->props.max_brightness > BRIGHTNESS_CDEV_MAX)
        disp_cdev->cdev_sf = (bd->props.max_brightness /
                BRIGHTNESS_CDEV_MAX);
    else
        disp_cdev->cdev_sf = 1;

    disp_cdev->cdev = thermal_of_cooling_device_register(dev->of_node,
                (char *)dev_name(&bd->dev), disp_cdev,
                &sde_cdev_ops);
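The hunk above introduces a scale factor (cdev_sf) so a backlight with more than 255 levels
still exposes a bounded number of cooling states: the reported state is the brightness
headroom divided by cdev_sf, and requesting state N caps brightness at max - N * cdev_sf.
A standalone sketch of that arithmetic, with illustrative names only:

#include <stdio.h>

#define EXAMPLE_CDEV_MAX 255    /* mirrors BRIGHTNESS_CDEV_MAX in the hunk above */

int main(void)
{
    unsigned long max_brightness = 4096;    /* assumed panel with 4096 levels */
    unsigned long sf = (max_brightness > EXAMPLE_CDEV_MAX) ?
                max_brightness / EXAMPLE_CDEV_MAX : 1;
    unsigned long state = 10;               /* cooling state requested by thermal */
    unsigned long brightness_cap = max_brightness - state * sf;

    printf("scale factor       = %lu\n", sf);                   /* 16 */
    printf("max cooling state  = %lu\n", max_brightness / sf);  /* 256 */
    printf("brightness cap @10 = %lu\n", brightness_cap);       /* 3936 */
    return 0;
}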
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 */

#ifndef __SDE_THERMAL_CORE_H__
@@ -16,6 +16,7 @@ struct sde_cdev {
    struct thermal_cooling_device *cdev;
    struct backlight_device *bd;
    unsigned long thermal_state;
    unsigned int cdev_sf;
};

#ifdef CONFIG_THERMAL_OF
@@ -107,6 +107,24 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
        drm_fb_helper_hotplug_event(priv->fbdev);
}

static void msm_drm_display_thread_priority_worker(struct kthread_work *work)
{
    int ret = 0;
    struct sched_param param = { 0 };
    struct task_struct *task = current->group_leader;

    /**
     * this priority was found during empiric testing to have appropriate
     * realtime scheduling to process display updates and interact with
     * other real time and normal priority task
     */
    param.sched_priority = 16;
    ret = sched_setscheduler(task, SCHED_FIFO, &param);
    if (ret)
        pr_warn("pid:%d name:%s priority update failed: %d\n",
                current->tgid, task->comm, ret);
}

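The worker above lets each display kthread raise its own priority: rather than the probe
path calling sched_setscheduler() on every thread it spawns, one work item is queued to
each kthread worker and runs the priority change from inside the thread. A hedged sketch
of that pattern is below; the names are illustrative, and only the kthread and scheduler
calls are real kernel APIs.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>

static struct kthread_worker example_worker;
static struct kthread_work example_priority_work;

static void example_priority_worker(struct kthread_work *work)
{
    struct sched_param param = { .sched_priority = 16 };

    /* runs inside the worker thread, so "current" is the thread to boost */
    if (sched_setscheduler(current, SCHED_FIFO, &param))
        pr_warn("example: priority update failed\n");
}

static int example_start_worker(void)
{
    struct task_struct *thread;

    kthread_init_worker(&example_worker);
    thread = kthread_run(kthread_worker_fn, &example_worker, "example_worker");
    if (IS_ERR(thread))
        return PTR_ERR(thread);

    kthread_init_work(&example_priority_work, example_priority_worker);
    kthread_queue_work(&example_worker, &example_priority_work);
    return 0;
}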
/**
|
||||
* msm_atomic_helper_check - validate state object
|
||||
* @dev: DRM device
|
||||
@@ -663,20 +681,13 @@ static int msm_component_bind_all(struct device *dev,
|
||||
}
|
||||
#endif
|
||||
|
||||
static int msm_drm_display_thread_create(struct sched_param param,
|
||||
struct msm_drm_private *priv, struct drm_device *ddev,
|
||||
static int msm_drm_display_thread_create(struct msm_drm_private *priv, struct drm_device *ddev,
|
||||
struct device *dev)
|
||||
{
|
||||
int i, ret = 0;
|
||||
|
||||
/**
|
||||
* this priority was found during empiric testing to have appropriate
|
||||
* realtime scheduling to process display updates and interact with
|
||||
* other real time and normal priority task
|
||||
*/
|
||||
param.sched_priority = 16;
|
||||
kthread_init_work(&priv->thread_priority_work, msm_drm_display_thread_priority_worker);
|
||||
for (i = 0; i < priv->num_crtcs; i++) {
|
||||
|
||||
/* initialize display thread */
|
||||
priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
|
||||
kthread_init_worker(&priv->disp_thread[i].worker);
|
||||
@@ -685,11 +696,7 @@ static int msm_drm_display_thread_create(struct sched_param param,
|
||||
kthread_run(kthread_worker_fn,
|
||||
&priv->disp_thread[i].worker,
|
||||
"crtc_commit:%d", priv->disp_thread[i].crtc_id);
|
||||
ret = sched_setscheduler(priv->disp_thread[i].thread,
|
||||
SCHED_FIFO, ¶m);
|
||||
if (ret)
|
||||
pr_warn("display thread priority update failed: %d\n",
|
||||
ret);
|
||||
kthread_queue_work(&priv->disp_thread[i].worker, &priv->thread_priority_work);
|
||||
|
||||
if (IS_ERR(priv->disp_thread[i].thread)) {
|
||||
dev_err(dev, "failed to create crtc_commit kthread\n");
|
||||
@@ -711,11 +718,7 @@ static int msm_drm_display_thread_create(struct sched_param param,
|
||||
* frame_pending counters beyond 2. This can lead to commit
|
||||
* failure at crtc commit level.
|
||||
*/
|
||||
ret = sched_setscheduler(priv->event_thread[i].thread,
|
||||
SCHED_FIFO, ¶m);
|
||||
if (ret)
|
||||
pr_warn("display event thread priority update failed: %d\n",
|
||||
ret);
|
||||
kthread_queue_work(&priv->event_thread[i].worker, &priv->thread_priority_work);
|
||||
|
||||
if (IS_ERR(priv->event_thread[i].thread)) {
|
||||
dev_err(dev, "failed to create crtc_event kthread\n");
|
||||
@@ -750,12 +753,7 @@ static int msm_drm_display_thread_create(struct sched_param param,
|
||||
kthread_init_worker(&priv->pp_event_worker);
|
||||
priv->pp_event_thread = kthread_run(kthread_worker_fn,
|
||||
&priv->pp_event_worker, "pp_event");
|
||||
|
||||
ret = sched_setscheduler(priv->pp_event_thread,
|
||||
SCHED_FIFO, ¶m);
|
||||
if (ret)
|
||||
pr_warn("pp_event thread priority update failed: %d\n",
|
||||
ret);
|
||||
kthread_queue_work(&priv->pp_event_worker, &priv->thread_priority_work);
|
||||
|
||||
if (IS_ERR(priv->pp_event_thread)) {
|
||||
dev_err(dev, "failed to create pp_event kthread\n");
|
||||
@@ -765,8 +763,8 @@ static int msm_drm_display_thread_create(struct sched_param param,
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
static struct msm_kms *_msm_drm_component_init_helper(
|
||||
struct msm_drm_private *priv,
|
||||
struct drm_device *ddev, struct device *dev,
|
||||
@@ -891,7 +889,6 @@ static int msm_drm_component_init(struct device *dev)
|
||||
struct msm_drm_private *priv = ddev->dev_private;
|
||||
struct msm_kms *kms = NULL;
|
||||
int ret;
|
||||
struct sched_param param = { 0 };
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
ret = msm_mdss_init(ddev);
|
||||
@@ -930,7 +927,7 @@ static int msm_drm_component_init(struct device *dev)
|
||||
sde_rotator_register();
|
||||
sde_rotator_smmu_driver_register();
|
||||
|
||||
ret = msm_drm_display_thread_create(param, priv, ddev, dev);
|
||||
ret = msm_drm_display_thread_create(priv, ddev, dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "msm_drm_display_thread_create failed\n");
|
||||
goto fail;
|
||||
@@ -1501,7 +1498,7 @@ void msm_mode_object_event_notify(struct drm_mode_object *obj,
|
||||
|
||||
static int msm_release(struct inode *inode, struct file *filp)
|
||||
{
|
||||
struct drm_file *file_priv = filp->private_data;
|
||||
struct drm_file *file_priv;
|
||||
struct drm_minor *minor;
|
||||
struct drm_device *dev;
|
||||
struct msm_drm_private *priv;
|
||||
@@ -1513,6 +1510,7 @@ static int msm_release(struct inode *inode, struct file *filp)
|
||||
|
||||
mutex_lock(&msm_release_lock);
|
||||
|
||||
file_priv = filp->private_data;
|
||||
if (!file_priv) {
|
||||
ret = -EINVAL;
|
||||
goto end;
|
||||
|
@@ -175,7 +175,6 @@ enum msm_mdp_crtc_property {
|
||||
CRTC_PROP_ROT_CLK,
|
||||
CRTC_PROP_ROI_V1,
|
||||
CRTC_PROP_SECURITY_LEVEL,
|
||||
CRTC_PROP_IDLE_TIMEOUT,
|
||||
CRTC_PROP_DEST_SCALER,
|
||||
CRTC_PROP_CAPTURE_OUTPUT,
|
||||
|
||||
@@ -980,6 +979,8 @@ struct msm_drm_private {
|
||||
struct task_struct *pp_event_thread;
|
||||
struct kthread_worker pp_event_worker;
|
||||
|
||||
struct kthread_work thread_priority_work;
|
||||
|
||||
unsigned int num_encoders;
|
||||
struct drm_encoder *encoders[MAX_ENCODERS];
|
||||
|
||||
|
@@ -169,6 +169,8 @@ static bool feature_handoff_mask[SDE_CP_CRTC_MAX_FEATURES] = {
|
||||
[SDE_CP_CRTC_DSPP_SIXZONE] = 1,
|
||||
[SDE_CP_CRTC_DSPP_GAMUT] = 1,
|
||||
[SDE_CP_CRTC_DSPP_DITHER] = 1,
|
||||
[SDE_CP_CRTC_DSPP_SPR_INIT] = 1,
|
||||
[SDE_CP_CRTC_DSPP_DEMURA_INIT] = 1,
|
||||
};
|
||||
|
||||
typedef void (*dspp_cap_update_func_t)(struct sde_crtc *crtc,
|
||||
@@ -908,6 +910,18 @@ static int _set_demura_feature(struct sde_hw_dspp *hw_dspp,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int _feature_unsupported(struct sde_hw_dspp *hw_dspp,
|
||||
struct sde_hw_cp_cfg *hw_cfg,
|
||||
struct sde_crtc *sde_crtc)
|
||||
{
|
||||
if (!hw_dspp || !hw_cfg || !sde_crtc) {
|
||||
DRM_ERROR("invalid argumets\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
feature_wrapper check_crtc_feature_wrappers[SDE_CP_CRTC_MAX_FEATURES];
|
||||
#define setup_check_crtc_feature_wrappers(wrappers) \
|
||||
do { \
|
||||
@@ -2192,6 +2206,45 @@ exit:
|
||||
sde_cp_disable_features(crtc);
|
||||
}
|
||||
|
||||
void sde_cp_reset_unsupported_feature_wrappers(struct sde_mdss_cfg *catalog)
|
||||
{
|
||||
if (!catalog) {
|
||||
DRM_ERROR("invalid catalog\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!catalog->rc_count) {
|
||||
check_crtc_feature_wrappers[SDE_CP_CRTC_DSPP_RC_MASK] =
|
||||
_feature_unsupported;
|
||||
check_crtc_pu_feature_wrappers[SDE_CP_CRTC_DSPP_RC_PU] =
|
||||
_feature_unsupported;
|
||||
set_crtc_feature_wrappers[SDE_CP_CRTC_DSPP_RC_MASK] =
|
||||
_feature_unsupported;
|
||||
set_crtc_pu_feature_wrappers[SDE_CP_CRTC_DSPP_RC_PU] =
|
||||
_feature_unsupported;
|
||||
}
|
||||
|
||||
if (!catalog->demura_count) {
|
||||
check_crtc_pu_feature_wrappers[SDE_CP_CRTC_DSPP_DEMURA_PU] =
|
||||
_feature_unsupported;
|
||||
set_crtc_feature_wrappers[SDE_CP_CRTC_DSPP_DEMURA_INIT] =
|
||||
_feature_unsupported;
|
||||
set_crtc_pu_feature_wrappers[SDE_CP_CRTC_DSPP_DEMURA_PU] =
|
||||
_feature_unsupported;
|
||||
}
|
||||
|
||||
if (!catalog->spr_count) {
|
||||
check_crtc_pu_feature_wrappers[SDE_CP_CRTC_DSPP_SPR_PU] =
|
||||
_feature_unsupported;
|
||||
set_crtc_feature_wrappers[SDE_CP_CRTC_DSPP_SPR_INIT] =
|
||||
_feature_unsupported;
|
||||
set_crtc_pu_feature_wrappers[SDE_CP_CRTC_DSPP_SPR_PU] =
|
||||
_feature_unsupported;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void sde_cp_crtc_install_properties(struct drm_crtc *crtc)
|
||||
{
|
||||
struct sde_kms *kms = NULL;
|
||||
@@ -2249,6 +2302,7 @@ void sde_cp_crtc_install_properties(struct drm_crtc *crtc)
|
||||
setup_check_crtc_pu_feature_wrappers(
|
||||
check_crtc_pu_feature_wrappers);
|
||||
setup_dspp_caps_funcs(dspp_cap_update_func);
|
||||
sde_cp_reset_unsupported_feature_wrappers(catalog);
|
||||
}
|
||||
if (!priv->cp_property)
|
||||
goto exit;
|
||||
@@ -2612,12 +2666,11 @@ void sde_cp_crtc_destroy_properties(struct drm_crtc *crtc)
|
||||
INIT_LIST_HEAD(&sde_crtc->ltm_buf_busy);
|
||||
}
|
||||
|
||||
void sde_cp_crtc_suspend(struct drm_crtc *crtc)
|
||||
void sde_cp_crtc_mark_features_dirty(struct drm_crtc *crtc)
|
||||
{
|
||||
struct sde_crtc *sde_crtc = NULL;
|
||||
struct sde_cp_node *prop_node = NULL, *n = NULL;
|
||||
bool ad_suspend = false;
|
||||
unsigned long irq_flags;
|
||||
|
||||
if (!crtc) {
|
||||
DRM_ERROR("crtc %pK\n", crtc);
|
||||
@@ -2644,12 +2697,30 @@ void sde_cp_crtc_suspend(struct drm_crtc *crtc)
|
||||
}
|
||||
mutex_unlock(&sde_crtc->crtc_cp_lock);
|
||||
|
||||
if (ad_suspend)
|
||||
_sde_cp_ad_set_prop(sde_crtc, AD_SUSPEND);
|
||||
}
|
||||
|
||||
void sde_cp_crtc_suspend(struct drm_crtc *crtc)
|
||||
{
|
||||
struct sde_crtc *sde_crtc = NULL;
|
||||
unsigned long irq_flags;
|
||||
|
||||
if (!crtc) {
|
||||
DRM_ERROR("crtc %pK\n", crtc);
|
||||
return;
|
||||
}
|
||||
sde_crtc = to_sde_crtc(crtc);
|
||||
if (!sde_crtc) {
|
||||
DRM_ERROR("sde_crtc %pK\n", sde_crtc);
|
||||
return;
|
||||
}
|
||||
|
||||
sde_cp_crtc_mark_features_dirty(crtc);
|
||||
|
||||
spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
|
||||
sde_crtc->ltm_hist_en = false;
|
||||
spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
|
||||
|
||||
if (ad_suspend)
|
||||
_sde_cp_ad_set_prop(sde_crtc, AD_SUSPEND);
|
||||
}
|
||||
|
||||
void sde_cp_crtc_resume(struct drm_crtc *crtc)
|
||||
|
@@ -228,6 +228,12 @@ void sde_cp_crtc_apply_properties(struct drm_crtc *crtc);
|
||||
int sde_cp_crtc_get_property(struct drm_crtc *crtc,
|
||||
struct drm_property *property, uint64_t *val);
|
||||
|
||||
/**
|
||||
* sde_cp_crtc_mark_features_dirty: Move the cp features from active list to dirty list
|
||||
* @crtc: Pointer to crtc.
|
||||
*/
|
||||
void sde_cp_crtc_mark_features_dirty(struct drm_crtc *crtc);
|
||||
|
||||
/**
|
||||
* sde_cp_crtc_suspend: Suspend the crtc features
|
||||
* @crtc: Pointer to crtc.
|
||||
|
@@ -105,6 +105,8 @@ static void sde_dimming_bl_notify(struct sde_connector *conn, struct dsi_backlig
    bl_info.bl_scale_sv = config->bl_scale_sv;
    bl_info.status = config->dimming_status;
    bl_info.min_bl = config->dimming_min_bl;
    bl_info.bl_scale_max = MAX_BL_SCALE_LEVEL;
    bl_info.bl_scale_sv_max = SV_BL_SCALE_CAP;
    event.type = DRM_EVENT_DIMMING_BL;
    event.length = sizeof(bl_info);
    SDE_DEBUG("dimming BL event bl_level %d bl_scale %d, bl_scale_sv = %d "
@@ -818,7 +820,8 @@ static int _sde_connector_update_bl_scale(struct sde_connector *c_conn)

    bl_config->bl_scale = c_conn->bl_scale > MAX_BL_SCALE_LEVEL ?
            MAX_BL_SCALE_LEVEL : c_conn->bl_scale;
    bl_config->bl_scale_sv = c_conn->bl_scale_sv;
    bl_config->bl_scale_sv = c_conn->bl_scale_sv > SV_BL_SCALE_CAP ?
            SV_BL_SCALE_CAP : c_conn->bl_scale_sv;

    SDE_DEBUG("bl_scale = %u, bl_scale_sv = %u, bl_level = %u\n",
            bl_config->bl_scale, bl_config->bl_scale_sv,
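The second hunk clamps the user-supplied sun-visibility scale the same way the regular
scale is clamped, so userspace cannot push bl_scale_sv past SV_BL_SCALE_CAP (4 x 65535).
The sketch below only illustrates the clamp-then-scale arithmetic such scale values are
commonly used for (level * scale / full_scale); how the scale is finally applied is not
part of this hunk, so treat the formula and all names as assumptions.

#include <stdio.h>

#define EX_MAX_BL_SCALE     1024U           /* mirrors MAX_BL_SCALE_LEVEL */
#define EX_SV_BL_SCALE_CAP  (65535U * 4)    /* mirrors SV_BL_SCALE_CAP */

static unsigned int ex_clamp(unsigned int v, unsigned int cap)
{
    return v > cap ? cap : v;
}

int main(void)
{
    unsigned int bl_level = 2048;                           /* assumed raw level */
    unsigned int bl_scale = ex_clamp(512, EX_MAX_BL_SCALE);             /* 512 */
    unsigned int bl_scale_sv = ex_clamp(300000, EX_SV_BL_SCALE_CAP);    /* 262140 */

    printf("bl_scale     = %u\n", bl_scale);
    printf("bl_scale_sv  = %u\n", bl_scale_sv);
    printf("scaled level = %u\n", bl_level * bl_scale / EX_MAX_BL_SCALE);   /* 1024 */
    return 0;
}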
@@ -517,7 +517,7 @@ static void _sde_core_uidle_setup_cfg(struct sde_kms *kms,
|
||||
uidle->ops.set_uidle_ctl(uidle, &cfg);
|
||||
}
|
||||
|
||||
static void _sde_core_uidle_setup_ctl(struct drm_crtc *crtc,
|
||||
void sde_core_perf_uidle_setup_ctl(struct drm_crtc *crtc,
|
||||
bool enable)
|
||||
{
|
||||
struct drm_encoder *drm_enc;
|
||||
@@ -547,7 +547,7 @@ static int _sde_core_perf_enable_uidle(struct sde_kms *kms,
|
||||
SDE_EVT32(uidle_state);
|
||||
_sde_core_uidle_setup_wd(kms, enable);
|
||||
_sde_core_uidle_setup_cfg(kms, uidle_state);
|
||||
_sde_core_uidle_setup_ctl(crtc, enable);
|
||||
sde_core_perf_uidle_setup_ctl(crtc, true);
|
||||
|
||||
kms->perf.uidle_enabled = enable;
|
||||
|
||||
@@ -602,7 +602,7 @@ void sde_core_perf_crtc_update_uidle(struct drm_crtc *crtc,
|
||||
struct drm_crtc *tmp_crtc;
|
||||
struct sde_kms *kms;
|
||||
enum sde_uidle_state uidle_status = UIDLE_STATE_FAL1_FAL10;
|
||||
u32 fps;
|
||||
u32 fps, num_crtc = 0;
|
||||
|
||||
if (!crtc) {
|
||||
SDE_ERROR("invalid crtc\n");
|
||||
@@ -630,6 +630,7 @@ void sde_core_perf_crtc_update_uidle(struct drm_crtc *crtc,
|
||||
|
||||
if (_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
|
||||
|
||||
num_crtc++;
|
||||
/*
|
||||
* If DFPS is enabled with VFP, SDE clock and
|
||||
* transfer time will get fixed at max FPS
|
||||
@@ -647,7 +648,7 @@ void sde_core_perf_crtc_update_uidle(struct drm_crtc *crtc,
|
||||
_sde_core_perf_is_cwb(tmp_crtc),
|
||||
uidle_status, uidle_crtc_status, enable);
|
||||
|
||||
if (_sde_core_perf_is_wb(tmp_crtc) ||
|
||||
if ((num_crtc > 1) || _sde_core_perf_is_wb(tmp_crtc) ||
|
||||
_sde_core_perf_is_cwb(tmp_crtc) || !fps) {
|
||||
uidle_status = UIDLE_STATE_DISABLE;
|
||||
break;
|
||||
@@ -672,6 +673,8 @@ void sde_core_perf_crtc_update_uidle(struct drm_crtc *crtc,
|
||||
_sde_core_perf_enable_uidle(kms, crtc,
|
||||
enable ? uidle_status : UIDLE_STATE_DISABLE);
|
||||
|
||||
kms->perf.catalog->uidle_cfg.dirty = !enable;
|
||||
|
||||
/* If perf counters enabled, set them up now */
|
||||
if (kms->catalog->uidle_cfg.debugfs_perf)
|
||||
_sde_core_perf_uidle_setup_cntr(kms, enable);
|
||||
@@ -846,6 +849,11 @@ void sde_core_perf_crtc_reserve_res(struct drm_crtc *crtc, u64 reserve_rate)
|
||||
/* use current perf, which are the values voted */
|
||||
sde_crtc = to_sde_crtc(crtc);
|
||||
kms = _sde_crtc_get_kms(crtc);
|
||||
if (!kms || !kms->dev) {
|
||||
SDE_ERROR("invalid kms\n");
|
||||
return;
|
||||
}
|
||||
|
||||
priv = kms->dev->dev_private;
|
||||
|
||||
kms->perf.core_clk_reserve_rate = max(kms->perf.core_clk_reserve_rate, reserve_rate);
|
||||
|
@@ -146,6 +146,13 @@ void sde_core_perf_crtc_reserve_res(struct drm_crtc *crtc, u64 reserve_rate);
 */
void sde_core_perf_crtc_update_uidle(struct drm_crtc *crtc, bool enable);

/**
 * sde_core_perf_uidle_setup_ctl - enable uidle DB control
 * @crtc: Pointer to crtc
 * @enable: enable/disable uidle DB
 */
void sde_core_perf_uidle_setup_ctl(struct drm_crtc *crtc, bool enable);

/**
 * sde_core_perf_destroy - destroy the given core performance context
 * @perf: Pointer to core performance context
@@ -1,4 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
* Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (C) 2013 Red Hat
|
||||
* Author: Rob Clark <robdclark@gmail.com>
|
||||
@@ -73,6 +74,9 @@ static int sde_crtc_pm_event_handler(struct drm_crtc *crtc, bool en,
|
||||
static int _sde_crtc_set_noise_layer(struct sde_crtc *sde_crtc,
|
||||
struct sde_crtc_state *cstate,
|
||||
void __user *usr_ptr);
|
||||
static int sde_crtc_vm_release_handler(struct drm_crtc *crtc_drm,
|
||||
bool en, struct sde_irq_callback *irq);
|
||||
|
||||
|
||||
static struct sde_crtc_custom_events custom_events[] = {
|
||||
{DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt},
|
||||
@@ -84,6 +88,7 @@ static struct sde_crtc_custom_events custom_events[] = {
|
||||
{DRM_EVENT_LTM_WB_PB, sde_cp_ltm_wb_pb_interrupt},
|
||||
{DRM_EVENT_LTM_OFF, sde_cp_ltm_off_event_handler},
|
||||
{DRM_EVENT_MMRM_CB, sde_crtc_mmrm_interrupt_handler},
|
||||
{DRM_EVENT_VM_RELEASE, sde_crtc_vm_release_handler},
|
||||
};
|
||||
|
||||
/* default input fence timeout, in ms */
|
||||
@@ -3713,8 +3718,13 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
|
||||
_sde_crtc_dest_scaler_setup(crtc);
|
||||
sde_cp_crtc_apply_noise(crtc, old_state);
|
||||
|
||||
if (crtc->state->mode_changed)
|
||||
if (crtc->state->mode_changed || sde_kms->perf.catalog->uidle_cfg.dirty) {
|
||||
sde_core_perf_crtc_update_uidle(crtc, true);
|
||||
} else if (!test_bit(SDE_CRTC_DIRTY_UIDLE, &sde_crtc->revalidate_mask) &&
|
||||
sde_kms->perf.uidle_enabled)
|
||||
sde_core_perf_uidle_setup_ctl(crtc, false);
|
||||
|
||||
test_and_clear_bit(SDE_CRTC_DIRTY_UIDLE, &sde_crtc->revalidate_mask);
|
||||
|
||||
/*
|
||||
* Since CP properties use AXI buffer to program the
|
||||
@@ -3734,7 +3744,7 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
|
||||
sde_cp_crtc_apply_properties(crtc);
|
||||
|
||||
if (!sde_crtc->enabled)
|
||||
sde_cp_crtc_suspend(crtc);
|
||||
sde_cp_crtc_mark_features_dirty(crtc);
|
||||
|
||||
/*
|
||||
* PP_DONE irq is only used by command mode for now.
|
||||
@@ -3794,7 +3804,7 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
|
||||
dev = crtc->dev;
|
||||
priv = dev->dev_private;
|
||||
|
||||
if ((sde_crtc->cache_state == CACHE_STATE_PRE_CACHE) &&
|
||||
if ((sde_crtc->cache_state == CACHE_STATE_NORMAL) &&
|
||||
sde_crtc_get_property(cstate, CRTC_PROP_CACHE_STATE))
|
||||
sde_crtc_static_img_control(crtc, CACHE_STATE_FRAME_WRITE,
|
||||
false);
|
||||
@@ -3963,37 +3973,6 @@ static void _sde_crtc_remove_pipe_flush(struct drm_crtc *crtc)
|
||||
}
|
||||
}
|
||||
|
||||
static void _sde_crtc_schedule_idle_notify(struct drm_crtc *crtc)
|
||||
{
|
||||
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
|
||||
struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
|
||||
struct sde_kms *sde_kms = _sde_crtc_get_kms(crtc);
|
||||
struct msm_drm_private *priv;
|
||||
struct msm_drm_thread *event_thread;
|
||||
int idle_time = 0;
|
||||
|
||||
if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
|
||||
return;
|
||||
|
||||
priv = sde_kms->dev->dev_private;
|
||||
|
||||
idle_time = sde_crtc_get_property(cstate, CRTC_PROP_IDLE_TIMEOUT);
|
||||
|
||||
if (!idle_time ||
|
||||
!sde_encoder_check_curr_mode(sde_crtc->mixers[0].encoder,
|
||||
MSM_DISPLAY_VIDEO_MODE) ||
|
||||
(crtc->index >= ARRAY_SIZE(priv->event_thread)) ||
|
||||
(sde_crtc->cache_state > CACHE_STATE_NORMAL))
|
||||
return;
|
||||
|
||||
/* schedule the idle notify delayed work */
|
||||
event_thread = &priv->event_thread[crtc->index];
|
||||
|
||||
kthread_mod_delayed_work(&event_thread->worker,
|
||||
&sde_crtc->idle_notify_work, msecs_to_jiffies(idle_time));
|
||||
SDE_DEBUG("schedule idle notify work in %dms\n", idle_time);
|
||||
}
|
||||
|
||||
/**
|
||||
* sde_crtc_reset_hw - attempt hardware reset on errors
|
||||
* @crtc: Pointer to DRM crtc instance
|
||||
@@ -4218,8 +4197,6 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc,
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
}
|
||||
|
||||
_sde_crtc_schedule_idle_notify(crtc);
|
||||
|
||||
SDE_ATRACE_END("crtc_commit");
|
||||
}
|
||||
|
||||
@@ -4434,6 +4411,7 @@ void sde_crtc_reset_sw_state(struct drm_crtc *crtc)
|
||||
|
||||
/* mark other properties which need to be dirty for next update */
|
||||
set_bit(SDE_CRTC_DIRTY_DIM_LAYERS, &sde_crtc->revalidate_mask);
|
||||
set_bit(SDE_CRTC_DIRTY_UIDLE, &sde_crtc->revalidate_mask);
|
||||
if (cstate->num_ds_enabled)
|
||||
set_bit(SDE_CRTC_DIRTY_DEST_SCALER, cstate->dirty);
|
||||
}
|
||||
@@ -4637,7 +4615,6 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
|
||||
mutex_lock(&sde_crtc->crtc_lock);
|
||||
|
||||
kthread_cancel_delayed_work_sync(&sde_crtc->static_cache_read_work);
|
||||
kthread_cancel_delayed_work_sync(&sde_crtc->idle_notify_work);
|
||||
|
||||
SDE_EVT32(DRMID(crtc), sde_crtc->enabled, crtc->state->active,
|
||||
crtc->state->enable, sde_crtc->cached_encoder_mask);
|
||||
@@ -5974,10 +5951,6 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
|
||||
|
||||
sde_crtc_install_perf_properties(sde_crtc, sde_kms, catalog, info);
|
||||
|
||||
msm_property_install_range(&sde_crtc->property_info,
|
||||
"idle_time", 0, 0, U64_MAX, 0,
|
||||
CRTC_PROP_IDLE_TIMEOUT);
|
||||
|
||||
if (test_bit(SDE_FEATURE_TRUSTED_VM, catalog->features)) {
|
||||
int init_idx = sde_in_trusted_vm(sde_kms) ? 1 : 0;
|
||||
|
||||
@@ -7167,12 +7140,8 @@ void sde_crtc_static_img_control(struct drm_crtc *crtc,
|
||||
kthread_cancel_delayed_work_sync(
|
||||
&sde_crtc->static_cache_read_work);
|
||||
break;
|
||||
case CACHE_STATE_PRE_CACHE:
|
||||
if (sde_crtc->cache_state != CACHE_STATE_NORMAL)
|
||||
return;
|
||||
break;
|
||||
case CACHE_STATE_FRAME_WRITE:
|
||||
if (sde_crtc->cache_state != CACHE_STATE_PRE_CACHE)
|
||||
if (sde_crtc->cache_state != CACHE_STATE_NORMAL)
|
||||
return;
|
||||
break;
|
||||
case CACHE_STATE_FRAME_READ:
|
||||
@@ -7268,33 +7237,10 @@ void sde_crtc_static_cache_read_kickoff(struct drm_crtc *crtc)
|
||||
msecs_to_jiffies(msecs_fps));
|
||||
}
|
||||
|
||||
/*
|
||||
* __sde_crtc_idle_notify_work - signal idle timeout to user space
|
||||
*/
|
||||
static void __sde_crtc_idle_notify_work(struct kthread_work *work)
|
||||
{
|
||||
struct sde_crtc *sde_crtc = container_of(work, struct sde_crtc,
|
||||
idle_notify_work.work);
|
||||
struct drm_crtc *crtc;
|
||||
int ret = 0;
|
||||
|
||||
if (!sde_crtc) {
|
||||
SDE_ERROR("invalid sde crtc\n");
|
||||
} else {
|
||||
crtc = &sde_crtc->base;
|
||||
sde_crtc_event_notify(crtc, DRM_EVENT_IDLE_NOTIFY, sizeof(u32), ret);
|
||||
|
||||
SDE_DEBUG("crtc[%d]: idle timeout notified\n", crtc->base.id);
|
||||
|
||||
sde_crtc_static_img_control(crtc, CACHE_STATE_PRE_CACHE, false);
|
||||
}
|
||||
}
|
||||
|
||||
void sde_crtc_cancel_delayed_work(struct drm_crtc *crtc)
|
||||
{
|
||||
struct sde_crtc *sde_crtc;
|
||||
struct sde_crtc_state *cstate;
|
||||
bool idle_status;
|
||||
bool cache_status;
|
||||
|
||||
if (!crtc || !crtc->state)
|
||||
@@ -7303,9 +7249,8 @@ void sde_crtc_cancel_delayed_work(struct drm_crtc *crtc)
|
||||
sde_crtc = to_sde_crtc(crtc);
|
||||
cstate = to_sde_crtc_state(crtc->state);
|
||||
|
||||
idle_status = kthread_cancel_delayed_work_sync(&sde_crtc->idle_notify_work);
|
||||
cache_status = kthread_cancel_delayed_work_sync(&sde_crtc->static_cache_read_work);
|
||||
SDE_EVT32(DRMID(crtc), idle_status, cache_status);
|
||||
SDE_EVT32(DRMID(crtc), cache_status);
|
||||
}
|
||||
|
||||
/* initialize crtc */
|
||||
@@ -7401,8 +7346,6 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
|
||||
sde_crtc->new_perf.llcc_active[i] = false;
|
||||
}
|
||||
|
||||
kthread_init_delayed_work(&sde_crtc->idle_notify_work,
|
||||
__sde_crtc_idle_notify_work);
|
||||
kthread_init_delayed_work(&sde_crtc->static_cache_read_work,
|
||||
__sde_crtc_static_cache_read_work);
|
||||
|
||||
@@ -7636,6 +7579,11 @@ static int sde_crtc_mmrm_interrupt_handler(struct drm_crtc *crtc_drm,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sde_crtc_vm_release_handler(struct drm_crtc *crtc_drm,
|
||||
bool en, struct sde_irq_callback *irq)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
/**
|
||||
* sde_crtc_update_cont_splash_settings - update mixer settings
|
||||
* and initial clk during device bootup for cont_splash use case
|
||||
@@ -7792,3 +7740,8 @@ void sde_crtc_disable_cp_features(struct drm_crtc *crtc)
|
||||
{
|
||||
sde_cp_disable_features(crtc);
|
||||
}
|
||||
|
||||
void _sde_crtc_vm_release_notify(struct drm_crtc *crtc)
|
||||
{
|
||||
sde_crtc_event_notify(crtc, DRM_EVENT_VM_RELEASE, sizeof(uint32_t), 1);
|
||||
}
|
||||
|
@@ -1,4 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
* Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
|
||||
* Copyright (C) 2013 Red Hat
|
||||
* Author: Rob Clark <robdclark@gmail.com>
|
||||
@@ -286,7 +287,6 @@ struct sde_frame_data {
|
||||
* @misr_reconfigure : boolean entry indicates misr reconfigure status
|
||||
* @misr_frame_count : misr frame count provided by client
|
||||
* @misr_data : store misr data before turning off the clocks.
|
||||
* @idle_notify_work: delayed worker to notify idle timeout to user space
|
||||
* @power_event : registered power event handle
|
||||
* @cur_perf : current performance committed to clock/bandwidth driver
|
||||
* @plane_mask_old: keeps track of the planes used in the previous commit
|
||||
@@ -376,7 +376,6 @@ struct sde_crtc {
|
||||
bool misr_enable_debugfs;
|
||||
bool misr_reconfigure;
|
||||
u32 misr_frame_count;
|
||||
struct kthread_delayed_work idle_notify_work;
|
||||
|
||||
struct sde_power_event *power_event;
|
||||
|
||||
@@ -426,6 +425,7 @@ enum sde_crtc_dirty_flags {
|
||||
SDE_CRTC_DIRTY_DEST_SCALER,
|
||||
SDE_CRTC_DIRTY_DIM_LAYERS,
|
||||
SDE_CRTC_NOISE_LAYER,
|
||||
SDE_CRTC_DIRTY_UIDLE,
|
||||
SDE_CRTC_DIRTY_MAX,
|
||||
};
|
||||
|
||||
@@ -1061,4 +1061,9 @@ void sde_crtc_cancel_delayed_work(struct drm_crtc *crtc);
|
||||
*/
|
||||
struct drm_encoder *sde_crtc_get_src_encoder_of_clone(struct drm_crtc *crtc);
|
||||
|
||||
/*
|
||||
* _sde_crtc_vm_release_notify- send event to usermode on vm release
|
||||
*/
|
||||
void _sde_crtc_vm_release_notify(struct drm_crtc *crtc);
|
||||
|
||||
#endif /* _SDE_CRTC_H_ */
|
||||
|
@@ -1564,20 +1564,9 @@ static int _sde_encoder_update_rsc_client(
        (rsc_config->prefill_lines != mode_info->prefill_lines) ||
        (rsc_config->jitter_numer != mode_info->jitter_numer) ||
        (rsc_config->jitter_denom != mode_info->jitter_denom)) {

        rsc_config->fps = mode_info->frame_rate;
        rsc_config->vtotal = mode_info->vtotal;
        /*
         * for video mode, prefill lines should not go beyond vertical
         * front porch for RSCC configuration. This will ensure bw
         * downvotes are not sent within the active region. Additional
         * -1 is to give one line time for rscc mode min_threshold.
         */
        if (is_vid_mode && (mode_info->prefill_lines >= v_front_porch))
            rsc_config->prefill_lines = v_front_porch - 1;
        else
            rsc_config->prefill_lines = mode_info->prefill_lines;

        rsc_config->jitter_numer = mode_info->jitter_numer;
        rsc_config->jitter_denom = mode_info->jitter_denom;
        sde_enc->rsc_state_init = false;
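The comment in this hunk explains the clamp that keeps video-mode prefill lines inside the
vertical front porch, minus one line of margin for the RSC minimum threshold, so bandwidth
downvotes never land in the active region; the hunk itself appears to drop or relocate that
logic. A tiny standalone sketch of the clamp, with illustrative names only:

#include <stdbool.h>

/* Clamp prefill lines for video mode; a sketch, not the driver's helper. */
static unsigned int example_rsc_prefill_lines(bool is_vid_mode,
                          unsigned int prefill_lines,
                          unsigned int v_front_porch)
{
    if (is_vid_mode && prefill_lines >= v_front_porch)
        return v_front_porch - 1;   /* leave one line for min_threshold */

    return prefill_lines;
}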
@@ -5161,7 +5161,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->mdss_hw_block_size = 0x158;
set_bit(SDE_FEATURE_TRUSTED_VM, sde_cfg->features);
sde_cfg->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache = true;
} else if (IS_WAIPIO_TARGET(hw_rev)) {
} else if (IS_WAIPIO_TARGET(hw_rev) || IS_CAPE_TARGET(hw_rev)) {
sde_cfg->allowed_dsc_reservation_switch = SDE_DP_DSC_RESERVATION_SWITCH;
set_bit(SDE_FEATURE_DEDICATED_CWB, sde_cfg->features);
set_bit(SDE_FEATURE_CWB_DITHER, sde_cfg->features);

@@ -5211,6 +5211,38 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
set_bit(SDE_FEATURE_VBIF_DISABLE_SHAREABLE, sde_cfg->features);
set_bit(SDE_FEATURE_DITHER_LUMA_MODE, sde_cfg->features);
sde_cfg->mdss_hw_block_size = 0x158;
set_bit(SDE_FEATURE_RC_LM_FLUSH_OVERRIDE, sde_cfg->features);
} else if (IS_DIWALI_TARGET(hw_rev)) {
sde_cfg->allowed_dsc_reservation_switch = SDE_DP_DSC_RESERVATION_SWITCH;
set_bit(SDE_FEATURE_DEDICATED_CWB, sde_cfg->features);
set_bit(SDE_FEATURE_CWB_DITHER, sde_cfg->features);
set_bit(SDE_FEATURE_WB_UBWC, sde_cfg->features);
set_bit(SDE_FEATURE_CWB_CROP, sde_cfg->features);
set_bit(SDE_FEATURE_QSYNC, sde_cfg->features);
sde_cfg->perf.min_prefill_lines = 40;
sde_cfg->vbif_qos_nlvl = 8;
sde_cfg->ts_prefill_rev = 2;
sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
set_bit(SDE_FEATURE_3D_MERGE_RESET, sde_cfg->features);
set_bit(SDE_FEATURE_HDR_PLUS, sde_cfg->features);
set_bit(SDE_FEATURE_INLINE_SKIP_THRESHOLD, sde_cfg->features);
set_bit(SDE_MDP_DHDR_MEMPOOL_4K, &sde_cfg->mdp[0].features);
set_bit(SDE_FEATURE_VIG_P010, sde_cfg->features);
sde_cfg->true_inline_rot_rev = SDE_INLINE_ROT_VERSION_2_0_1;
set_bit(SDE_FEATURE_VBIF_DISABLE_SHAREABLE, sde_cfg->features);
set_bit(SDE_FEATURE_DITHER_LUMA_MODE, sde_cfg->features);
sde_cfg->mdss_hw_block_size = 0x158;
sde_cfg->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache = true;
set_bit(SDE_FEATURE_MULTIRECT_ERROR, sde_cfg->features);
set_bit(SDE_FEATURE_FP16, sde_cfg->features);
set_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &sde_cfg->mdp[0].features);
set_bit(SDE_FEATURE_HW_VSYNC_TS, sde_cfg->features);
set_bit(SDE_FEATURE_AVR_STEP, sde_cfg->features);
set_bit(SDE_FEATURE_TRUSTED_VM, sde_cfg->features);
set_bit(SDE_FEATURE_UBWC_STATS, sde_cfg->features);
set_bit(SDE_FEATURE_DEMURA, sde_cfg->features);
sde_cfg->demura_supported[SSPP_DMA1][0] = 0;
sde_cfg->demura_supported[SSPP_DMA1][1] = 1;
} else if (IS_KALAMA_TARGET(hw_rev)) {
set_bit(SDE_FEATURE_DEDICATED_CWB, sde_cfg->features);
set_bit(SDE_FEATURE_CWB_DITHER, sde_cfg->features);
@@ -50,6 +50,8 @@
#define SDE_HW_VER_700 SDE_HW_VER(7, 0, 0) /* lahaina */
#define SDE_HW_VER_720 SDE_HW_VER(7, 2, 0) /* yupik */
#define SDE_HW_VER_810 SDE_HW_VER(8, 1, 0) /* waipio */
#define SDE_HW_VER_820 SDE_HW_VER(8, 2, 0) /* diwali */
#define SDE_HW_VER_850 SDE_HW_VER(8, 5, 0) /* cape */
#define SDE_HW_VER_900 SDE_HW_VER(9, 0, 0) /* kalama */

/* Avoid using below IS_XXX macros outside catalog, use feature bit instead */
@@ -77,6 +79,8 @@
#define IS_LAHAINA_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_700)
#define IS_YUPIK_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_720)
#define IS_WAIPIO_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_810)
#define IS_DIWALI_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_820)
#define IS_CAPE_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_850)
#define IS_KALAMA_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_900)

#define SDE_HW_BLK_NAME_LEN 16
@@ -1173,6 +1177,7 @@ struct sde_mdp_cfg {
* logging
* @debugfs_ctrl: uidle is enabled/disabled through debugfs
* @perf_cntr_en: performance counters are enabled/disabled
* @dirty: dirty flag for uidle update
*/
struct sde_uidle_cfg {
SDE_HW_BLK_INFO;
@@ -1192,6 +1197,7 @@ struct sde_uidle_cfg {
u32 debugfs_perf;
bool debugfs_ctrl;
bool perf_cntr_en;
bool dirty;
};

/* struct sde_mdp_cfg : MDP TOP-BLK instance info
@@ -624,20 +624,20 @@ static int sde_hw_intf_setup_te_config(struct sde_hw_intf *intf,
struct sde_hw_tear_check *te)
{
struct sde_hw_blk_reg_map *c;
int cfg;
u32 cfg = 0;

if (!intf)
return -EINVAL;

c = &intf->hw;

cfg = BIT(19); /* VSYNC_COUNTER_EN */
if (te->hw_vsync_mode)
cfg |= BIT(20);

cfg |= te->vsync_count;

SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);
wmb(); /* disable vsync counter before updating single buffer registers */
SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
SDE_REG_WRITE(c, INTF_TEAR_VSYNC_INIT_VAL, te->vsync_init_val);
SDE_REG_WRITE(c, INTF_TEAR_RD_PTR_IRQ, te->rd_ptr_irq);
@@ -648,6 +648,8 @@ static int sde_hw_intf_setup_te_config(struct sde_hw_intf *intf,
te->sync_threshold_start));
SDE_REG_WRITE(c, INTF_TEAR_SYNC_WRCOUNT,
(te->start_pos + te->sync_threshold_start + 1));
cfg |= BIT(19); /* VSYNC_COUNTER_EN */
SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_VSYNC, cfg);

return 0;
}
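The reworked sequence above programs the tear-check block with the vsync counter still disabled and only enables the counter as the last write. A compact standalone sketch of that ordering follows; the register names, values, and the write helper are stand-ins for illustration, not the driver API:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the register write helper; prints instead of touching hardware. */
static void reg_write(const char *reg, uint32_t val)
{
	printf("%-20s <= 0x%08x\n", reg, (unsigned int)val);
}

static void setup_te_order(uint32_t vsync_count, int hw_vsync_mode)
{
	uint32_t cfg = vsync_count;

	if (hw_vsync_mode)
		cfg |= 1u << 20;

	reg_write("SYNC_CONFIG_VSYNC", cfg);	/* counter still disabled */
	reg_write("SYNC_CONFIG_HEIGHT", 0x1f4);	/* single-buffer registers */
	reg_write("SYNC_WRCOUNT", 0x200);	/* ...programmed safely */
	cfg |= 1u << 19;			/* VSYNC_COUNTER_EN */
	reg_write("SYNC_CONFIG_VSYNC", cfg);	/* enable the counter last */
}

int main(void)
{
	setup_te_order(19200000 / 60, 1);	/* assumed 19.2 MHz / 60 Hz */
	return 0;
}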
@@ -588,6 +588,13 @@ static int sde_hw_rc_check_mask_cfg(
return -EINVAL;
}

if (hw_cfg->panel_height != rc_mask_cfg->height ||
rc_mask_cfg->width != hw_cfg->panel_width) {
SDE_ERROR("RC mask Layer: h %d w %d panel: h %d w %d mismatch\n",
rc_mask_cfg->height, rc_mask_cfg->width,
hw_cfg->panel_height, hw_cfg->panel_width);
return -EINVAL;
}
flags = rc_mask_cfg->flags;
cfg_param_01 = rc_mask_cfg->cfg_param_01;
cfg_param_02 = rc_mask_cfg->cfg_param_02;
@@ -728,7 +735,8 @@ int sde_hw_rc_check_mask(struct sde_hw_dspp *hw_dspp, void *cfg)

if (hw_cfg->len != sizeof(struct drm_msm_rc_mask_cfg) ||
!hw_cfg->payload) {
SDE_ERROR("invalid payload\n");
SDE_ERROR("invalid payload len %d exp %zd\n", hw_cfg->len,
sizeof(struct drm_msm_rc_mask_cfg));
return -EINVAL;
}

@@ -773,6 +781,7 @@ int sde_hw_rc_check_pu_roi(struct sde_hw_dspp *hw_dspp, void *cfg)
roi_list = hw_cfg->payload;
if (!roi_list) {
SDE_DEBUG("full frame update\n");
memset(&empty_roi_list, 0, sizeof(struct msm_roi_list));
roi_list = &empty_roi_list;
}

@@ -835,6 +844,7 @@ int sde_hw_rc_setup_pu_roi(struct sde_hw_dspp *hw_dspp, void *cfg)
roi_list = hw_cfg->payload;
if (!roi_list) {
SDE_DEBUG("full frame update\n");
memset(&empty_roi_list, 0, sizeof(struct msm_roi_list));
roi_list = &empty_roi_list;
}

@@ -904,6 +914,9 @@ int sde_hw_rc_setup_mask(struct sde_hw_dspp *hw_dspp, void *cfg)
memset(RC_STATE(hw_dspp).last_rc_mask_cfg, 0,
sizeof(struct drm_msm_rc_mask_cfg));
RC_STATE(hw_dspp).mask_programmed = false;
memset(RC_STATE(hw_dspp).last_roi_list, 0,
sizeof(struct msm_roi_list));
RC_STATE(hw_dspp).roi_programmed = false;

return 0;
}
@@ -1084,17 +1084,14 @@ static void sde_hw_sspp_setup_ts_prefill(struct sde_hw_pipe *ctx,
}

if (cfg->time) {
u64 temp = DIV_ROUND_UP_ULL(TS_CLK * 1000000ULL, cfg->time);

ts_bytes = temp * cfg->size;
ts_count = DIV_ROUND_UP_ULL(TS_CLK * cfg->time, 1000000ULL);
ts_bytes = DIV_ROUND_UP_ULL(cfg->size, ts_count);
if (ts_bytes > SSPP_TRAFFIC_SHAPER_BPC_MAX)
ts_bytes = SSPP_TRAFFIC_SHAPER_BPC_MAX;
}

if (ts_bytes) {
ts_count = DIV_ROUND_UP_ULL(cfg->size, ts_bytes);
if (ts_count)
ts_bytes |= BIT(31) | BIT(27);
}

SDE_REG_WRITE(&ctx->hw, ts_offset, ts_bytes);
SDE_REG_WRITE(&ctx->hw, ts_prefill_offset, ts_count);
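To make the reworked traffic-shaper math above concrete, here is a small standalone calculation with assumed inputs (a 19.2 MHz shaper clock, a roughly 60 fps prefill window, and one 1920x1080 ARGB8888 frame); the constants are examples for illustration, not values taken from the catalog:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed example values. */
	uint64_t ts_clk = 19200000ULL;		/* shaper clock, Hz */
	uint64_t time_us = 16666ULL;		/* prefill window, microseconds */
	uint64_t size = 1920ULL * 1080 * 4;	/* bytes to fetch for the frame */

	/* Same shape as the new code: first the tick count, then bytes/tick. */
	uint64_t ts_count = DIV_ROUND_UP_ULL(ts_clk * time_us, 1000000ULL);
	uint64_t ts_bytes = DIV_ROUND_UP_ULL(size, ts_count);

	/* Roughly 320000 shaper ticks and 26 bytes per tick for these inputs. */
	printf("ts_count=%llu ts_bytes=%llu\n",
	       (unsigned long long)ts_count, (unsigned long long)ts_bytes);
	return 0;
}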
@@ -58,7 +58,9 @@
#include <linux/qcom-iommu-util.h>
#include "soc/qcom/secure_buffer.h"
#include <linux/qtee_shmbridge.h>
#ifdef CONFIG_DRM_SDE_VM
#include <linux/gunyah/gh_irq_lend.h>
#endif

#define CREATE_TRACE_POINTS
#include "sde_trace.h"
@@ -1459,6 +1461,8 @@ int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
}
sde_vm_unlock(sde_kms);

_sde_crtc_vm_release_notify(crtc);

exit:
return rc;
}
@@ -4831,6 +4835,7 @@ parse_fail:
return rc;
}

#ifdef CONFIG_DRM_SDE_VM
int sde_kms_get_io_resources(struct sde_kms *sde_kms, struct msm_io_res *io_res)
{
struct platform_device *pdev = to_platform_device(sde_kms->dev->dev);
@@ -4862,6 +4867,7 @@ int sde_kms_get_io_resources(struct sde_kms *sde_kms, struct msm_io_res *io_res)

return rc;
}
#endif

static int sde_kms_hw_init(struct msm_kms *kms)
{
@@ -481,7 +481,11 @@ void *sde_debugfs_get_root(struct sde_kms *sde_kms);
* These functions/definitions allow for building up a 'sde_info' structure
* containing one or more "key=value\n" entries.
*/
#define SDE_KMS_INFO_MAX_SIZE 4096
#if IS_ENABLED(CONFIG_DRM_LOW_MSM_MEM_FOOTPRINT)
#define SDE_KMS_INFO_MAX_SIZE (1 << 12)
#else
#define SDE_KMS_INFO_MAX_SIZE (1 << 14)
#endif

/**
* struct sde_kms_info - connector information structure container
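As a rough illustration of what the two sizes above allow, the sketch below appends "key=value\n" entries into a bounded buffer the way the sde_info blob is described; the helper and the key names are hypothetical, and only the size macros mirror the hunk (1 << 12 is 4096 bytes, 1 << 14 is 16384 bytes):

#include <stdio.h>
#include <string.h>

#ifdef LOW_MEM_FOOTPRINT			/* stand-in for the Kconfig option */
#define SDE_KMS_INFO_MAX_SIZE (1 << 12)		/* 4096 bytes */
#else
#define SDE_KMS_INFO_MAX_SIZE (1 << 14)		/* 16384 bytes */
#endif

/* Hypothetical helper: append one "key=value\n" entry, refusing to overflow. */
static int info_add(char *buf, size_t *len, const char *key, const char *val)
{
	int n = snprintf(buf + *len, SDE_KMS_INFO_MAX_SIZE - *len,
			 "%s=%s\n", key, val);

	if (n < 0 || (size_t)n >= SDE_KMS_INFO_MAX_SIZE - *len)
		return -1;
	*len += (size_t)n;
	return 0;
}

int main(void)
{
	static char buf[SDE_KMS_INFO_MAX_SIZE];
	size_t len = 0;

	info_add(buf, &len, "max_blendstages", "10");
	info_add(buf, &len, "has_dest_scaler", "true");
	printf("%s", buf);
	return 0;
}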
@@ -53,11 +53,11 @@ int _sde_vm_reclaim_mem(struct sde_kms *sde_kms)
rc = gh_rm_mem_reclaim(sde_vm->base.io_mem_handle, 0);
if (rc) {
SDE_ERROR("failed to reclaim IO memory, rc=%d\n", rc);
goto reclaim_fail;
return rc;
}

SDE_INFO("mem reclaim succeeded\n");
reclaim_fail:

sde_vm->base.io_mem_handle = -1;

return rc;
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2012-2021 The Linux Foundation. All rights reserved.
*/

#include "msm_drv.h"
@@ -23,6 +23,7 @@ enum sde_dsc_ratio_type {
DSC_V12_422_8BPC_7BPP,
DSC_V12_422_8BPC_8BPP,
DSC_V12_422_10BPC_7BPP,
DSC_V12_422_10BPC_8BPP,
DSC_V12_422_10BPC_10BPP,
DSC_V12_420_8BPC_6BPP,
DSC_V12_420_10BPC_6BPP,
@@ -50,6 +51,7 @@ static char sde_dsc_rc_range_min_qp[DSC_RATIO_TYPE_MAX][DSC_NUM_BUF_RANGES] = {
{0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11},
{0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10},
{0, 4, 5, 6, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 15},
{0, 2, 3, 4, 6, 7, 7, 7, 7, 7, 9, 9, 9, 11, 14},
{0, 2, 3, 4, 5, 5, 5, 6, 6, 7, 8, 8, 9, 11, 12},
/* DSC v1.2 YUV420 */
{0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10},
@@ -73,6 +75,7 @@ static char sde_dsc_rc_range_max_qp[DSC_RATIO_TYPE_MAX][DSC_NUM_BUF_RANGES] = {
{3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 11, 12},
{2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 10, 11},
{7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16},
{2, 5, 7, 8, 9, 10, 11, 12, 12, 13, 13, 13, 13, 14, 15},
{2, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12, 13},
/* DSC v1.2 YUV420 */
{2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 10, 12},
@@ -96,6 +99,7 @@ static char sde_dsc_rc_range_bpg[DSC_RATIO_TYPE_MAX][DSC_NUM_BUF_RANGES] = {
{2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12},
{2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12},
{2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12},
{2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12},
{10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12},
/* DSC v1.2 YUV420 */
{2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12},
@@ -125,6 +129,7 @@ static struct sde_dsc_rc_init_params_lut {
{11, 11, 5632, 410, 0, 0, 3, 12}, /* DSC_V12_422_8BPC_7BPP */
{11, 11, 2048, 341, 0, 0, 3, 12}, /* DSC_V12_422_8BPC_8BPP */
{15, 15, 5632, 410, 0, 0, 7, 16}, /* DSC_V12_422_10BPC_7BPP */
{15, 15, 2048, 341, 0, 0, 7, 16}, /* DSC_V12_422_10BPC_8BPP */
{15, 15, 2048, 273, 0, 0, 7, 16}, /* DSC_V12_422_10BPC_10BPP */
/* DSC v1.2 YUV420 */
{11, 11, 5632, 410, 0, 0, 3, 12}, /* DSC_V12_422_8BPC_7BPP */
@@ -161,6 +166,7 @@ static struct sde_dsc_table_index_lut {
{MSM_CHROMA_422, -1, 2, 8, 7, DSC_V12_422_8BPC_7BPP},
{MSM_CHROMA_422, -1, 2, 8, 8, DSC_V12_422_8BPC_8BPP},
{MSM_CHROMA_422, -1, 2, 10, 7, DSC_V12_422_10BPC_7BPP},
{MSM_CHROMA_422, -1, 2, 10, 8, DSC_V12_422_10BPC_8BPP},
{MSM_CHROMA_422, -1, 2, 10, 10, DSC_V12_422_10BPC_10BPP},

{MSM_CHROMA_420, -1, 2, 8, 6, DSC_V12_420_8BPC_6BPP},
@@ -61,6 +61,10 @@

#define DEFAULT_PANEL_MIN_V_PREFILL 35

/* add 10ms constant for low fps cases and use default timeout for existing cases */
#define RSC_VSYNC_TIMEOUT_MS(x) ((x && x->cmd_config.fps < 30) ? \
((1000 / x->cmd_config.fps) + 10) : PRIMARY_VBLANK_WORST_CASE_MS)

static struct sde_rsc_priv *rsc_prv_list[MAX_RSC_COUNT];
static struct device *rpmh_dev[MAX_RSC_COUNT];
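A quick worked example of the timeout macro above, rewritten to take the fps directly so it compiles standalone; PRIMARY_VBLANK_WORST_CASE_MS is given an assumed value here purely for illustration:

#include <stdio.h>

#define PRIMARY_VBLANK_WORST_CASE_MS 34		/* assumed value, illustration only */

/* Same shape as RSC_VSYNC_TIMEOUT_MS(), but parameterized on fps. */
#define VSYNC_TIMEOUT_MS(fps) \
	(((fps) && (fps) < 30) ? ((1000 / (fps)) + 10) : PRIMARY_VBLANK_WORST_CASE_MS)

int main(void)
{
	/* 24 fps panel: 1000 / 24 + 10 = 51 ms; 60 fps keeps the default. */
	printf("%d %d\n", VSYNC_TIMEOUT_MS(24), VSYNC_TIMEOUT_MS(60));
	return 0;
}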
@@ -168,7 +172,7 @@ void sde_rsc_client_destroy(struct sde_rsc_client *client)
SDE_EVT32(client->id, state, rsc->current_state,
client->crtc_id, wait_vblank_crtc_id,
SDE_EVTLOG_ERROR);
msleep(PRIMARY_VBLANK_WORST_CASE_MS);
msleep(RSC_VSYNC_TIMEOUT_MS(rsc));
}
}
mutex_lock(&rsc->client_lock);
@@ -311,7 +315,7 @@ static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,

default_prefill_lines = (rsc->cmd_config.fps *
DEFAULT_PANEL_MIN_V_PREFILL) / DEFAULT_PANEL_FPS;
if ((state == SDE_RSC_CMD_STATE) || !rsc->cmd_config.prefill_lines)
if (!rsc->cmd_config.prefill_lines)
rsc->cmd_config.prefill_lines = default_prefill_lines;

pr_debug("frame fps:%d jitter_numer:%d jitter_denom:%d vtotal:%d prefill lines:%d\n",
@@ -332,12 +336,7 @@ static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
line_time_ns = div_u64(line_time_ns, rsc->cmd_config.vtotal);
prefill_time_ns = line_time_ns * rsc->cmd_config.prefill_lines;

/* only take jitter into account for CMD mode */
if (state == SDE_RSC_CMD_STATE)
total = frame_time_ns - frame_jitter - prefill_time_ns;
else
total = frame_time_ns - prefill_time_ns;

if (total < 0) {
pr_err("invalid total time period time:%llu jiter_time:%llu blanking time:%llu\n",
frame_time_ns, frame_jitter, prefill_time_ns);
@@ -370,9 +369,8 @@ static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
rsc_time_slot_0_ns = div_u64(rsc_time_slot_0_ns, cxo_period_ns);
rsc->timer_config.rsc_time_slot_0_ns = (u32) rsc_time_slot_0_ns;

/* time_slot_1 for mode1 latency */
rsc_time_slot_1_ns = frame_time_ns;
rsc_time_slot_1_ns = div_u64(rsc_time_slot_1_ns, cxo_period_ns);
/* time_slot_1 for mode1 latency - 1 fps */
rsc_time_slot_1_ns = div_u64(TICKS_IN_NANO_SECOND, cxo_period_ns);
rsc->timer_config.rsc_time_slot_1_ns = (u32) rsc_time_slot_1_ns;

/* mode 2 is infinite */
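The time_slot_1 change above sizes the slot for a worst-case 1 fps frame instead of the current frame time. A small back-of-the-envelope check follows, with assumed values (a 19.2 MHz always-on XO, so roughly a 52 ns period, and TICKS_IN_NANO_SECOND taken as one second expressed in nanoseconds):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed values for illustration only. */
	uint64_t ticks_in_nano_second = 1000000000ULL;	/* 1 s in ns */
	uint64_t cxo_period_ns = 52ULL;			/* ~19.2 MHz XO */

	/* Old: frame_time_ns / cxo_period_ns (shrinks at high refresh rates).
	 * New: one full second of XO ticks, i.e. the 1 fps worst case. */
	uint64_t slot_1 = ticks_in_nano_second / cxo_period_ns;

	printf("rsc_time_slot_1 = %llu XO ticks\n", (unsigned long long)slot_1);
	return 0;
}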
@@ -543,7 +541,7 @@ vsync_wait:
SDE_EVT32(caller_client->id, rsc->current_state,
caller_client->crtc_id,
wait_vblank_crtc_id, SDE_EVTLOG_ERROR);
msleep(PRIMARY_VBLANK_WORST_CASE_MS);
msleep(RSC_VSYNC_TIMEOUT_MS(rsc));
} else {
*wait_vblank_crtc_id = rsc->primary_client->crtc_id;
}
@@ -590,7 +588,7 @@ static int sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc,
rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL, 0, 0);
if (!wait_vblank_crtc_id) {
pr_err("invalid crtc id wait pointer provided\n");
msleep(PRIMARY_VBLANK_WORST_CASE_MS);
msleep(RSC_VSYNC_TIMEOUT_MS(rsc));
} else {
*wait_vblank_crtc_id = rsc->primary_client->crtc_id;

@@ -605,7 +603,7 @@ static int sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc,
/* Wait for the vsync, if the refcount is set */
rc = wait_event_timeout(rsc->rsc_vsync_waitq,
atomic_read(&rsc->rsc_vsync_wait) == 0,
msecs_to_jiffies(PRIMARY_VBLANK_WORST_CASE_MS*2));
msecs_to_jiffies(RSC_VSYNC_TIMEOUT_MS(rsc) * 2));
if (!rc) {
pr_err("Timeout waiting for vsync\n");
rc = -ETIMEDOUT;
@@ -691,7 +689,7 @@ vsync_wait:
SDE_EVT32(caller_client->id, rsc->current_state,
caller_client->crtc_id,
wait_vblank_crtc_id, SDE_EVTLOG_ERROR);
msleep(PRIMARY_VBLANK_WORST_CASE_MS);
msleep(RSC_VSYNC_TIMEOUT_MS(rsc));
} else {
*wait_vblank_crtc_id = rsc->primary_client->crtc_id;
}