Merge "disp: msm: dsi: create generic interface for read poll timeout"

qctecmdr
2021-06-23 04:01:00 -07:00
committed by Gerrit - the friendly Code Review server
19 changed files with 180 additions and 1748 deletions


@@ -180,18 +180,14 @@ msm_drm-$(CONFIG_DRM_SDE_RSC) += sde_rsc.o \
msm_drm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi_phy.o \
dsi/dsi_pwr.o \
dsi/dsi_phy.o \
dsi/dsi_phy_hw_v2_0.o \
dsi/dsi_phy_hw_v3_0.o \
dsi/dsi_phy_hw_v4_0.o \
dsi/dsi_phy_timing_calc.o \
dsi/dsi_phy_timing_v2_0.o \
dsi/dsi_phy_timing_v3_0.o \
dsi/dsi_phy_timing_v4_0.o \
dsi/dsi_pll.o \
dsi/dsi_pll_5nm.o \
dsi/dsi_ctrl_hw_cmn.o \
dsi/dsi_ctrl_hw_1_4.o \
dsi/dsi_ctrl_hw_2_0.o \
dsi/dsi_ctrl_hw_2_2.o \
dsi/dsi_ctrl.o \
dsi/dsi_catalog.o \


@@ -67,45 +67,6 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
ctrl->ops.vid_engine_busy = dsi_ctrl_hw_cmn_vid_engine_busy;
switch (version) {
case DSI_CTRL_VERSION_1_4:
ctrl->ops.setup_lane_map = dsi_ctrl_hw_14_setup_lane_map;
ctrl->ops.ulps_ops.ulps_request = dsi_ctrl_hw_cmn_ulps_request;
ctrl->ops.ulps_ops.ulps_exit = dsi_ctrl_hw_cmn_ulps_exit;
ctrl->ops.wait_for_lane_idle =
dsi_ctrl_hw_14_wait_for_lane_idle;
ctrl->ops.ulps_ops.get_lanes_in_ulps =
dsi_ctrl_hw_cmn_get_lanes_in_ulps;
ctrl->ops.clamp_enable = dsi_ctrl_hw_14_clamp_enable;
ctrl->ops.clamp_disable = dsi_ctrl_hw_14_clamp_disable;
ctrl->ops.reg_dump_to_buffer =
dsi_ctrl_hw_14_reg_dump_to_buffer;
ctrl->ops.schedule_dma_cmd = NULL;
ctrl->ops.kickoff_command_non_embedded_mode = NULL;
ctrl->ops.config_clk_gating = NULL;
ctrl->ops.configure_cmddma_window = NULL;
ctrl->ops.reset_trig_ctrl = NULL;
ctrl->ops.log_line_count = NULL;
ctrl->ops.splitlink_cmd_setup = NULL;
break;
case DSI_CTRL_VERSION_2_0:
ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map;
ctrl->ops.wait_for_lane_idle =
dsi_ctrl_hw_20_wait_for_lane_idle;
ctrl->ops.reg_dump_to_buffer =
dsi_ctrl_hw_20_reg_dump_to_buffer;
ctrl->ops.ulps_ops.ulps_request = NULL;
ctrl->ops.ulps_ops.ulps_exit = NULL;
ctrl->ops.ulps_ops.get_lanes_in_ulps = NULL;
ctrl->ops.clamp_enable = NULL;
ctrl->ops.clamp_disable = NULL;
ctrl->ops.schedule_dma_cmd = NULL;
ctrl->ops.kickoff_command_non_embedded_mode = NULL;
ctrl->ops.config_clk_gating = NULL;
ctrl->ops.configure_cmddma_window = NULL;
ctrl->ops.reset_trig_ctrl = NULL;
ctrl->ops.log_line_count = NULL;
ctrl->ops.splitlink_cmd_setup = NULL;
break;
case DSI_CTRL_VERSION_2_2:
case DSI_CTRL_VERSION_2_3:
case DSI_CTRL_VERSION_2_4:
@@ -173,10 +134,6 @@ int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
set_bit(DSI_CTRL_DPHY, ctrl->feature_map);
switch (version) {
case DSI_CTRL_VERSION_1_4:
dsi_catalog_cmn_init(ctrl, version);
break;
case DSI_CTRL_VERSION_2_0:
case DSI_CTRL_VERSION_2_2:
case DSI_CTRL_VERSION_2_3:
case DSI_CTRL_VERSION_2_4:
@@ -196,34 +153,6 @@ int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
return rc;
}
/**
* dsi_catalog_phy_2_0_init() - catalog init for DSI PHY 14nm
*/
static void dsi_catalog_phy_2_0_init(struct dsi_phy_hw *phy)
{
phy->ops.regulator_enable = dsi_phy_hw_v2_0_regulator_enable;
phy->ops.regulator_disable = dsi_phy_hw_v2_0_regulator_disable;
phy->ops.enable = dsi_phy_hw_v2_0_enable;
phy->ops.disable = dsi_phy_hw_v2_0_disable;
phy->ops.calculate_timing_params =
dsi_phy_hw_calculate_timing_params;
phy->ops.phy_idle_on = dsi_phy_hw_v2_0_idle_on;
phy->ops.phy_idle_off = dsi_phy_hw_v2_0_idle_off;
phy->ops.calculate_timing_params =
dsi_phy_hw_calculate_timing_params;
phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v2_0;
phy->ops.clamp_ctrl = dsi_phy_hw_v2_0_clamp_ctrl;
phy->ops.dyn_refresh_ops.dyn_refresh_config =
dsi_phy_hw_v2_0_dyn_refresh_config;
phy->ops.dyn_refresh_ops.dyn_refresh_pipe_delay =
dsi_phy_hw_v2_0_dyn_refresh_pipe_delay;
phy->ops.dyn_refresh_ops.dyn_refresh_helper =
dsi_phy_hw_v2_0_dyn_refresh_helper;
phy->ops.dyn_refresh_ops.dyn_refresh_trigger_sel = NULL;
phy->ops.dyn_refresh_ops.cache_phy_timings =
dsi_phy_hw_v2_0_cache_phy_timings;
}
/**
* dsi_catalog_phy_3_0_init() - catalog init for DSI PHY 10nm
*/
@@ -329,9 +258,6 @@ int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
dsi_phy_timing_calc_init(phy, version);
switch (version) {
case DSI_PHY_VERSION_2_0:
dsi_catalog_phy_2_0_init(phy);
break;
case DSI_PHY_VERSION_3_0:
dsi_catalog_phy_3_0_init(phy);
break;
@@ -341,9 +267,6 @@ int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
case DSI_PHY_VERSION_4_3:
dsi_catalog_phy_4_0_init(phy);
break;
case DSI_PHY_VERSION_0_0_HPM:
case DSI_PHY_VERSION_0_0_LPM:
case DSI_PHY_VERSION_1_0:
default:
return -ENOTSUPP;
}


@@ -49,8 +49,6 @@ struct dsi_ctrl_list_item {
static LIST_HEAD(dsi_ctrl_list);
static DEFINE_MUTEX(dsi_ctrl_list_lock);
static const enum dsi_ctrl_version dsi_ctrl_v1_4 = DSI_CTRL_VERSION_1_4;
static const enum dsi_ctrl_version dsi_ctrl_v2_0 = DSI_CTRL_VERSION_2_0;
static const enum dsi_ctrl_version dsi_ctrl_v2_2 = DSI_CTRL_VERSION_2_2;
static const enum dsi_ctrl_version dsi_ctrl_v2_3 = DSI_CTRL_VERSION_2_3;
static const enum dsi_ctrl_version dsi_ctrl_v2_4 = DSI_CTRL_VERSION_2_4;
@@ -58,14 +56,6 @@ static const enum dsi_ctrl_version dsi_ctrl_v2_5 = DSI_CTRL_VERSION_2_5;
static const enum dsi_ctrl_version dsi_ctrl_v2_6 = DSI_CTRL_VERSION_2_6;
static const struct of_device_id msm_dsi_of_match[] = {
{
.compatible = "qcom,dsi-ctrl-hw-v1.4",
.data = &dsi_ctrl_v1_4,
},
{
.compatible = "qcom,dsi-ctrl-hw-v2.0",
.data = &dsi_ctrl_v2_0,
},
{
.compatible = "qcom,dsi-ctrl-hw-v2.2",
.data = &dsi_ctrl_v2_2,
@@ -708,18 +698,6 @@ static int dsi_ctrl_init_regmap(struct platform_device *pdev,
DSI_CTRL_DEBUG(ctrl, "map dsi_ctrl registers to %pK\n", ctrl->hw.base);
switch (ctrl->version) {
case DSI_CTRL_VERSION_1_4:
case DSI_CTRL_VERSION_2_0:
ptr = msm_ioremap(pdev, "mmss_misc", ctrl->name);
if (IS_ERR(ptr)) {
DSI_CTRL_ERR(ctrl, "mmss_misc base address not found\n");
rc = PTR_ERR(ptr);
return rc;
}
ctrl->hw.mmss_misc_base = ptr;
ctrl->hw.disp_cc_base = NULL;
ctrl->hw.mdp_intf_base = NULL;
break;
case DSI_CTRL_VERSION_2_2:
case DSI_CTRL_VERSION_2_3:
case DSI_CTRL_VERSION_2_4:
@@ -1273,46 +1251,6 @@ int dsi_ctrl_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl *dsi_ctrl)
return rc;
}
static void dsi_ctrl_wait_for_video_done(struct dsi_ctrl *dsi_ctrl)
{
u32 v_total = 0, v_blank = 0, sleep_ms = 0, fps = 0, ret;
struct dsi_mode_info *timing;
/**
* No need to wait if the panel is not video mode or
* if DSI controller supports command DMA scheduling or
* if we are sending init commands.
*/
if ((dsi_ctrl->host_config.panel_mode != DSI_OP_VIDEO_MODE) ||
(dsi_ctrl->version >= DSI_CTRL_VERSION_2_2) ||
(dsi_ctrl->current_state.vid_engine_state !=
DSI_CTRL_ENGINE_ON))
return;
dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
DSI_VIDEO_MODE_FRAME_DONE);
dsi_ctrl_enable_status_interrupt(dsi_ctrl,
DSI_SINT_VIDEO_MODE_FRAME_DONE, NULL);
reinit_completion(&dsi_ctrl->irq_info.vid_frame_done);
ret = wait_for_completion_timeout(
&dsi_ctrl->irq_info.vid_frame_done,
msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
if (ret <= 0)
DSI_CTRL_DEBUG(dsi_ctrl, "wait for video done failed\n");
dsi_ctrl_disable_status_interrupt(dsi_ctrl,
DSI_SINT_VIDEO_MODE_FRAME_DONE);
timing = &(dsi_ctrl->host_config.video_timing);
v_total = timing->v_sync_width + timing->v_back_porch +
timing->v_front_porch + timing->v_active;
v_blank = timing->v_sync_width + timing->v_back_porch;
fps = timing->refresh_rate;
sleep_ms = CEIL((v_blank * 1000), (v_total * fps)) + 1;
udelay(sleep_ms * 1000);
}
int dsi_message_validate_tx_mode(struct dsi_ctrl *dsi_ctrl,
u32 cmd_len,
u32 *flags)
@@ -1497,8 +1435,6 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
}
if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
dsi_ctrl_wait_for_video_done(dsi_ctrl);
atomic_set(&dsi_ctrl->dma_irq_trig, 0);
dsi_ctrl_enable_status_interrupt(dsi_ctrl,
DSI_SINT_CMD_MODE_DMA_DONE, NULL);
@@ -3637,7 +3573,6 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
if ((flags & DSI_CTRL_CMD_BROADCAST) &&
(flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
dsi_ctrl_wait_for_video_done(dsi_ctrl);
atomic_set(&dsi_ctrl->dma_irq_trig, 0);
dsi_ctrl_enable_status_interrupt(dsi_ctrl,
DSI_SINT_CMD_MODE_DMA_DONE, NULL);
@@ -3993,8 +3928,7 @@ int dsi_ctrl_set_vid_engine_state(struct dsi_ctrl *dsi_ctrl,
* playback, display does not recover back after ESD failure.
* Perform a reset if video engine is stuck.
*/
if (!on && (dsi_ctrl->version < DSI_CTRL_VERSION_1_3 ||
vid_eng_busy))
if (!on && vid_eng_busy)
dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
}


@@ -12,6 +12,7 @@
#include <linux/bitmap.h>
#include "dsi_defs.h"
#include "dsi_hw.h"
#define DSI_CTRL_HW_DBG(c, fmt, ...) DRM_DEV_DEBUG(NULL, "[msm-dsi-debug]: DSI_%d: "\
fmt, c ? c->index : -1, ##__VA_ARGS__)
@@ -20,6 +21,18 @@
#define DSI_CTRL_HW_INFO(c, fmt, ...) DRM_DEV_INFO(NULL, "[msm-dsi-info]: DSI_%d: "\
fmt, c ? c->index : -1, ##__VA_ARGS__)
#define DSI_MMSS_MISC_R32(dsi_ctrl_hw, off) DSI_GEN_R32((dsi_ctrl_hw)->mmss_misc_base, off)
#define DSI_MMSS_MISC_W32(dsi_ctrl_hw, off, val) \
DSI_GEN_W32_DEBUG((dsi_ctrl_hw)->mmss_misc_base, (dsi_ctrl_hw)->index, off, val)
#define DSI_DISP_CC_R32(dsi_ctrl_hw, off) DSI_GEN_R32((dsi_ctrl_hw)->disp_cc_base, off)
#define DSI_DISP_CC_W32(dsi_ctrl_hw, off, val) \
DSI_GEN_W32_DEBUG((dsi_ctrl_hw)->disp_cc_base, (dsi_ctrl_hw)->index, off, val)
#define DSI_MDP_INTF_R32(dsi_ctrl_hw, off) DSI_GEN_R32((dsi_ctrl_hw)->mdp_intf_base, off)
#define DSI_MDP_INTF_W32(dsi_ctrl_hw, off, val) \
DSI_GEN_W32_DEBUG((dsi_ctrl_hw)->mdp_intf_base, (dsi_ctrl_hw)->index, off, val)
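The wrappers just added above route mmss_misc, disp_cc and mdp_intf register accesses through the shared DSI_GEN_R32/DSI_GEN_W32_DEBUG helpers pulled in from dsi_hw.h, so writes to these auxiliary bases take the same pr_debug-traced path as writes to the main controller base. A minimal sketch of the expansion, reusing the MMSS_MISC_CLAMP_REG_OFF write from the v1.4 clamp code elsewhere in this diff (illustrative only, not part of the patch):

    /* Wrapper call as it appears in driver code: */
    DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);

    /* Roughly what DSI_GEN_W32_DEBUG turns it into: */
    pr_debug("[DSI_%d][%s] - [0x%08x]\n", ctrl->index,
             "MMSS_MISC_CLAMP_REG_OFF", (uint32_t)reg);
    writel_relaxed(reg, ctrl->mmss_misc_base + MMSS_MISC_CLAMP_REG_OFF);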
/**
* Modifier flag for command transmission. If this flag is set, command
* information is programmed to hardware and transmission is not triggered.
@@ -31,9 +44,6 @@
/**
* enum dsi_ctrl_version - version of the dsi host controller
* @DSI_CTRL_VERSION_UNKNOWN: Unknown controller version
* @DSI_CTRL_VERSION_1_3: DSI host v1.3 controller
* @DSI_CTRL_VERSION_1_4: DSI host v1.4 controller
* @DSI_CTRL_VERSION_2_0: DSI host v2.0 controller
* @DSI_CTRL_VERSION_2_2: DSI host v2.2 controller
* @DSI_CTRL_VERSION_2_3: DSI host v2.3 controller
* @DSI_CTRL_VERSION_2_4: DSI host v2.4 controller
@@ -43,9 +53,6 @@
*/
enum dsi_ctrl_version {
DSI_CTRL_VERSION_UNKNOWN,
DSI_CTRL_VERSION_1_3,
DSI_CTRL_VERSION_1_4,
DSI_CTRL_VERSION_2_0,
DSI_CTRL_VERSION_2_2,
DSI_CTRL_VERSION_2_3,
DSI_CTRL_VERSION_2_4,


@@ -1,475 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/iopoll.h>
#include "dsi_ctrl_hw.h"
#include "dsi_ctrl_reg.h"
#include "dsi_hw.h"
#define MMSS_MISC_CLAMP_REG_OFF 0x0014
/**
* dsi_ctrl_hw_14_setup_lane_map() - setup mapping between
* logical and physical lanes
* @ctrl: Pointer to the controller host hardware.
* @lane_map: Structure defining the mapping between DSI logical
* lanes and physical lanes.
*/
void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
struct dsi_lane_map *lane_map)
{
DSI_W32(ctrl, DSI_LANE_SWAP_CTRL, lane_map->lane_map_v1);
DSI_CTRL_HW_DBG(ctrl, "Lane swap setup complete\n");
}
/**
* dsi_ctrl_hw_14_wait_for_lane_idle()
* This function waits for all the active DSI lanes to be idle by polling all
the FIFO_EMPTY bits and polling the lane status to ensure that all the lanes
* are in stop state. This function assumes that the bus clocks required to
* access the registers are already turned on.
*
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to be stopped.
*
* return: Error code.
*/
int dsi_ctrl_hw_14_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes)
{
int rc = 0, val = 0;
u32 stop_state_mask = 0, fifo_empty_mask = 0;
u32 const sleep_us = 10;
u32 const timeout_us = 100;
if (lanes & DSI_DATA_LANE_0) {
stop_state_mask |= BIT(0);
fifo_empty_mask |= (BIT(12) | BIT(16));
}
if (lanes & DSI_DATA_LANE_1) {
stop_state_mask |= BIT(1);
fifo_empty_mask |= BIT(20);
}
if (lanes & DSI_DATA_LANE_2) {
stop_state_mask |= BIT(2);
fifo_empty_mask |= BIT(24);
}
if (lanes & DSI_DATA_LANE_3) {
stop_state_mask |= BIT(3);
fifo_empty_mask |= BIT(28);
}
DSI_CTRL_HW_DBG(ctrl, "polling for fifo empty, mask=0x%08x\n",
fifo_empty_mask);
rc = readl_poll_timeout(ctrl->base + DSI_FIFO_STATUS, val,
(val & fifo_empty_mask), sleep_us, timeout_us);
if (rc) {
DSI_CTRL_HW_ERR(ctrl, "fifo not empty, FIFO_STATUS=0x%08x\n",
val);
goto error;
}
DSI_CTRL_HW_DBG(ctrl, "polling for lanes to be in stop state, mask=0x%08x\n",
stop_state_mask);
rc = readl_poll_timeout(ctrl->base + DSI_LANE_STATUS, val,
(val & stop_state_mask), sleep_us, timeout_us);
if (rc) {
DSI_CTRL_HW_ERR(ctrl, "lanes not in stop state, LANE_STATUS=0x%08x\n",
val);
goto error;
}
error:
return rc;
}
/**
* ulps_request() - request ulps entry for specified lanes
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to enter ULPS.
*
* Caller should check if lanes are in ULPS mode by calling
* get_lanes_in_ulps() operation.
*/
void dsi_ctrl_hw_cmn_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
{
u32 reg = 0;
reg = DSI_R32(ctrl, DSI_LANE_CTRL);
if (lanes & DSI_CLOCK_LANE)
reg |= BIT(4);
if (lanes & DSI_DATA_LANE_0)
reg |= BIT(0);
if (lanes & DSI_DATA_LANE_1)
reg |= BIT(1);
if (lanes & DSI_DATA_LANE_2)
reg |= BIT(2);
if (lanes & DSI_DATA_LANE_3)
reg |= BIT(3);
/*
* ULPS entry request. Wait for short time to make sure
* that the lanes enter ULPS. Recommended as per HPG.
*/
DSI_W32(ctrl, DSI_LANE_CTRL, reg);
usleep_range(100, 110);
DSI_CTRL_HW_DBG(ctrl, "ULPS requested for lanes 0x%x\n", lanes);
}
/**
* ulps_exit() - exit ULPS on specified lanes
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to exit ULPS.
*
* Caller should check if lanes are in active mode by calling
* get_lanes_in_ulps() operation.
*/
void dsi_ctrl_hw_cmn_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes)
{
u32 reg = 0;
u32 prev_reg = 0;
prev_reg = DSI_R32(ctrl, DSI_LANE_CTRL);
prev_reg &= BIT(24);
if (lanes & DSI_CLOCK_LANE)
reg |= BIT(12);
if (lanes & DSI_DATA_LANE_0)
reg |= BIT(8);
if (lanes & DSI_DATA_LANE_1)
reg |= BIT(9);
if (lanes & DSI_DATA_LANE_2)
reg |= BIT(10);
if (lanes & DSI_DATA_LANE_3)
reg |= BIT(11);
/*
* ULPS Exit Request
* Hardware requirement is to wait for at least 1ms
*/
DSI_W32(ctrl, DSI_LANE_CTRL, reg | prev_reg);
usleep_range(1000, 1010);
/*
* Sometimes when exiting ULPS, it is possible that some DSI
* lanes are not in the stop state which could lead to DSI
* commands not going through. To avoid this, force the lanes
* to be in stop state.
*/
DSI_W32(ctrl, DSI_LANE_CTRL, (reg << 8) | prev_reg);
wmb(); /* ensure lanes are put to stop state */
DSI_W32(ctrl, DSI_LANE_CTRL, 0x0 | prev_reg);
wmb(); /* ensure lanes are put to stop state */
DSI_CTRL_HW_DBG(ctrl, "ULPS exit request for lanes=0x%x\n", lanes);
}
/**
* get_lanes_in_ulps() - returns the list of lanes in ULPS mode
* @ctrl: Pointer to the controller host hardware.
*
* Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
* state. If 0 is returned, all the lanes are active.
*
* Return: List of lanes in ULPS state.
*/
u32 dsi_ctrl_hw_cmn_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl)
{
u32 reg = 0;
u32 lanes = 0;
reg = DSI_R32(ctrl, DSI_LANE_STATUS);
if (!(reg & BIT(8)))
lanes |= DSI_DATA_LANE_0;
if (!(reg & BIT(9)))
lanes |= DSI_DATA_LANE_1;
if (!(reg & BIT(10)))
lanes |= DSI_DATA_LANE_2;
if (!(reg & BIT(11)))
lanes |= DSI_DATA_LANE_3;
if (!(reg & BIT(12)))
lanes |= DSI_CLOCK_LANE;
DSI_CTRL_HW_DBG(ctrl, "lanes in ulps = 0x%x\n", lanes);
return lanes;
}
/**
* clamp_enable() - enable DSI clamps to keep PHY driving a stable link
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes which need to be clamped.
* @enable_ulps: Boolean to specify if ULPS is enabled in DSI controller
*/
void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
u32 lanes,
bool enable_ulps)
{
u32 clamp_reg = 0;
u32 bit_shift = 0;
u32 reg = 0;
if (ctrl->index == 1)
bit_shift = 16;
if (lanes & DSI_CLOCK_LANE) {
clamp_reg |= BIT(9);
if (enable_ulps)
clamp_reg |= BIT(8);
}
if (lanes & DSI_DATA_LANE_0) {
clamp_reg |= BIT(7);
if (enable_ulps)
clamp_reg |= BIT(6);
}
if (lanes & DSI_DATA_LANE_1) {
clamp_reg |= BIT(5);
if (enable_ulps)
clamp_reg |= BIT(4);
}
if (lanes & DSI_DATA_LANE_2) {
clamp_reg |= BIT(3);
if (enable_ulps)
clamp_reg |= BIT(2);
}
if (lanes & DSI_DATA_LANE_3) {
clamp_reg |= BIT(1);
if (enable_ulps)
clamp_reg |= BIT(0);
}
reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
reg |= (clamp_reg << bit_shift);
DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
reg |= (BIT(15) << bit_shift); /* Enable clamp */
DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
DSI_CTRL_HW_DBG(ctrl, "Clamps enabled for lanes=0x%x\n", lanes);
}
/**
* clamp_disable() - disable DSI clamps
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes which need to have clamps released.
* @disable_ulps: Boolean to specify if ULPS is enabled in DSI controller
*/
void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
u32 lanes,
bool disable_ulps)
{
u32 clamp_reg = 0;
u32 bit_shift = 0;
u32 reg = 0;
if (ctrl->index == 1)
bit_shift = 16;
if (lanes & DSI_CLOCK_LANE) {
clamp_reg |= BIT(9);
if (disable_ulps)
clamp_reg |= BIT(8);
}
if (lanes & DSI_DATA_LANE_0) {
clamp_reg |= BIT(7);
if (disable_ulps)
clamp_reg |= BIT(6);
}
if (lanes & DSI_DATA_LANE_1) {
clamp_reg |= BIT(5);
if (disable_ulps)
clamp_reg |= BIT(4);
}
if (lanes & DSI_DATA_LANE_2) {
clamp_reg |= BIT(3);
if (disable_ulps)
clamp_reg |= BIT(2);
}
if (lanes & DSI_DATA_LANE_3) {
clamp_reg |= BIT(1);
if (disable_ulps)
clamp_reg |= BIT(0);
}
clamp_reg |= BIT(15); /* Enable clamp */
clamp_reg <<= bit_shift;
reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
reg &= ~(clamp_reg);
DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
DSI_CTRL_HW_DBG(ctrl, "Disable clamps for lanes=%d\n", lanes);
}
#define DUMP_REG_VALUE(off) "\t%-30s: 0x%08x\n", #off, DSI_R32(ctrl, off)
ssize_t dsi_ctrl_hw_14_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
char *buf,
u32 size)
{
u32 len = 0;
len += snprintf((buf + len), (size - len), "CONFIGURATION REGS:\n");
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_HW_VERSION));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_FIFO_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_SYNC_DATATYPE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_PIXEL_DATATYPE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_BLANKING_DATATYPE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_DATA_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_H));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_V));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_HSYNC));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC_VPOS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_DMA_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_CMD_OFFSET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_CMD_LENGTH));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_FIFO_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_NULL_PACKET_DATA));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_ACK_ERR_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA0));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA1));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA3));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATATYPE0));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATATYPE1));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TRIG_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EXT_MUX));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EXT_MUX_TE_PULSE_DETECT_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CMD_MODE_DMA_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CMD_MODE_MDP_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CMD_MODE_BTA_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RESET_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_SWAP_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DLN0_PHY_ERR));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LP_TIMER_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_HS_TIMER_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TIMEOUT_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CLKOUT_TIMING_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EOT_PACKET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EOT_PACKET_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_GENERIC_ESC_TX_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_ERR_INT_MASK0));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_INT_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_SOFT_RESET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CLK_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CLK_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_PHY_SW_RESET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_AXI2AHB_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VBIF_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_AES_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_WRITE_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DSI_TIMING_FLUSH));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DSI_TIMING_DB_MODE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_RESET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VERSION));
DSI_CTRL_HW_ERR(ctrl, "LLENGTH = %d\n", len);
return len;
}


@@ -1,224 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/iopoll.h>
#include "dsi_ctrl_hw.h"
#include "dsi_ctrl_reg.h"
#include "dsi_hw.h"
void dsi_ctrl_hw_20_setup_lane_map(struct dsi_ctrl_hw *ctrl,
struct dsi_lane_map *lane_map)
{
u32 reg_value = lane_map->lane_map_v2[DSI_LOGICAL_LANE_0] |
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_1] << 4) |
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_2] << 8) |
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_3] << 12);
DSI_W32(ctrl, DSI_LANE_SWAP_CTRL, reg_value);
DSI_CTRL_HW_DBG(ctrl, "Lane swap setup complete\n");
}
int dsi_ctrl_hw_20_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl,
u32 lanes)
{
int rc = 0, val = 0;
u32 fifo_empty_mask = 0;
u32 const sleep_us = 10;
u32 const timeout_us = 100;
if (lanes & DSI_DATA_LANE_0)
fifo_empty_mask |= (BIT(12) | BIT(16));
if (lanes & DSI_DATA_LANE_1)
fifo_empty_mask |= BIT(20);
if (lanes & DSI_DATA_LANE_2)
fifo_empty_mask |= BIT(24);
if (lanes & DSI_DATA_LANE_3)
fifo_empty_mask |= BIT(28);
DSI_CTRL_HW_DBG(ctrl, "polling for fifo empty, mask=0x%08x\n",
fifo_empty_mask);
rc = readl_poll_timeout(ctrl->base + DSI_FIFO_STATUS, val,
(val & fifo_empty_mask), sleep_us, timeout_us);
if (rc) {
DSI_CTRL_HW_ERR(ctrl, "fifo not empty, FIFO_STATUS=0x%08x\n",
val);
goto error;
}
error:
return rc;
}
#define DUMP_REG_VALUE(off) "\t%-30s: 0x%08x\n", #off, DSI_R32(ctrl, off)
ssize_t dsi_ctrl_hw_20_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
char *buf,
u32 size)
{
u32 len = 0;
len += snprintf((buf + len), (size - len), "CONFIGURATION REGS:\n");
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_HW_VERSION));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_FIFO_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_SYNC_DATATYPE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_PIXEL_DATATYPE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_BLANKING_DATATYPE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_DATA_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_H));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_V));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_HSYNC));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC_VPOS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_DMA_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_CMD_OFFSET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_CMD_LENGTH));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_FIFO_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_NULL_PACKET_DATA));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_ACK_ERR_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA0));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA1));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA3));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATATYPE0));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATATYPE1));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TRIG_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EXT_MUX));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EXT_MUX_TE_PULSE_DETECT_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CMD_MODE_DMA_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CMD_MODE_MDP_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CMD_MODE_BTA_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RESET_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_MISR_CMD_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_MISR_VIDEO_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_SWAP_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DLN0_PHY_ERR));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LP_TIMER_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_HS_TIMER_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TIMEOUT_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CLKOUT_TIMING_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EOT_PACKET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EOT_PACKET_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_GENERIC_ESC_TX_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_ERR_INT_MASK0));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_INT_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_SOFT_RESET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CLK_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CLK_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_PHY_SW_RESET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_AXI2AHB_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_MISR_CMD_MDP0_32BIT));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_MISR_CMD_MDP1_32BIT));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_MISR_VIDEO_32BIT));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VBIF_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_AES_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_WRITE_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DSI_TIMING_FLUSH));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DSI_TIMING_DB_MODE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_RESET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VERSION));
DSI_CTRL_HW_ERR(ctrl, "LLENGTH = %d\n", len);
return len;
}


@@ -56,7 +56,7 @@ int dsi_ctrl_hw_22_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl,
DSI_CTRL_HW_DBG(ctrl, "%s: polling for fifo empty, mask=0x%08x\n",
__func__, fifo_empty_mask);
rc = readl_poll_timeout(ctrl->base + DSI_FIFO_STATUS, val,
rc = DSI_READ_POLL_TIMEOUT(ctrl, DSI_FIFO_STATUS, val,
(val & fifo_empty_mask), sleep_us, timeout_us);
if (rc) {
DSI_CTRL_HW_ERR(ctrl,
@@ -274,11 +274,10 @@ u32 dsi_ctrl_hw_22_log_line_count(struct dsi_ctrl_hw *ctrl, bool cmd_mode)
return reg;
if (cmd_mode)
reg = readl_relaxed(ctrl->mdp_intf_base + MDP_INTF_TEAR_OFFSET
reg = DSI_MDP_INTF_R32(ctrl, MDP_INTF_TEAR_OFFSET
+ MDP_INTF_TEAR_LINE_COUNT_OFFSET);
else
reg = readl_relaxed(ctrl->mdp_intf_base
+ MDP_INTF_LINE_COUNT_OFFSET);
reg = DSI_MDP_INTF_R32(ctrl, MDP_INTF_LINE_COUNT_OFFSET);
return reg;
}
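The hunks above show the conversion pattern this change applies throughout the controller code: open-coded readl_poll_timeout() and readl_relaxed() calls against cached base pointers are replaced with the generic wrappers (DSI_READ_POLL_TIMEOUT, DSI_MDP_INTF_R32) that this change adds to the controller hardware header and to dsi_hw.h in the header hunks elsewhere in this diff. A minimal sketch of what the FIFO-status poll expands back to, assuming only those macro definitions (illustrative, not part of the patch):

    rc = DSI_READ_POLL_TIMEOUT(ctrl, DSI_FIFO_STATUS, val,
            (val & fifo_empty_mask), sleep_us, timeout_us);

    /* ...which is equivalent to the previous open-coded form: */
    rc = readl_poll_timeout(ctrl->base + DSI_FIFO_STATUS, val,
            (val & fifo_empty_mask), sleep_us, timeout_us);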


@@ -151,6 +151,120 @@ void dsi_ctrl_hw_cmn_host_setup(struct dsi_ctrl_hw *ctrl,
DSI_CTRL_HW_DBG(ctrl, "Host configuration complete\n");
}
/**
* ulps_request() - request ulps entry for specified lanes
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to enter ULPS.
*
* Caller should check if lanes are in ULPS mode by calling
* get_lanes_in_ulps() operation.
*/
void dsi_ctrl_hw_cmn_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
{
u32 reg = 0;
reg = DSI_R32(ctrl, DSI_LANE_CTRL);
if (lanes & DSI_CLOCK_LANE)
reg |= BIT(4);
if (lanes & DSI_DATA_LANE_0)
reg |= BIT(0);
if (lanes & DSI_DATA_LANE_1)
reg |= BIT(1);
if (lanes & DSI_DATA_LANE_2)
reg |= BIT(2);
if (lanes & DSI_DATA_LANE_3)
reg |= BIT(3);
/*
* ULPS entry request. Wait for short time to make sure
* that the lanes enter ULPS. Recommended as per HPG.
*/
DSI_W32(ctrl, DSI_LANE_CTRL, reg);
usleep_range(100, 110);
DSI_CTRL_HW_DBG(ctrl, "ULPS requested for lanes 0x%x\n", lanes);
}
/**
* ulps_exit() - exit ULPS on specified lanes
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to exit ULPS.
*
* Caller should check if lanes are in active mode by calling
* get_lanes_in_ulps() operation.
*/
void dsi_ctrl_hw_cmn_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes)
{
u32 reg = 0;
u32 prev_reg = 0;
prev_reg = DSI_R32(ctrl, DSI_LANE_CTRL);
prev_reg &= BIT(24);
if (lanes & DSI_CLOCK_LANE)
reg |= BIT(12);
if (lanes & DSI_DATA_LANE_0)
reg |= BIT(8);
if (lanes & DSI_DATA_LANE_1)
reg |= BIT(9);
if (lanes & DSI_DATA_LANE_2)
reg |= BIT(10);
if (lanes & DSI_DATA_LANE_3)
reg |= BIT(11);
/*
* ULPS Exit Request
* Hardware requirement is to wait for at least 1ms
*/
DSI_W32(ctrl, DSI_LANE_CTRL, reg | prev_reg);
usleep_range(1000, 1010);
/*
* Sometimes when exiting ULPS, it is possible that some DSI
* lanes are not in the stop state which could lead to DSI
* commands not going through. To avoid this, force the lanes
* to be in stop state.
*/
DSI_W32(ctrl, DSI_LANE_CTRL, (reg << 8) | prev_reg);
wmb(); /* ensure lanes are put to stop state */
DSI_W32(ctrl, DSI_LANE_CTRL, 0x0 | prev_reg);
wmb(); /* ensure lanes are put to stop state */
DSI_CTRL_HW_DBG(ctrl, "ULPS exit request for lanes=0x%x\n", lanes);
}
/**
* get_lanes_in_ulps() - returns the list of lanes in ULPS mode
* @ctrl: Pointer to the controller host hardware.
*
* Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
* state. If 0 is returned, all the lanes are active.
*
* Return: List of lanes in ULPS state.
*/
u32 dsi_ctrl_hw_cmn_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl)
{
u32 reg = 0;
u32 lanes = 0;
reg = DSI_R32(ctrl, DSI_LANE_STATUS);
if (!(reg & BIT(8)))
lanes |= DSI_DATA_LANE_0;
if (!(reg & BIT(9)))
lanes |= DSI_DATA_LANE_1;
if (!(reg & BIT(10)))
lanes |= DSI_DATA_LANE_2;
if (!(reg & BIT(11)))
lanes |= DSI_DATA_LANE_3;
if (!(reg & BIT(12)))
lanes |= DSI_CLOCK_LANE;
DSI_CTRL_HW_DBG(ctrl, "lanes in ulps = 0x%x\n", lanes);
return lanes;
}
/**
* phy_sw_reset() - perform a soft reset on the PHY.
* @ctrl: Pointer to the controller host hardware.
@@ -958,7 +1072,7 @@ u32 dsi_ctrl_hw_cmn_poll_dma_status(struct dsi_ctrl_hw *ctrl)
u32 const delay_us = 10;
u32 const timeout_us = 5000;
rc = readl_poll_timeout_atomic(ctrl->base + DSI_INT_CTRL, status,
rc = DSI_READ_POLL_TIMEOUT_ATOMIC(ctrl, DSI_INT_CTRL, status,
((status & DSI_CMD_MODE_DMA_DONE) > 0), delay_us, timeout_us);
if (rc) {
DSI_CTRL_HW_DBG(ctrl, "CMD_MODE_DMA_DONE failed\n");
@@ -1662,7 +1776,7 @@ int dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl_hw *ctrl)
u32 const sleep_us = 2 * 1000;
u32 const timeout_us = 200 * 1000;
rc = readl_poll_timeout(ctrl->base + DSI_STATUS, val,
rc = DSI_READ_POLL_TIMEOUT(ctrl, DSI_STATUS, val,
!(val & cmd_mode_mdp_busy_mask), sleep_us, timeout_us);
if (rc)
DSI_CTRL_HW_ERR(ctrl, "timed out waiting for idle\n");
@@ -1703,7 +1817,7 @@ int dsi_ctrl_hw_cmn_wait4dynamic_refresh_done(struct dsi_ctrl_hw *ctrl)
u32 const timeout_us = 84000; /* approximately 5 vsyncs */
u32 reg = 0, dyn_refresh_done = BIT(28);
rc = readl_poll_timeout(ctrl->base + DSI_INT_CTRL, reg,
rc = DSI_READ_POLL_TIMEOUT(ctrl, DSI_INT_CTRL, reg,
(reg & dyn_refresh_done), sleep_us, timeout_us);
if (rc) {
DSI_CTRL_HW_ERR(ctrl, "wait4dynamic refresh timedout %d\n", rc);
@@ -1725,7 +1839,7 @@ bool dsi_ctrl_hw_cmn_vid_engine_busy(struct dsi_ctrl_hw *ctrl)
u32 const sleep_us = 1000;
u32 const timeout_us = 50000;
rc = readl_poll_timeout(ctrl->base + DSI_STATUS, reg,
rc = DSI_READ_POLL_TIMEOUT(ctrl, DSI_STATUS, reg,
!(reg & video_engine_busy), sleep_us, timeout_us);
if (rc)
return true;


@@ -4333,10 +4333,7 @@ void dsi_display_update_byte_intf_div(struct dsi_display *display)
config = &display->panel->host_config;
phy_ver = dsi_phy_get_version(m_ctrl->phy);
if (phy_ver <= DSI_PHY_VERSION_2_0)
config->byte_intf_clk_div = 1;
else
config->byte_intf_clk_div = 2;
config->byte_intf_clk_div = 2;
}
static int dsi_display_update_dsi_bitrate(struct dsi_display *display,


@@ -1,57 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_HW_H_
#define _DSI_HW_H_
#include <linux/io.h>
#define DSI_R32(dsi_hw, off) readl_relaxed((dsi_hw)->base + (off))
#define DSI_W32(dsi_hw, off, val) \
do {\
pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
(dsi_hw)->index, #off, (uint32_t)(val)); \
writel_relaxed((val), (dsi_hw)->base + (off)); \
} while (0)
#define DSI_MMSS_MISC_R32(dsi_hw, off) \
readl_relaxed((dsi_hw)->mmss_misc_base + (off))
#define DSI_MMSS_MISC_W32(dsi_hw, off, val) \
do {\
pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
(dsi_hw)->index, #off, val); \
writel_relaxed((val), (dsi_hw)->mmss_misc_base + (off)); \
} while (0)
#define DSI_MISC_R32(dsi_hw, off) \
readl_relaxed((dsi_hw)->phy_clamp_base + (off))
#define DSI_MISC_W32(dsi_hw, off, val) \
do {\
pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
(dsi_hw)->index, #off, val); \
writel_relaxed((val), (dsi_hw)->phy_clamp_base + (off)); \
} while (0)
#define DSI_DISP_CC_R32(dsi_hw, off) \
readl_relaxed((dsi_hw)->disp_cc_base + (off))
#define DSI_DISP_CC_W32(dsi_hw, off, val) \
do {\
pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
(dsi_hw)->index, #off, val); \
writel_relaxed((val), (dsi_hw)->disp_cc_base + (off)); \
} while (0)
#define DSI_GEN_R32(base, offset) readl_relaxed((base) + (offset))
#define DSI_GEN_W32(base, offset, val) writel_relaxed((val), (base) + (offset))
#define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
#define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
#define DSI_READ_POLL_TIMEOUT(dsi_hw, off, val, cond, delay_us, timeout_us) \
readl_poll_timeout((dsi_hw)->base + (off), (val), (cond), (delay_us), (timeout_us))
#define DSI_READ_POLL_TIMEOUT_ATOMIC_GEN(base, index, off, val, cond, delay_us, timeout_us) \
readl_poll_timeout_atomic((base) + (off), (val), (cond), (delay_us), (timeout_us))
#define DSI_GEN_W32_DEBUG(base, index, offset, val) \
do {\
pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
(index), #offset, (uint32_t)(val)); \
DSI_GEN_W32(base, offset, val); \
} while (0)
#define DSI_R32(dsi_hw, off) DSI_GEN_R32((dsi_hw)->base, off)
#define DSI_W32(dsi_hw, off, val) DSI_GEN_W32_DEBUG((dsi_hw)->base, \
(dsi_hw)->index, off, val)
#define DSI_READ_POLL_TIMEOUT_ATOMIC(dsi_hw, off, val, cond, delay_us, timeout_us) \
DSI_READ_POLL_TIMEOUT_ATOMIC_GEN((dsi_hw)->base, (dsi_hw)->index, off, val, cond, delay_us, timeout_us)
#define PLL_CALC_DATA(addr0, addr1, data0, data1) \
(((data1) << 24) | ((((addr1)/4) & 0xFF) << 16) | \
((data0) << 8) | (((addr0)/4) & 0xFF))
#define DSI_DYN_REF_REG_W(base, offset, addr0, addr1, data0, data1) \
writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
(base) + (offset))
#define DSI_GEN_R32(base, offset) readl_relaxed(base + (offset))
#define DSI_GEN_W32(base, offset, val) writel_relaxed((val), base + (offset))
DSI_GEN_W32(base, offset, PLL_CALC_DATA(addr0, addr1, data0, data1))
#endif /* _DSI_HW_H_ */
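This header rework is the core of the change: the per-base read/write macros are now expressed through DSI_GEN_R32/DSI_GEN_W32(_DEBUG), and DSI_READ_POLL_TIMEOUT / DSI_READ_POLL_TIMEOUT_ATOMIC give callers one register-poll interface for sleeping and atomic contexts respectively. A minimal sketch of both flavours as used by the common controller code earlier in this diff (illustrative, not part of the patch):

    /* Sleeping context: the MDP idle wait polls DSI_STATUS every 2 ms, up to 200 ms. */
    rc = DSI_READ_POLL_TIMEOUT(ctrl, DSI_STATUS, val,
            !(val & cmd_mode_mdp_busy_mask), sleep_us, timeout_us);

    /* Atomic context: the DMA-done poll uses the _ATOMIC variant, which maps to
     * readl_poll_timeout_atomic() and busy-waits instead of sleeping.
     */
    rc = DSI_READ_POLL_TIMEOUT_ATOMIC(ctrl, DSI_INT_CTRL, status,
            ((status & DSI_CMD_MODE_DMA_DONE) > 0), delay_us, timeout_us);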


@@ -31,34 +31,6 @@ struct dsi_phy_list_item {
static LIST_HEAD(dsi_phy_list);
static DEFINE_MUTEX(dsi_phy_list_lock);
static const struct dsi_ver_spec_info dsi_phy_v0_0_hpm = {
.version = DSI_PHY_VERSION_0_0_HPM,
.lane_cfg_count = 4,
.strength_cfg_count = 2,
.regulator_cfg_count = 1,
.timing_cfg_count = 8,
};
static const struct dsi_ver_spec_info dsi_phy_v0_0_lpm = {
.version = DSI_PHY_VERSION_0_0_LPM,
.lane_cfg_count = 4,
.strength_cfg_count = 2,
.regulator_cfg_count = 1,
.timing_cfg_count = 8,
};
static const struct dsi_ver_spec_info dsi_phy_v1_0 = {
.version = DSI_PHY_VERSION_1_0,
.lane_cfg_count = 4,
.strength_cfg_count = 2,
.regulator_cfg_count = 1,
.timing_cfg_count = 8,
};
static const struct dsi_ver_spec_info dsi_phy_v2_0 = {
.version = DSI_PHY_VERSION_2_0,
.lane_cfg_count = 4,
.strength_cfg_count = 2,
.regulator_cfg_count = 1,
.timing_cfg_count = 8,
};
static const struct dsi_ver_spec_info dsi_phy_v3_0 = {
.version = DSI_PHY_VERSION_3_0,
.lane_cfg_count = 4,
@@ -100,14 +72,6 @@ static const struct dsi_ver_spec_info dsi_phy_v4_3 = {
};
static const struct of_device_id msm_dsi_phy_of_match[] = {
{ .compatible = "qcom,dsi-phy-v0.0-hpm",
.data = &dsi_phy_v0_0_hpm,},
{ .compatible = "qcom,dsi-phy-v0.0-lpm",
.data = &dsi_phy_v0_0_lpm,},
{ .compatible = "qcom,dsi-phy-v1.0",
.data = &dsi_phy_v1_0,},
{ .compatible = "qcom,dsi-phy-v2.0",
.data = &dsi_phy_v2_0,},
{ .compatible = "qcom,dsi-phy-v3.0",
.data = &dsi_phy_v3_0,},
{ .compatible = "qcom,dsi-phy-v4.0",
@@ -166,18 +130,6 @@ static int dsi_phy_regmap_init(struct platform_device *pdev,
DSI_PHY_DBG(phy, "map dsi_phy registers to %pK\n", phy->hw.base);
switch (phy->ver_info->version) {
case DSI_PHY_VERSION_2_0:
ptr = msm_ioremap(pdev, "phy_clamp_base", phy->name);
if (IS_ERR(ptr))
phy->hw.phy_clamp_base = NULL;
else
phy->hw.phy_clamp_base = ptr;
break;
default:
break;
}
return rc;
}


@@ -7,6 +7,7 @@
#define _DSI_PHY_HW_H_
#include "dsi_defs.h"
#include "dsi_hw.h"
#define DSI_MAX_SETTINGS 8
#define DSI_PHY_TIMING_V3_SIZE 12
@@ -21,13 +22,13 @@
#define DSI_PHY_WARN(p, fmt, ...) DRM_WARN("[msm-dsi-warn]: DSI_%d: " fmt,\
p ? p->index : -1, ##__VA_ARGS__)
#define DSI_MISC_R32(dsi_phy_hw, off) DSI_GEN_R32((dsi_phy_hw)->phy_clamp_base, off)
#define DSI_MISC_W32(dsi_phy_hw, off, val) \
DSI_GEN_W32_DEBUG((dsi_phy_hw)->phy_clamp_base, (dsi_phy_hw)->index, off, val)
/**
* enum dsi_phy_version - DSI PHY version enumeration
* @DSI_PHY_VERSION_UNKNOWN: Unknown version.
* @DSI_PHY_VERSION_0_0_HPM: 28nm-HPM.
* @DSI_PHY_VERSION_0_0_LPM: 28nm-LPM.
* @DSI_PHY_VERSION_1_0: 20nm
* @DSI_PHY_VERSION_2_0: 14nm
* @DSI_PHY_VERSION_3_0: 10nm
* @DSI_PHY_VERSION_4_0: 7nm
* @DSI_PHY_VERSION_4_1: 7nm
@@ -37,10 +38,6 @@
*/
enum dsi_phy_version {
DSI_PHY_VERSION_UNKNOWN,
DSI_PHY_VERSION_0_0_HPM, /* 28nm-HPM */
DSI_PHY_VERSION_0_0_LPM, /* 28nm-LPM */
DSI_PHY_VERSION_1_0, /* 20nm */
DSI_PHY_VERSION_2_0, /* 14nm */
DSI_PHY_VERSION_3_0, /* 10nm */
DSI_PHY_VERSION_4_0, /* 7nm */
DSI_PHY_VERSION_4_1, /* 7nm */


@@ -1,636 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/math64.h>
#include <linux/delay.h>
#include "dsi_hw.h"
#include "dsi_phy_hw.h"
#define DSIPHY_CMN_REVISION_ID0 0x0000
#define DSIPHY_CMN_REVISION_ID1 0x0004
#define DSIPHY_CMN_REVISION_ID2 0x0008
#define DSIPHY_CMN_REVISION_ID3 0x000C
#define DSIPHY_CMN_CLK_CFG0 0x0010
#define DSIPHY_CMN_CLK_CFG1 0x0014
#define DSIPHY_CMN_GLBL_TEST_CTRL 0x0018
#define DSIPHY_CMN_CTRL_0 0x001C
#define DSIPHY_CMN_CTRL_1 0x0020
#define DSIPHY_CMN_CAL_HW_TRIGGER 0x0024
#define DSIPHY_CMN_CAL_SW_CFG0 0x0028
#define DSIPHY_CMN_CAL_SW_CFG1 0x002C
#define DSIPHY_CMN_CAL_SW_CFG2 0x0030
#define DSIPHY_CMN_CAL_HW_CFG0 0x0034
#define DSIPHY_CMN_CAL_HW_CFG1 0x0038
#define DSIPHY_CMN_CAL_HW_CFG2 0x003C
#define DSIPHY_CMN_CAL_HW_CFG3 0x0040
#define DSIPHY_CMN_CAL_HW_CFG4 0x0044
#define DSIPHY_CMN_PLL_CNTRL 0x0048
#define DSIPHY_CMN_LDO_CNTRL 0x004C
#define DSIPHY_CMN_REGULATOR_CAL_STATUS0 0x0064
#define DSIPHY_CMN_REGULATOR_CAL_STATUS1 0x0068
#define DSI_MDP_ULPS_CLAMP_ENABLE_OFF 0x0054
/* n = 0..3 for data lanes and n = 4 for clock lane
* t for count per lane
*/
#define DSIPHY_DLNX_CFG(n, t) \
(0x100 + ((t) * 0x04) + ((n) * 0x80))
#define DSIPHY_DLNX_TIMING_CTRL(n, t) \
(0x118 + ((t) * 0x04) + ((n) * 0x80))
#define DSIPHY_DLNX_STRENGTH_CTRL(n, t) \
(0x138 + ((t) * 0x04) + ((n) * 0x80))
#define DSIPHY_DLNX_TEST_DATAPATH(n) (0x110 + ((n) * 0x80))
#define DSIPHY_DLNX_TEST_STR(n) (0x114 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_POLY(n) (0x140 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_SEED0(n) (0x144 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_SEED1(n) (0x148 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_HEAD(n) (0x14C + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_SOT(n) (0x150 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_CTRL0(n) (0x154 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_CTRL1(n) (0x158 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_CTRL2(n) (0x15C + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_CTRL3(n) (0x160 + ((n) * 0x80))
#define DSIPHY_DLNX_VREG_CNTRL(n) (0x164 + ((n) * 0x80))
#define DSIPHY_DLNX_HSTX_STR_STATUS(n) (0x168 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_STATUS0(n) (0x16C + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_STATUS1(n) (0x170 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_STATUS2(n) (0x174 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_STATUS3(n) (0x178 + ((n) * 0x80))
#define DSIPHY_DLNX_MISR_STATUS(n) (0x17C + ((n) * 0x80))
#define DSIPHY_PLL_CLKBUFLR_EN 0x041C
#define DSIPHY_PLL_PLL_BANDGAP 0x0508
/* dynamic refresh control registers */
#define DSI_DYN_REFRESH_CTRL 0x000
#define DSI_DYN_REFRESH_PIPE_DELAY 0x004
#define DSI_DYN_REFRESH_PIPE_DELAY2 0x008
#define DSI_DYN_REFRESH_PLL_DELAY 0x00C
#define DSI_DYN_REFRESH_STATUS 0x010
#define DSI_DYN_REFRESH_PLL_CTRL0 0x014
#define DSI_DYN_REFRESH_PLL_CTRL1 0x018
#define DSI_DYN_REFRESH_PLL_CTRL2 0x01C
#define DSI_DYN_REFRESH_PLL_CTRL3 0x020
#define DSI_DYN_REFRESH_PLL_CTRL4 0x024
#define DSI_DYN_REFRESH_PLL_CTRL5 0x028
#define DSI_DYN_REFRESH_PLL_CTRL6 0x02C
#define DSI_DYN_REFRESH_PLL_CTRL7 0x030
#define DSI_DYN_REFRESH_PLL_CTRL8 0x034
#define DSI_DYN_REFRESH_PLL_CTRL9 0x038
#define DSI_DYN_REFRESH_PLL_CTRL10 0x03C
#define DSI_DYN_REFRESH_PLL_CTRL11 0x040
#define DSI_DYN_REFRESH_PLL_CTRL12 0x044
#define DSI_DYN_REFRESH_PLL_CTRL13 0x048
#define DSI_DYN_REFRESH_PLL_CTRL14 0x04C
#define DSI_DYN_REFRESH_PLL_CTRL15 0x050
#define DSI_DYN_REFRESH_PLL_CTRL16 0x054
#define DSI_DYN_REFRESH_PLL_CTRL17 0x058
#define DSI_DYN_REFRESH_PLL_CTRL18 0x05C
#define DSI_DYN_REFRESH_PLL_CTRL19 0x060
#define DSI_DYN_REFRESH_PLL_CTRL20 0x064
#define DSI_DYN_REFRESH_PLL_CTRL21 0x068
#define DSI_DYN_REFRESH_PLL_CTRL22 0x06C
#define DSI_DYN_REFRESH_PLL_CTRL23 0x070
#define DSI_DYN_REFRESH_PLL_CTRL24 0x074
#define DSI_DYN_REFRESH_PLL_CTRL25 0x078
#define DSI_DYN_REFRESH_PLL_CTRL26 0x07C
#define DSI_DYN_REFRESH_PLL_CTRL27 0x080
#define DSI_DYN_REFRESH_PLL_CTRL28 0x084
#define DSI_DYN_REFRESH_PLL_CTRL29 0x088
#define DSI_DYN_REFRESH_PLL_CTRL30 0x08C
#define DSI_DYN_REFRESH_PLL_CTRL31 0x090
#define DSI_DYN_REFRESH_PLL_UPPER_ADDR 0x094
#define DSI_DYN_REFRESH_PLL_UPPER_ADDR2 0x098
#define DSIPHY_DLN0_CFG1 0x0104
#define DSIPHY_DLN0_TIMING_CTRL_4 0x0118
#define DSIPHY_DLN0_TIMING_CTRL_5 0x011C
#define DSIPHY_DLN0_TIMING_CTRL_6 0x0120
#define DSIPHY_DLN0_TIMING_CTRL_7 0x0124
#define DSIPHY_DLN0_TIMING_CTRL_8 0x0128
#define DSIPHY_DLN1_CFG1 0x0184
#define DSIPHY_DLN1_TIMING_CTRL_4 0x0198
#define DSIPHY_DLN1_TIMING_CTRL_5 0x019C
#define DSIPHY_DLN1_TIMING_CTRL_6 0x01A0
#define DSIPHY_DLN1_TIMING_CTRL_7 0x01A4
#define DSIPHY_DLN1_TIMING_CTRL_8 0x01A8
#define DSIPHY_DLN2_CFG1 0x0204
#define DSIPHY_DLN2_TIMING_CTRL_4 0x0218
#define DSIPHY_DLN2_TIMING_CTRL_5 0x021C
#define DSIPHY_DLN2_TIMING_CTRL_6 0x0220
#define DSIPHY_DLN2_TIMING_CTRL_7 0x0224
#define DSIPHY_DLN2_TIMING_CTRL_8 0x0228
#define DSIPHY_DLN3_CFG1 0x0284
#define DSIPHY_DLN3_TIMING_CTRL_4 0x0298
#define DSIPHY_DLN3_TIMING_CTRL_5 0x029C
#define DSIPHY_DLN3_TIMING_CTRL_6 0x02A0
#define DSIPHY_DLN3_TIMING_CTRL_7 0x02A4
#define DSIPHY_DLN3_TIMING_CTRL_8 0x02A8
#define DSIPHY_CKLN_CFG1 0x0304
#define DSIPHY_CKLN_TIMING_CTRL_4 0x0318
#define DSIPHY_CKLN_TIMING_CTRL_5 0x031C
#define DSIPHY_CKLN_TIMING_CTRL_6 0x0320
#define DSIPHY_CKLN_TIMING_CTRL_7 0x0324
#define DSIPHY_CKLN_TIMING_CTRL_8 0x0328
#define DSIPHY_PLL_RESETSM_CNTRL5 0x043c
/**
* regulator_enable() - enable regulators for DSI PHY
* @phy: Pointer to DSI PHY hardware object.
* @reg_cfg: Regulator configuration for all DSI lanes.
*/
void dsi_phy_hw_v2_0_regulator_enable(struct dsi_phy_hw *phy,
struct dsi_phy_per_lane_cfgs *reg_cfg)
{
int i;
bool is_split_link = test_bit(DSI_PHY_SPLIT_LINK, phy->feature_map);
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(i), reg_cfg->lane[i][0]);
if (is_split_link)
DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(DSI_LOGICAL_CLOCK_LANE+1),
reg_cfg->lane[DSI_LOGICAL_CLOCK_LANE][0]);
/* make sure all values are written to hardware */
wmb();
DSI_PHY_DBG(phy, "Phy regulators enabled\n");
}
/**
* regulator_disable() - disable regulators
* @phy: Pointer to DSI PHY hardware object.
*/
void dsi_phy_hw_v2_0_regulator_disable(struct dsi_phy_hw *phy)
{
DSI_PHY_DBG(phy, "Phy regulators disabled\n");
}
/**
* enable() - Enable PHY hardware
* @phy: Pointer to DSI PHY hardware object.
* @cfg: Per lane configurations for timing, strength and lane
* configurations.
*/
void dsi_phy_hw_v2_0_enable(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg)
{
int i, j;
struct dsi_phy_per_lane_cfgs *lanecfg = &cfg->lanecfg;
struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
struct dsi_phy_per_lane_cfgs *strength = &cfg->strength;
u32 data;
bool is_split_link = test_bit(DSI_PHY_SPLIT_LINK, phy->feature_map);
DSI_W32(phy, DSIPHY_CMN_LDO_CNTRL, 0x1C);
DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0x1);
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
for (j = 0; j < lanecfg->count_per_lane; j++)
DSI_W32(phy, DSIPHY_DLNX_CFG(i, j),
lanecfg->lane[i][j]);
DSI_W32(phy, DSIPHY_DLNX_TEST_STR(i), 0x88);
for (j = 0; j < timing->count_per_lane; j++)
DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL(i, j),
timing->lane[i][j]);
for (j = 0; j < strength->count_per_lane; j++)
DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL(i, j),
strength->lane[i][j]);
}
if (is_split_link) {
i = DSI_LOGICAL_CLOCK_LANE;
for (j = 0; j < lanecfg->count_per_lane; j++)
DSI_W32(phy, DSIPHY_DLNX_CFG(i+1, j),
lanecfg->lane[i][j]);
DSI_W32(phy, DSIPHY_DLNX_TEST_STR(i+1), 0x0);
DSI_W32(phy, DSIPHY_DLNX_TEST_DATAPATH(i+1), 0x88);
for (j = 0; j < timing->count_per_lane; j++)
DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL(i+1, j),
timing->lane[i][j]);
for (j = 0; j < strength->count_per_lane; j++)
DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL(i+1, j),
strength->lane[i][j]);
/* enable split link for cmn clk cfg1 */
data = DSI_R32(phy, DSIPHY_CMN_CLK_CFG1);
data |= BIT(1);
DSI_W32(phy, DSIPHY_CMN_CLK_CFG1, data);
}
/* make sure all values are written to hardware before enabling phy */
wmb();
DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x80);
udelay(100);
DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x00);
data = DSI_R32(phy, DSIPHY_CMN_GLBL_TEST_CTRL);
switch (cfg->pll_source) {
case DSI_PLL_SOURCE_STANDALONE:
DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x01);
data &= ~BIT(2);
break;
case DSI_PLL_SOURCE_NATIVE:
DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x03);
data &= ~BIT(2);
break;
case DSI_PLL_SOURCE_NON_NATIVE:
DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x00);
data |= BIT(2);
break;
default:
break;
}
DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, data);
/* Enable bias current for pll1 during split display case */
if (cfg->pll_source == DSI_PLL_SOURCE_NON_NATIVE)
DSI_W32(phy, DSIPHY_PLL_PLL_BANDGAP, 0x3);
DSI_PHY_DBG(phy, "Phy enabled\n");
}
/**
* disable() - Disable PHY hardware
* @phy: Pointer to DSI PHY hardware object.
*/
void dsi_phy_hw_v2_0_disable(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg)
{
DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0);
DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0);
DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0);
DSI_PHY_DBG(phy, "Phy disabled\n");
}
/**
* dsi_phy_hw_v2_0_idle_on() - Enable DSI PHY hardware during idle screen
* @phy: Pointer to DSI PHY hardware object.
*/
void dsi_phy_hw_v2_0_idle_on(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg)
{
int i = 0, j;
struct dsi_phy_per_lane_cfgs *strength = &cfg->strength;
bool is_split_link = test_bit(DSI_PHY_SPLIT_LINK, phy->feature_map);
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
for (j = 0; j < strength->count_per_lane; j++)
DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL(i, j),
strength->lane[i][j]);
}
if (is_split_link) {
i = DSI_LOGICAL_CLOCK_LANE;
for (j = 0; j < strength->count_per_lane; j++)
DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL(i+1, j),
strength->lane[i][j]);
}
wmb(); /* make sure write happens */
DSI_PHY_DBG(phy, "Phy enabled out of idle screen\n");
}
/**
* dsi_phy_hw_v2_0_idle_off() - Disable DSI PHY hardware during idle screen
* @phy: Pointer to DSI PHY hardware object.
*/
void dsi_phy_hw_v2_0_idle_off(struct dsi_phy_hw *phy)
{
int i = 0;
bool is_split_link = test_bit(DSI_PHY_SPLIT_LINK, phy->feature_map);
DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x7f);
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(i), 0x1c);
if (is_split_link)
DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(DSI_LOGICAL_CLOCK_LANE+1),
0x1c);
DSI_W32(phy, DSIPHY_CMN_LDO_CNTRL, 0x1C);
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL(i, 1), 0x0);
if (is_split_link)
DSI_W32(phy,
DSIPHY_DLNX_STRENGTH_CTRL(DSI_LOGICAL_CLOCK_LANE+1, 1), 0x0);
wmb(); /* make sure write happens */
DSI_PHY_DBG(phy, "Phy disabled during idle screen\n");
}
int dsi_phy_hw_timing_val_v2_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
u32 *timing_val, u32 size)
{
int i = 0, j = 0;
if (size != (DSI_LANE_MAX * DSI_MAX_SETTINGS)) {
DSI_ERR("Unexpected timing array size %d\n", size);
return -EINVAL;
}
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
for (j = 0; j < DSI_MAX_SETTINGS; j++) {
timing_cfg->lane[i][j] = *timing_val;
timing_val++;
}
}
return 0;
}
void dsi_phy_hw_v2_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable)
{
u32 clamp_reg = 0;
if (!phy->phy_clamp_base) {
DSI_PHY_DBG(phy, "phy_clamp_base NULL\n");
return;
}
if (enable) {
clamp_reg |= BIT(0);
DSI_MISC_W32(phy, DSI_MDP_ULPS_CLAMP_ENABLE_OFF,
clamp_reg);
DSI_PHY_DBG(phy, "clamp enabled\n");
} else {
clamp_reg &= ~BIT(0);
DSI_MISC_W32(phy, DSI_MDP_ULPS_CLAMP_ENABLE_OFF,
clamp_reg);
DSI_PHY_DBG(phy, "clamp disabled\n");
}
}
void dsi_phy_hw_v2_0_dyn_refresh_config(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, bool is_master)
{
u32 glbl_tst_cntrl;
if (is_master) {
glbl_tst_cntrl = DSI_R32(phy, DSIPHY_CMN_GLBL_TEST_CTRL);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL0,
DSIPHY_CMN_GLBL_TEST_CTRL,
DSIPHY_PLL_PLL_BANDGAP,
glbl_tst_cntrl | BIT(1), 0x1);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL1,
DSIPHY_PLL_RESETSM_CNTRL5,
DSIPHY_PLL_PLL_BANDGAP, 0x0D, 0x03);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL2,
DSIPHY_PLL_RESETSM_CNTRL5,
DSIPHY_CMN_PLL_CNTRL, 0x1D, 0x00);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL3,
DSIPHY_CMN_CTRL_1, DSIPHY_DLN0_CFG1, 0x20, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL4,
DSIPHY_DLN1_CFG1, DSIPHY_DLN2_CFG1, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL5,
DSIPHY_DLN3_CFG1, DSIPHY_CKLN_CFG1, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL6,
DSIPHY_DLN0_TIMING_CTRL_4,
DSIPHY_DLN1_TIMING_CTRL_4,
cfg->timing.lane[0][0], cfg->timing.lane[1][0]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL7,
DSIPHY_DLN2_TIMING_CTRL_4,
DSIPHY_DLN3_TIMING_CTRL_4,
cfg->timing.lane[2][0], cfg->timing.lane[3][0]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL8,
DSIPHY_CKLN_TIMING_CTRL_4,
DSIPHY_DLN0_TIMING_CTRL_5,
cfg->timing.lane[4][0], cfg->timing.lane[0][1]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
DSIPHY_DLN1_TIMING_CTRL_5,
DSIPHY_DLN2_TIMING_CTRL_5,
cfg->timing.lane[1][1], cfg->timing.lane[2][1]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL10,
DSIPHY_DLN3_TIMING_CTRL_5,
DSIPHY_CKLN_TIMING_CTRL_5,
cfg->timing.lane[3][1], cfg->timing.lane[4][1]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL11,
DSIPHY_DLN0_TIMING_CTRL_6,
DSIPHY_DLN1_TIMING_CTRL_6,
cfg->timing.lane[0][2], cfg->timing.lane[1][2]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL12,
DSIPHY_DLN2_TIMING_CTRL_6,
DSIPHY_DLN3_TIMING_CTRL_6,
cfg->timing.lane[2][2], cfg->timing.lane[3][2]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL13,
DSIPHY_CKLN_TIMING_CTRL_6,
DSIPHY_DLN0_TIMING_CTRL_7,
cfg->timing.lane[4][2], cfg->timing.lane[0][3]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL14,
DSIPHY_DLN1_TIMING_CTRL_7,
DSIPHY_DLN2_TIMING_CTRL_7,
cfg->timing.lane[1][3], cfg->timing.lane[2][3]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL15,
DSIPHY_DLN3_TIMING_CTRL_7,
DSIPHY_CKLN_TIMING_CTRL_7,
cfg->timing.lane[3][3], cfg->timing.lane[4][3]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base,
DSI_DYN_REFRESH_PLL_CTRL16,
DSIPHY_DLN0_TIMING_CTRL_8,
DSIPHY_DLN1_TIMING_CTRL_8,
cfg->timing.lane[0][4], cfg->timing.lane[1][4]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL17,
DSIPHY_DLN2_TIMING_CTRL_8,
DSIPHY_DLN3_TIMING_CTRL_8,
cfg->timing.lane[2][4], cfg->timing.lane[3][4]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL18,
DSIPHY_CKLN_TIMING_CTRL_8, DSIPHY_CMN_CTRL_1,
cfg->timing.lane[4][4], 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL30,
DSIPHY_CMN_GLBL_TEST_CTRL,
DSIPHY_CMN_GLBL_TEST_CTRL,
((glbl_tst_cntrl) & (~BIT(2))),
((glbl_tst_cntrl) & (~BIT(2))));
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL31,
DSIPHY_CMN_GLBL_TEST_CTRL,
DSIPHY_CMN_GLBL_TEST_CTRL,
((glbl_tst_cntrl) & (~BIT(2))),
((glbl_tst_cntrl) & (~BIT(2))));
} else {
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL0,
DSIPHY_DLN0_CFG1, DSIPHY_DLN1_CFG1, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL1,
DSIPHY_DLN2_CFG1, DSIPHY_DLN3_CFG1, 0x0, 0x0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL2,
DSIPHY_CKLN_CFG1, DSIPHY_DLN0_TIMING_CTRL_4,
0x0, cfg->timing.lane[0][0]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL3,
DSIPHY_DLN1_TIMING_CTRL_4,
DSIPHY_DLN2_TIMING_CTRL_4,
cfg->timing.lane[1][0], cfg->timing.lane[2][0]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL4,
DSIPHY_DLN3_TIMING_CTRL_4,
DSIPHY_CKLN_TIMING_CTRL_4,
cfg->timing.lane[3][0], cfg->timing.lane[4][0]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL5,
DSIPHY_DLN0_TIMING_CTRL_5,
DSIPHY_DLN1_TIMING_CTRL_5,
cfg->timing.lane[0][1], cfg->timing.lane[1][1]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL6,
DSIPHY_DLN2_TIMING_CTRL_5,
DSIPHY_DLN3_TIMING_CTRL_5,
cfg->timing.lane[2][1], cfg->timing.lane[3][1]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL7,
DSIPHY_CKLN_TIMING_CTRL_5,
DSIPHY_DLN0_TIMING_CTRL_6,
cfg->timing.lane[4][1], cfg->timing.lane[0][2]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL8,
DSIPHY_DLN1_TIMING_CTRL_6,
DSIPHY_DLN2_TIMING_CTRL_6,
cfg->timing.lane[1][2], cfg->timing.lane[2][2]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
DSIPHY_DLN3_TIMING_CTRL_6,
DSIPHY_CKLN_TIMING_CTRL_6,
cfg->timing.lane[3][2], cfg->timing.lane[4][2]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL10,
DSIPHY_DLN0_TIMING_CTRL_7,
DSIPHY_DLN1_TIMING_CTRL_7,
cfg->timing.lane[0][3], cfg->timing.lane[1][3]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL11,
DSIPHY_DLN2_TIMING_CTRL_7,
DSIPHY_DLN3_TIMING_CTRL_7,
cfg->timing.lane[2][3], cfg->timing.lane[3][3]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL12,
DSIPHY_CKLN_TIMING_CTRL_7,
DSIPHY_DLN0_TIMING_CTRL_8,
cfg->timing.lane[4][3], cfg->timing.lane[0][4]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL13,
DSIPHY_DLN1_TIMING_CTRL_8,
DSIPHY_DLN2_TIMING_CTRL_8,
cfg->timing.lane[1][4], cfg->timing.lane[2][4]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL14,
DSIPHY_DLN3_TIMING_CTRL_8,
DSIPHY_CKLN_TIMING_CTRL_8,
cfg->timing.lane[3][4], cfg->timing.lane[4][4]);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL15,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL16,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL17,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL18,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL19,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL20,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL21,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL22,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL23,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL24,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL25,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL26,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL27,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL28,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL29,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL30,
0x0110, 0x0110, 0, 0);
DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL31,
0x0110, 0x0110, 0, 0);
DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_UPPER_ADDR,
0x0);
DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_UPPER_ADDR2,
0x0);
}
wmb(); /* make sure phy timings are updated */
}
void dsi_phy_hw_v2_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
struct dsi_dyn_clk_delay *delay)
{
if (!delay)
return;
DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY,
delay->pipe_delay);
DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY2,
delay->pipe_delay2);
DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_DELAY,
delay->pll_delay);
}
void dsi_phy_hw_v2_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset)
{
u32 reg;
/*
 * If no offset is specified, clear the dynamic refresh ctrl register;
 * this is the last step of the dynamic refresh sequence.
 */
if (!offset) {
reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
reg &= ~(BIT(0) | BIT(8));
DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
wmb(); /* ensure dynamic fps is cleared */
return;
}
if (offset & BIT(DYN_REFRESH_INTF_SEL)) {
reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
reg |= BIT(13);
DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
}
if (offset & BIT(DYN_REFRESH_SWI_CTRL)) {
reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
reg |= BIT(0);
DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
}
if (offset & BIT(DYN_REFRESH_SW_TRIGGER)) {
reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
reg |= BIT(8);
DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
wmb(); /* ensure dynamic fps is triggered */
}
}
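/*
 * Illustrative call order for the dynamic refresh helpers above (a sketch,
 * not part of this file; the caller and the wait step are assumed): program
 * the shadow registers and pipe delays first, pulse the trigger bits, and
 * clear the control register as the final step.
 */
static void dsi_phy_v2_0_dyn_refresh_sketch(struct dsi_phy_hw *phy,
					    struct dsi_phy_cfg *cfg,
					    struct dsi_dyn_clk_delay *delay)
{
	dsi_phy_hw_v2_0_dyn_refresh_config(phy, cfg, true);
	dsi_phy_hw_v2_0_dyn_refresh_pipe_delay(phy, delay);
	dsi_phy_hw_v2_0_dyn_refresh_helper(phy,
			BIT(DYN_REFRESH_SWI_CTRL) | BIT(DYN_REFRESH_SW_TRIGGER));
	/* ... wait for the new clock rate to take effect ... */
	dsi_phy_hw_v2_0_dyn_refresh_helper(phy, 0); /* clear ctrl register */
}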
int dsi_phy_hw_v2_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
u32 *dst, u32 size)
{
int i, j, count = 0;
if (!timings || !dst || !size)
return -EINVAL;
if (size != (DSI_LANE_MAX * DSI_MAX_SETTINGS)) {
DSI_ERR("size mismatch\n");
return -EINVAL;
}
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
for (j = 0; j < DSI_MAX_SETTINGS; j++) {
dst[count] = timings->lane[i][j];
count++;
}
}
return 0;
}
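
A hypothetical round trip (the helper function and array names below are mine) showing how the two conversion routines in this file relate: dsi_phy_hw_timing_val_v2_0() unpacks a flat array of DSI_LANE_MAX * DSI_MAX_SETTINGS values into the per-lane matrix, and dsi_phy_hw_v2_0_cache_phy_timings() flattens it back out.

static void dsi_phy_v2_0_timing_round_trip_sketch(u32 *raw)
{
	u32 cached[DSI_LANE_MAX * DSI_MAX_SETTINGS];
	struct dsi_phy_per_lane_cfgs cfg = {};

	if (dsi_phy_hw_timing_val_v2_0(&cfg, raw, ARRAY_SIZE(cached)))
		return;

	/* cfg.lane[i][j] now equals raw[i * DSI_MAX_SETTINGS + j] */
	dsi_phy_hw_v2_0_cache_phy_timings(&cfg, cached, ARRAY_SIZE(cached));
}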

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/math64.h>
@@ -245,8 +245,8 @@ void dsi_phy_hw_v3_0_enable(struct dsi_phy_hw *phy,
DSI_PHY_WARN(phy, "PLL turned on before configuring PHY\n");
/* wait for REFGEN READY */
rc = readl_poll_timeout_atomic(phy->base + DSIPHY_CMN_PHY_STATUS,
status, (status & BIT(0)), delay_us, timeout_us);
rc = DSI_READ_POLL_TIMEOUT_ATOMIC(phy, DSIPHY_CMN_PHY_STATUS,
status, (status & BIT(0)), delay_us, timeout_us);
if (rc) {
DSI_PHY_ERR(phy, "Ref gen not ready. Aborting\n");
return;
@@ -364,7 +364,7 @@ int dsi_phy_hw_v3_0_wait_for_lane_idle(
DSI_PHY_DBG(phy, "polling for lanes to be in stop state, mask=0x%08x\n",
stop_state_mask);
rc = readl_poll_timeout(phy->base + DSIPHY_CMN_LANE_STATUS1, val,
rc = DSI_READ_POLL_TIMEOUT(phy, DSIPHY_CMN_LANE_STATUS1, val,
((val & stop_state_mask) == stop_state_mask),
sleep_us, timeout_us);
if (rc) {
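
The hunks above and below replace open-coded readl_poll_timeout*() calls with the new generic DSI_READ_POLL_TIMEOUT*() helpers; their definitions are not part of these hunks. A minimal sketch of what such wrappers could look like, assuming they simply forward to the iopoll helpers and resolve the register offset against phy->base (names and placement are assumptions):

#include <linux/iopoll.h>

#define DSI_READ_POLL_TIMEOUT(phy, off, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((phy)->base + (off), (val), (cond), \
			   (sleep_us), (timeout_us))

#define DSI_READ_POLL_TIMEOUT_ATOMIC(phy, off, val, cond, delay_us, timeout_us) \
	readl_poll_timeout_atomic((phy)->base + (off), (val), (cond), \
				  (delay_us), (timeout_us))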

View File

@@ -497,7 +497,7 @@ void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy,
}
/* wait for REFGEN READY */
rc = readl_poll_timeout_atomic(phy->base + DSIPHY_CMN_PHY_STATUS,
rc = DSI_READ_POLL_TIMEOUT_ATOMIC(phy, DSIPHY_CMN_PHY_STATUS,
status, (status & BIT(0)), delay_us, timeout_us);
if (rc) {
DSI_PHY_ERR(phy, "Ref gen not ready. Aborting\n");
@@ -583,7 +583,7 @@ int dsi_phy_hw_v4_0_wait_for_lane_idle(
DSI_PHY_DBG(phy, "polling for lanes to be in stop state, mask=0x%08x\n",
stop_state_mask);
rc = readl_poll_timeout(phy->base + DSIPHY_CMN_LANE_STATUS1, val,
rc = DSI_READ_POLL_TIMEOUT(phy, DSIPHY_CMN_LANE_STATUS1, val,
((val & stop_state_mask) == stop_state_mask),
sleep_us, timeout_us);
if (rc) {

View File

@@ -974,22 +974,6 @@ int dsi_phy_timing_calc_init(struct dsi_phy_hw *phy,
phy->ops.timing_ops = ops;
switch (version) {
case DSI_PHY_VERSION_2_0:
ops->get_default_phy_params =
dsi_phy_hw_v2_0_get_default_phy_params;
ops->calc_clk_zero =
dsi_phy_hw_v2_0_calc_clk_zero;
ops->calc_clk_trail_rec_min =
dsi_phy_hw_v2_0_calc_clk_trail_rec_min;
ops->calc_clk_trail_rec_max =
dsi_phy_hw_v2_0_calc_clk_trail_rec_max;
ops->calc_hs_zero =
dsi_phy_hw_v2_0_calc_hs_zero;
ops->calc_hs_trail =
dsi_phy_hw_v2_0_calc_hs_trail;
ops->update_timing_params =
dsi_phy_hw_v2_0_update_timing_params;
break;
case DSI_PHY_VERSION_3_0:
ops->get_default_phy_params =
dsi_phy_hw_v3_0_get_default_phy_params;
@@ -1025,9 +1009,6 @@ int dsi_phy_timing_calc_init(struct dsi_phy_hw *phy,
ops->update_timing_params =
dsi_phy_hw_v4_0_update_timing_params;
break;
case DSI_PHY_VERSION_0_0_HPM:
case DSI_PHY_VERSION_0_0_LPM:
case DSI_PHY_VERSION_1_0:
default:
kfree(ops);
return -ENOTSUPP;

View File

@@ -1,118 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#include "dsi_phy_timing_calc.h"
void dsi_phy_hw_v2_0_get_default_phy_params(struct phy_clk_params *params,
u32 phy_type)
{
params->clk_prep_buf = 50;
params->clk_zero_buf = 2;
params->clk_trail_buf = 30;
params->hs_prep_buf = 50;
params->hs_zero_buf = 10;
params->hs_trail_buf = 30;
params->hs_rqst_buf = 0;
params->hs_exit_buf = 10;
}
int32_t dsi_phy_hw_v2_0_calc_clk_zero(s64 rec_temp1, s64 mult)
{
s64 rec_temp2, rec_temp3;
rec_temp2 = (rec_temp1 - (11 * mult));
rec_temp3 = roundup64(div_s64(rec_temp2, 8), mult);
return (div_s64(rec_temp3, mult) - 3);
}
int32_t dsi_phy_hw_v2_0_calc_clk_trail_rec_min(s64 temp_mul,
s64 frac, s64 mult)
{
s64 rec_temp1, rec_temp2, rec_temp3;
rec_temp1 = temp_mul + frac + (3 * mult);
rec_temp2 = div_s64(rec_temp1, 8);
rec_temp3 = roundup64(rec_temp2, mult);
return div_s64(rec_temp3, mult);
}
int32_t dsi_phy_hw_v2_0_calc_clk_trail_rec_max(s64 temp1, s64 mult)
{
s64 rec_temp2, rec_temp3;
rec_temp2 = temp1 + (3 * mult);
rec_temp3 = rec_temp2 / 8;
return div_s64(rec_temp3, mult);
}
int32_t dsi_phy_hw_v2_0_calc_hs_zero(s64 temp1, s64 mult)
{
s64 rec_temp2, rec_temp3, rec_min;
rec_temp2 = temp1 - (11 * mult);
rec_temp3 = roundup64((rec_temp2 / 8), mult);
rec_min = rec_temp3 - (3 * mult);
return div_s64(rec_min, mult);
}
void dsi_phy_hw_v2_0_calc_hs_trail(struct phy_clk_params *clk_params,
struct phy_timing_desc *desc)
{
s64 rec_temp1;
struct timing_entry *t = &desc->hs_trail;
t->rec_min = DIV_ROUND_UP(
((t->mipi_min * clk_params->bitclk_mbps) +
(3 * clk_params->tlpx_numer_ns)),
(8 * clk_params->tlpx_numer_ns));
rec_temp1 = ((t->mipi_max * clk_params->bitclk_mbps) +
(3 * clk_params->tlpx_numer_ns));
t->rec_max = DIV_ROUND_UP_ULL(rec_temp1,
(8 * clk_params->tlpx_numer_ns));
}
void dsi_phy_hw_v2_0_update_timing_params(
struct dsi_phy_per_lane_cfgs *timing,
struct phy_timing_desc *desc, u32 phy_type)
{
int i = 0;
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
timing->lane[i][0] = desc->hs_exit.reg_value;
if (i == DSI_LOGICAL_CLOCK_LANE)
timing->lane[i][1] = desc->clk_zero.reg_value;
else
timing->lane[i][1] = desc->hs_zero.reg_value;
if (i == DSI_LOGICAL_CLOCK_LANE)
timing->lane[i][2] = desc->clk_prepare.reg_value;
else
timing->lane[i][2] = desc->hs_prepare.reg_value;
if (i == DSI_LOGICAL_CLOCK_LANE)
timing->lane[i][3] = desc->clk_trail.reg_value;
else
timing->lane[i][3] = desc->hs_trail.reg_value;
if (i == DSI_LOGICAL_CLOCK_LANE)
timing->lane[i][4] = desc->hs_rqst_clk.reg_value;
else
timing->lane[i][4] = desc->hs_rqst.reg_value;
timing->lane[i][5] = 0x2;
timing->lane[i][6] = 0x4;
timing->lane[i][7] = 0xA0;
DSI_DEBUG("[%d][%d %d %d %d %d]\n", i, timing->lane[i][0],
timing->lane[i][1],
timing->lane[i][2],
timing->lane[i][3],
timing->lane[i][4]);
}
timing->count_per_lane = 8;
}
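
For reference, the per-lane slots filled in by dsi_phy_hw_v2_0_update_timing_params() above (count_per_lane = 8) line up with the DSIPHY_DLNx_TIMING_CTRL_4..8 writes in the v2.0 dynamic refresh code earlier in this change for indices 0-4; indices 5-7 carry fixed values. A sketch of that layout, with hypothetical names for the slot indices:

enum dsi_phy_v2_0_timing_slot {
	TIMING_SLOT_HS_EXIT = 0,	/* desc->hs_exit */
	TIMING_SLOT_ZERO    = 1,	/* clk_zero on the clock lane, hs_zero otherwise */
	TIMING_SLOT_PREPARE = 2,	/* clk_prepare or hs_prepare */
	TIMING_SLOT_TRAIL   = 3,	/* clk_trail or hs_trail */
	TIMING_SLOT_RQST    = 4,	/* hs_rqst_clk or hs_rqst */
	TIMING_SLOT_FIXED_5 = 5,	/* always 0x2 */
	TIMING_SLOT_FIXED_6 = 6,	/* always 0x4 */
	TIMING_SLOT_FIXED_7 = 7,	/* always 0xA0 */
};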

View File

@@ -15,6 +15,7 @@
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
#include "dsi_defs.h"
#include "dsi_hw.h"
#define DSI_PLL_DBG(p, fmt, ...) DRM_DEV_DEBUG(NULL, "[msm-dsi-debug]: DSI_PLL_%d: "\
fmt, p ? p->index : -1, ##__VA_ARGS__)
@@ -25,17 +26,16 @@
#define DSI_PLL_WARN(p, fmt, ...) DRM_WARN("[msm-dsi-warn]: DSI_PLL_%d: "\
fmt, p ? p->index : -1, ##__VA_ARGS__)
#define DSI_PLL_REG_W(base, offset, data) \
writel_relaxed((data), (base) + (offset))
#define DSI_PLL_REG_R(base, offset) readl_relaxed((base) + (offset))
#define DSI_PLL_REG_W(base, offset, data) \
do {\
pr_debug("[DSI_PLL][%s] - [0x%08x]\n", #offset, (uint32_t)(data)); \
DSI_GEN_W32(base, offset, data); \
} while (0)
#define PLL_CALC_DATA(addr0, addr1, data0, data1) \
(((data1) << 24) | ((((addr1) / 4) & 0xFF) << 16) | \
((data0) << 8) | (((addr0) / 4) & 0xFF))
#define DSI_PLL_REG_R(base, offset) DSI_GEN_R32(base, offset)
#define DSI_DYN_PLL_REG_W(base, offset, addr0, addr1, data0, data1) \
writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
(base) + (offset))
DSI_DYN_REF_REG_W(base, offset, addr0, addr1, data0, data1)
#define upper_8_bit(x) ((((x) >> 2) & 0x100) >> 8)
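
Worked example of the PLL_CALC_DATA() packing above (the offsets and values here are made up): two (byte offset, value) pairs are folded into one 32-bit word, with each offset stored as a word index (offset / 4):

	PLL_CALC_DATA(0x10, 0x14, 0xAB, 0xCD)
		= (0xCD << 24) | ((0x14 / 4) << 16) | (0xAB << 8) | (0x10 / 4)
		= 0xCD05AB04

DSI_DYN_PLL_REG_W() then writes that packed word to the given dynamic refresh control offset in a single register write via DSI_DYN_REF_REG_W().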

View File

@@ -598,7 +598,7 @@ static int dsi_pll_5nm_lock_status(struct dsi_pll_resource *pll)
u32 const delay_us = 100;
u32 const timeout_us = 5000;
rc = readl_poll_timeout_atomic(pll->pll_base + PLL_COMMON_STATUS_ONE,
rc = DSI_READ_POLL_TIMEOUT_ATOMIC_GEN(pll->pll_base, pll->index, PLL_COMMON_STATUS_ONE,
status,
((status & BIT(0)) > 0),
delay_us,