Display drivers kernel project initial snapshot

This change brings in the MSM display drivers, including SDE, DP, DSI,
rotator, DSI PLL, and DP PLL, from the base 4.19 kernel project. It is
the first source code snapshot from the base kernel project.

Change-Id: Iec864c064ce5ea04e170f24414c728684002f284
Signed-off-by: Narendra Muppalla <NarendraM@codeaurora.org>
Author: Narendra Muppalla <NarendraM@codeaurora.org>
Date:   2019-04-02 14:23:55 -07:00
Parent: da3538d49c
Commit: 3709853456
266 changed files with 196908 additions and 0 deletions

NOTICE (new file)

@@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
/*
* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Copyright (C) 2014 Red Hat
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Copyright © 2014 Red Hat.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/

config/konadisp.conf (new file)

@@ -0,0 +1,14 @@
CONFIG_DRM_MSM=y
CONFIG_DRM_MSM_SDE=y
CONFIG_SYNC_FILE=y
CONFIG_DRM_MSM_DSI=y
CONFIG_DRM_MSM_DP=y
CONFIG_QCOM_MDSS_DP_PLL=y
CONFIG_DSI_PARSER=y
CONFIG_DRM_SDE_WB=y
CONFIG_DRM_MSM_REGISTER_LOGGING=y
CONFIG_QCOM_MDSS_PLL=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
CONFIG_DRM_SDE_RSC=y

config/konadispconf.h (new file)

@@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#define CONFIG_DRM_MSM 1
#define CONFIG_DRM_MSM_SDE 1
#define CONFIG_SYNC_FILE 1
#define CONFIG_DRM_MSM_DSI 1
#define CONFIG_DRM_MSM_DP 1
#define CONFIG_QCOM_MDSS_DP_PLL 1
#define CONFIG_DSI_PARSER 1
#define CONFIG_DRM_SDE_WB 1
#define CONFIG_DRM_MSM_REGISTER_LOGGING 1
#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1
#define CONFIG_QCOM_MDSS_PLL 1
#define CONFIG_MSM_SDE_ROTATOR 1
#define CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG 1
#define CONFIG_DRM_SDE_RSC 1
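A note on the pairing: config/konadisp.conf above feeds these flags to kbuild, while this header exposes the same flags to the preprocessor so the techpack sources can be feature-gated when built outside the usual in-tree Kconfig flow. A hedged sketch of such gating (illustrative only, not code from this commit):

/* Illustrative only: a source file gated on the flags defined above */
#if defined(CONFIG_DRM_MSM_DP)
static const bool dp_supported = true;
#else
static const bool dp_supported = false;
#endif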

msm/Makefile (new file)

@@ -0,0 +1,115 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := -I$(srctree)/include/drm -I$(srctree)/techpack/display/msm -I$(srctree)/techpack/display/msm/dsi -I$(srctree)/techpack/display/msm/dp
ccflags-y += -I$(srctree)/techpack/display/msm/sde
ccflags-y += -I$(srctree)/techpack/display/rotator
msm_drm-$(CONFIG_DRM_MSM_DP) += dp/dp_usbpd.o \
dp/dp_parser.o \
dp/dp_power.o \
dp/dp_catalog.o \
dp/dp_catalog_v420.o \
dp/dp_catalog_v200.o \
dp/dp_aux.o \
dp/dp_panel.o \
dp/dp_link.o \
dp/dp_ctrl.o \
dp/dp_audio.o \
dp/dp_debug.o \
dp/dp_hpd.o \
dp/dp_gpio_hpd.o \
dp/dp_lphw_hpd.o \
dp/dp_display.o \
dp/dp_drm.o \
dp/dp_hdcp2p2.o \
dp/dp_mst_drm.o
msm_drm-$(CONFIG_DRM_MSM_SDE) += sde/sde_crtc.o \
sde/sde_encoder.o \
sde/sde_encoder_phys_vid.o \
sde/sde_encoder_phys_cmd.o \
sde/sde_irq.o \
sde/sde_core_irq.o \
sde/sde_core_perf.o \
sde/sde_rm.o \
sde/sde_kms_utils.o \
sde/sde_kms.o \
sde/sde_plane.o \
sde/sde_connector.o \
sde/sde_color_processing.o \
sde/sde_vbif.o \
sde_dbg.o \
sde_dbg_evtlog.o \
sde_io_util.o \
sde/sde_hw_reg_dma_v1_color_proc.o \
sde/sde_hw_color_proc_v4.o \
sde/sde_hw_ad4.o \
sde/sde_hw_uidle.o \
sde_edid_parser.o \
sde_hdcp_1x.o \
sde_hdcp_2x.o \
sde/sde_hw_catalog.o \
sde/sde_hw_cdm.o \
sde/sde_hw_dspp.o \
sde/sde_hw_intf.o \
sde/sde_hw_lm.o \
sde/sde_hw_ctl.o \
sde/sde_hw_util.o \
sde/sde_hw_sspp.o \
sde/sde_hw_wb.o \
sde/sde_hw_pingpong.o \
sde/sde_hw_top.o \
sde/sde_hw_interrupts.o \
sde/sde_hw_vbif.o \
sde/sde_hw_blk.o \
sde/sde_formats.o \
sde_power_handle.o \
sde/sde_hw_color_processing_v1_7.o \
sde/sde_reg_dma.o \
sde/sde_hw_reg_dma_v1.o \
sde/sde_hw_dsc.o \
sde/sde_hw_ds.o \
sde/sde_fence.o
msm_drm-$(CONFIG_DRM_SDE_WB) += sde/sde_wb.o \
sde/sde_encoder_phys_wb.o
msm_drm-$(CONFIG_DRM_SDE_RSC) += sde_rsc.o \
sde_rsc_hw.o \
sde_rsc_hw_v3.o
msm_drm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi_phy.o \
dsi/dsi_pwr.o \
dsi/dsi_phy_hw_v2_0.o \
dsi/dsi_phy_hw_v3_0.o \
dsi/dsi_phy_hw_v4_0.o \
dsi/dsi_phy_timing_calc.o \
dsi/dsi_phy_timing_v2_0.o \
dsi/dsi_phy_timing_v3_0.o \
dsi/dsi_phy_timing_v4_0.o \
dsi/dsi_ctrl_hw_cmn.o \
dsi/dsi_ctrl_hw_1_4.o \
dsi/dsi_ctrl_hw_2_0.o \
dsi/dsi_ctrl_hw_2_2.o \
dsi/dsi_ctrl.o \
dsi/dsi_catalog.o \
dsi/dsi_drm.o \
dsi/dsi_display.o \
dsi/dsi_panel.o \
dsi/dsi_clk_manager.o \
dsi/dsi_display_test.o
msm_drm-$(CONFIG_DSI_PARSER) += dsi/dsi_parser.o
msm_drm-$(CONFIG_DRM_MSM) += \
msm_atomic.o \
msm_fb.o \
msm_iommu.o \
msm_drv.o \
msm_gem.o \
msm_gem_prime.o \
msm_gem_vma.o \
msm_smmu.o \
msm_prop.o
obj-$(CONFIG_DRM_MSM) += msm_drm.o
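A note on structure: each msm_drm-$(CONFIG_...) list above contributes objects to a single composite object, which the final obj-$(CONFIG_DRM_MSM) += msm_drm.o line assembles; with CONFIG_DRM_MSM=y from config/konadisp.conf, the whole techpack is built into the kernel rather than as a loadable module.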

msm/dp/dp_audio.c (new file)

@@ -0,0 +1,854 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/of_platform.h>
#include <linux/msm_ext_display.h>
#include <drm/drm_dp_helper.h>
#include "dp_catalog.h"
#include "dp_audio.h"
#include "dp_panel.h"
struct dp_audio_private {
struct platform_device *ext_pdev;
struct platform_device *pdev;
struct dp_catalog_audio *catalog;
struct msm_ext_disp_init_data ext_audio_data;
struct dp_panel *panel;
bool ack_enabled;
bool session_on;
bool engine_on;
u32 channels;
struct completion hpd_comp;
struct workqueue_struct *notify_workqueue;
struct delayed_work notify_delayed_work;
struct mutex ops_lock;
struct dp_audio dp_audio;
atomic_t acked;
};
static u32 dp_audio_get_header(struct dp_catalog_audio *catalog,
enum dp_catalog_audio_sdp_type sdp,
enum dp_catalog_audio_header_type header)
{
catalog->sdp_type = sdp;
catalog->sdp_header = header;
catalog->get_header(catalog);
return catalog->data;
}
static void dp_audio_set_header(struct dp_catalog_audio *catalog,
u32 data,
enum dp_catalog_audio_sdp_type sdp,
enum dp_catalog_audio_header_type header)
{
catalog->sdp_type = sdp;
catalog->sdp_header = header;
catalog->data = data;
catalog->set_header(catalog);
}
static void dp_audio_stream_sdp(struct dp_audio_private *audio)
{
struct dp_catalog_audio *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
value &= 0x0000ffff;
new_value = 0x02;
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
value &= 0xffff0000;
new_value = 0x0;
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
value &= 0x0000ffff;
new_value = audio->channels - 1;
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
}
static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
{
struct dp_catalog_audio *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
value &= 0x0000ffff;
new_value = 0x1;
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
value &= 0xffff0000;
new_value = 0x17;
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
value &= 0x0000ffff;
new_value = (0x0 | (0x11 << 2));
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
}
static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
{
struct dp_catalog_audio *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
value &= 0x0000ffff;
new_value = 0x84;
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
value &= 0xffff0000;
new_value = 0x1b;
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
value &= 0x0000ffff;
new_value = (0x0 | (0x11 << 2));
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
}
static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
{
struct dp_catalog_audio *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
value &= 0x0000ffff;
new_value = 0x05;
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
value &= 0xffff0000;
new_value = 0x0F;
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
value &= 0x0000ffff;
new_value = 0x0;
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
}
static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
{
struct dp_catalog_audio *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
/* Config header and parity byte 1 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
value &= 0x0000ffff;
new_value = 0x06;
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
value &= 0xffff0000;
new_value = 0x0F;
parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
}
static void dp_audio_setup_sdp(struct dp_audio_private *audio)
{
/* always program stream 0 first before actual stream cfg */
audio->catalog->stream_id = DP_STREAM_0;
audio->catalog->config_sdp(audio->catalog);
if (audio->panel->stream_id == DP_STREAM_1) {
audio->catalog->stream_id = DP_STREAM_1;
audio->catalog->config_sdp(audio->catalog);
}
dp_audio_stream_sdp(audio);
dp_audio_timestamp_sdp(audio);
dp_audio_infoframe_sdp(audio);
dp_audio_copy_management_sdp(audio);
dp_audio_isrc_sdp(audio);
}
static void dp_audio_setup_acr(struct dp_audio_private *audio)
{
u32 select = 0;
struct dp_catalog_audio *catalog = audio->catalog;
switch (audio->dp_audio.bw_code) {
case DP_LINK_BW_1_62:
select = 0;
break;
case DP_LINK_BW_2_7:
select = 1;
break;
case DP_LINK_BW_5_4:
select = 2;
break;
case DP_LINK_BW_8_1:
select = 3;
break;
default:
pr_debug("Unknown link rate\n");
select = 0;
break;
}
catalog->data = select;
catalog->config_acr(catalog);
}
static void dp_audio_enable(struct dp_audio_private *audio, bool enable)
{
struct dp_catalog_audio *catalog = audio->catalog;
catalog->data = enable;
catalog->enable(catalog);
audio->engine_on = enable;
}
static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
{
struct msm_ext_disp_data *ext_data;
struct dp_audio *dp_audio;
if (!pdev) {
pr_err("invalid input\n");
return ERR_PTR(-ENODEV);
}
ext_data = platform_get_drvdata(pdev);
if (!ext_data) {
pr_err("invalid ext disp data\n");
return ERR_PTR(-EINVAL);
}
dp_audio = ext_data->intf_data;
if (!dp_audio) {
pr_err("invalid intf data\n");
return ERR_PTR(-EINVAL);
}
return container_of(dp_audio, struct dp_audio_private, dp_audio);
}
static int dp_audio_info_setup(struct platform_device *pdev,
struct msm_ext_disp_audio_setup_params *params)
{
int rc = 0;
struct dp_audio_private *audio;
audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
return rc;
}
mutex_lock(&audio->ops_lock);
audio->channels = params->num_of_channels;
if (audio->panel->stream_id >= DP_STREAM_MAX) {
pr_err("invalid stream id: %d\n", audio->panel->stream_id);
rc = -EINVAL;
mutex_unlock(&audio->ops_lock);
return rc;
}
dp_audio_setup_sdp(audio);
dp_audio_setup_acr(audio);
dp_audio_enable(audio, true);
mutex_unlock(&audio->ops_lock);
return rc;
}
static int dp_audio_get_edid_blk(struct platform_device *pdev,
struct msm_ext_disp_audio_edid_blk *blk)
{
int rc = 0;
struct dp_audio_private *audio;
struct sde_edid_ctrl *edid;
audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
}
if (!audio->panel || !audio->panel->edid_ctrl) {
pr_err("invalid panel data\n");
rc = -EINVAL;
goto end;
}
edid = audio->panel->edid_ctrl;
blk->audio_data_blk = edid->audio_data_block;
blk->audio_data_blk_size = edid->adb_size;
blk->spk_alloc_data_blk = edid->spkr_alloc_data_block;
blk->spk_alloc_data_blk_size = edid->sadb_size;
end:
return rc;
}
static int dp_audio_get_cable_status(struct platform_device *pdev, u32 vote)
{
int rc = 0;
struct dp_audio_private *audio;
audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
}
return audio->session_on;
end:
return rc;
}
static int dp_audio_get_intf_id(struct platform_device *pdev)
{
int rc = 0;
struct dp_audio_private *audio;
audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
}
return EXT_DISPLAY_TYPE_DP;
end:
return rc;
}
static void dp_audio_teardown_done(struct platform_device *pdev)
{
struct dp_audio_private *audio;
audio = dp_audio_get_data(pdev);
if (IS_ERR(audio))
return;
mutex_lock(&audio->ops_lock);
dp_audio_enable(audio, false);
mutex_unlock(&audio->ops_lock);
atomic_set(&audio->acked, 1);
complete_all(&audio->hpd_comp);
pr_debug("audio engine disabled\n");
}
static int dp_audio_ack_done(struct platform_device *pdev, u32 ack)
{
int rc = 0, ack_hpd;
struct dp_audio_private *audio;
audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
}
if (ack & AUDIO_ACK_SET_ENABLE) {
audio->ack_enabled = ack & AUDIO_ACK_ENABLE ?
true : false;
pr_debug("audio ack feature %s\n",
audio->ack_enabled ? "enabled" : "disabled");
goto end;
}
if (!audio->ack_enabled)
goto end;
ack_hpd = ack & AUDIO_ACK_CONNECT;
pr_debug("acknowledging audio (%d)\n", ack_hpd);
if (!audio->engine_on) {
atomic_set(&audio->acked, 1);
complete_all(&audio->hpd_comp);
}
end:
return rc;
}
static int dp_audio_codec_ready(struct platform_device *pdev)
{
int rc = 0;
struct dp_audio_private *audio;
audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
pr_err("invalid input\n");
rc = PTR_ERR(audio);
goto end;
}
queue_delayed_work(audio->notify_workqueue,
&audio->notify_delayed_work, HZ/4);
end:
return rc;
}
static int dp_audio_register_ext_disp(struct dp_audio_private *audio)
{
int rc = 0;
struct device_node *pd = NULL;
const char *phandle = "qcom,ext-disp";
struct msm_ext_disp_init_data *ext;
struct msm_ext_disp_audio_codec_ops *ops;
ext = &audio->ext_audio_data;
ops = &ext->codec_ops;
ext->codec.type = EXT_DISPLAY_TYPE_DP;
ext->codec.ctrl_id = 0;
ext->codec.stream_id = audio->panel->stream_id;
ext->pdev = audio->pdev;
ext->intf_data = &audio->dp_audio;
ops->audio_info_setup = dp_audio_info_setup;
ops->get_audio_edid_blk = dp_audio_get_edid_blk;
ops->cable_status = dp_audio_get_cable_status;
ops->get_intf_id = dp_audio_get_intf_id;
ops->teardown_done = dp_audio_teardown_done;
ops->acknowledge = dp_audio_ack_done;
ops->ready = dp_audio_codec_ready;
if (!audio->pdev->dev.of_node) {
pr_err("cannot find audio dev.of_node\n");
rc = -ENODEV;
goto end;
}
pd = of_parse_phandle(audio->pdev->dev.of_node, phandle, 0);
if (!pd) {
pr_err("cannot parse %s handle\n", phandle);
rc = -ENODEV;
goto end;
}
audio->ext_pdev = of_find_device_by_node(pd);
if (!audio->ext_pdev) {
pr_err("cannot find %s pdev\n", phandle);
rc = -ENODEV;
goto end;
}
#if defined(CONFIG_MSM_EXT_DISPLAY)
rc = msm_ext_disp_register_intf(audio->ext_pdev, ext);
if (rc)
pr_err("failed to register disp\n");
#endif
end:
if (pd)
of_node_put(pd);
return rc;
}
static int dp_audio_deregister_ext_disp(struct dp_audio_private *audio)
{
int rc = 0;
struct device_node *pd = NULL;
const char *phandle = "qcom,ext-disp";
struct msm_ext_disp_init_data *ext;
ext = &audio->ext_audio_data;
if (!audio->pdev->dev.of_node) {
pr_err("cannot find audio dev.of_node\n");
rc = -ENODEV;
goto end;
}
pd = of_parse_phandle(audio->pdev->dev.of_node, phandle, 0);
if (!pd) {
pr_err("cannot parse %s handle\n", phandle);
rc = -ENODEV;
goto end;
}
audio->ext_pdev = of_find_device_by_node(pd);
if (!audio->ext_pdev) {
pr_err("cannot find %s pdev\n", phandle);
rc = -ENODEV;
goto end;
}
#if defined(CONFIG_MSM_EXT_DISPLAY)
rc = msm_ext_disp_deregister_intf(audio->ext_pdev, ext);
if (rc)
pr_err("failed to deregister disp\n");
#endif
end:
return rc;
}
static int dp_audio_notify(struct dp_audio_private *audio, u32 state)
{
int rc = 0;
struct msm_ext_disp_init_data *ext = &audio->ext_audio_data;
atomic_set(&audio->acked, 0);
if (!ext->intf_ops.audio_notify) {
pr_err("audio notify not defined\n");
goto end;
}
reinit_completion(&audio->hpd_comp);
rc = ext->intf_ops.audio_notify(audio->ext_pdev,
&ext->codec, state);
if (rc)
goto end;
if (atomic_read(&audio->acked))
goto end;
rc = wait_for_completion_timeout(&audio->hpd_comp, HZ * 4);
if (!rc) {
pr_err("timeout. state=%d err=%d\n", state, rc);
rc = -ETIMEDOUT;
goto end;
}
pr_debug("success\n");
end:
return rc;
}
static int dp_audio_config(struct dp_audio_private *audio, u32 state)
{
int rc = 0;
struct msm_ext_disp_init_data *ext = &audio->ext_audio_data;
if (!ext || !ext->intf_ops.audio_config) {
pr_err("audio_config not defined\n");
goto end;
}
/*
* DP Audio sets default STREAM_0 only, other streams are
* set by audio driver based on the hardware/software support.
*/
if (audio->panel->stream_id == DP_STREAM_0) {
rc = ext->intf_ops.audio_config(audio->ext_pdev,
&ext->codec, state);
if (rc)
pr_err("failed to config audio, err=%d\n", rc);
}
end:
return rc;
}
static int dp_audio_on(struct dp_audio *dp_audio)
{
int rc = 0;
struct dp_audio_private *audio;
struct msm_ext_disp_init_data *ext;
if (!dp_audio) {
pr_err("invalid input\n");
return -EINVAL;
}
audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
if (IS_ERR(audio)) {
pr_err("invalid input\n");
return -EINVAL;
}
dp_audio_register_ext_disp(audio);
ext = &audio->ext_audio_data;
audio->session_on = true;
rc = dp_audio_config(audio, EXT_DISPLAY_CABLE_CONNECT);
if (rc)
goto end;
rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_CONNECT);
if (rc)
goto end;
pr_debug("success\n");
end:
return rc;
}
static int dp_audio_off(struct dp_audio *dp_audio)
{
int rc = 0;
struct dp_audio_private *audio;
struct msm_ext_disp_init_data *ext;
bool work_pending = false;
if (!dp_audio) {
pr_err("invalid input\n");
return -EINVAL;
}
audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
ext = &audio->ext_audio_data;
work_pending = cancel_delayed_work_sync(&audio->notify_delayed_work);
if (work_pending)
pr_debug("pending notification work completed\n");
rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_DISCONNECT);
if (rc)
goto end;
pr_debug("success\n");
end:
dp_audio_config(audio, EXT_DISPLAY_CABLE_DISCONNECT);
audio->session_on = false;
audio->engine_on = false;
dp_audio_deregister_ext_disp(audio);
return rc;
}
static void dp_audio_notify_work_fn(struct work_struct *work)
{
struct dp_audio_private *audio;
struct delayed_work *dw = to_delayed_work(work);
audio = container_of(dw, struct dp_audio_private, notify_delayed_work);
dp_audio_notify(audio, EXT_DISPLAY_CABLE_CONNECT);
}
static int dp_audio_create_notify_workqueue(struct dp_audio_private *audio)
{
audio->notify_workqueue = create_workqueue("sdm_dp_audio_notify");
if (IS_ERR_OR_NULL(audio->notify_workqueue)) {
pr_err("Error creating notify_workqueue\n");
return -EPERM;
}
INIT_DELAYED_WORK(&audio->notify_delayed_work, dp_audio_notify_work_fn);
return 0;
}
static void dp_audio_destroy_notify_workqueue(struct dp_audio_private *audio)
{
if (audio->notify_workqueue)
destroy_workqueue(audio->notify_workqueue);
}
struct dp_audio *dp_audio_get(struct platform_device *pdev,
struct dp_panel *panel,
struct dp_catalog_audio *catalog)
{
int rc = 0;
struct dp_audio_private *audio;
struct dp_audio *dp_audio;
if (!pdev || !panel || !catalog) {
pr_err("invalid input\n");
rc = -EINVAL;
goto error;
}
audio = devm_kzalloc(&pdev->dev, sizeof(*audio), GFP_KERNEL);
if (!audio) {
rc = -ENOMEM;
goto error;
}
rc = dp_audio_create_notify_workqueue(audio);
if (rc)
goto error_notify_workqueue;
init_completion(&audio->hpd_comp);
audio->pdev = pdev;
audio->panel = panel;
audio->catalog = catalog;
atomic_set(&audio->acked, 0);
dp_audio = &audio->dp_audio;
mutex_init(&audio->ops_lock);
dp_audio->on = dp_audio_on;
dp_audio->off = dp_audio_off;
catalog->init(catalog);
return dp_audio;
error_notify_workqueue:
devm_kfree(&pdev->dev, audio);
error:
return ERR_PTR(rc);
}
void dp_audio_put(struct dp_audio *dp_audio)
{
struct dp_audio_private *audio;
if (!dp_audio)
return;
audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
mutex_destroy(&audio->ops_lock);
dp_audio_destroy_notify_workqueue(audio);
devm_kfree(&audio->pdev->dev, audio);
}
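For orientation, a hedged usage sketch of this interface follows; the function name is illustrative and not part of this commit. A display module would create the audio instance at probe time and drive it around main-link enable/disable:

/*
 * Illustrative only: create the audio instance and notify the
 * registered ext-display codec once the main link is up.
 */
static int example_enable_dp_audio(struct platform_device *pdev,
		struct dp_panel *panel, struct dp_catalog_audio *catalog)
{
	struct dp_audio *audio;

	audio = dp_audio_get(pdev, panel, catalog);
	if (IS_ERR(audio))
		return PTR_ERR(audio);

	return audio->on(audio);
}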

msm/dp/dp_audio.h (new file)

@@ -0,0 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_AUDIO_H_
#define _DP_AUDIO_H_
#include <linux/platform_device.h>
#include "dp_panel.h"
#include "dp_catalog.h"
/**
* struct dp_audio
* @lane_count: number of lanes configured in current session
* @bw_code: link rate's bandwidth code for current session
*/
struct dp_audio {
u32 lane_count;
u32 bw_code;
/**
* on()
*
* Enables the audio by notifying the user module.
*
* @dp_audio: an instance of struct dp_audio.
*
* Returns the error code in case of failure, 0 in success case.
*/
int (*on)(struct dp_audio *dp_audio);
/**
* off()
*
* Disables the audio by notifying the user module.
*
* @dp_audio: an instance of struct dp_audio.
*
* Returns the error code in case of failure, 0 in success case.
*/
int (*off)(struct dp_audio *dp_audio);
};
/**
* dp_audio_get()
*
* Creates an instance of dp_audio.
*
* @pdev: caller's platform device instance.
* @panel: an instance of dp_panel module.
* @catalog: an instance of dp_catalog_audio module.
*
* Returns the error code in case of failure, otherwise
* an instance of the newly created dp_audio module.
*/
struct dp_audio *dp_audio_get(struct platform_device *pdev,
struct dp_panel *panel,
struct dp_catalog_audio *catalog);
/**
* dp_audio_put()
*
* Cleans the dp_audio instance.
*
* @dp_audio: an instance of dp_audio.
*/
void dp_audio_put(struct dp_audio *dp_audio);
#endif /* _DP_AUDIO_H_ */

msm/dp/dp_aux.c (new file)

@@ -0,0 +1,865 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/soc/qcom/fsa4480-i2c.h>
#include <linux/usb/usbpd.h>
#include <linux/delay.h>
#include "dp_aux.h"
#define DP_AUX_ENUM_STR(x) #x
enum {
DP_AUX_DATA_INDEX_WRITE = BIT(31),
};
struct dp_aux_private {
struct device *dev;
struct dp_aux dp_aux;
struct dp_catalog_aux *catalog;
struct dp_aux_cfg *cfg;
struct device_node *aux_switch_node;
struct mutex mutex;
struct completion comp;
struct drm_dp_aux drm_aux;
bool cmd_busy;
bool native;
bool read;
bool no_send_addr;
bool no_send_stop;
bool enabled;
u32 offset;
u32 segment;
u32 aux_error_num;
u32 retry_cnt;
atomic_t aborted;
u8 *dpcd;
u8 *edid;
};
#ifdef CONFIG_DYNAMIC_DEBUG
static void dp_aux_hex_dump(struct drm_dp_aux *drm_aux,
struct drm_dp_aux_msg *msg)
{
char prefix[64];
int i, linelen, remaining = msg->size;
const int rowsize = 16;
u8 linebuf[64];
struct dp_aux_private *aux = container_of(drm_aux,
struct dp_aux_private, drm_aux);
snprintf(prefix, sizeof(prefix), "%s %s %4xh(%2zu): ",
aux->native ? "NAT" : "I2C",
aux->read ? "RD" : "WR",
msg->address, msg->size);
for (i = 0; i < msg->size; i += rowsize) {
linelen = min(remaining, rowsize);
remaining -= rowsize;
hex_dump_to_buffer(msg->buffer + i, linelen, rowsize, 1,
linebuf, sizeof(linebuf), false);
pr_debug("%s%s\n", prefix, linebuf);
}
}
#else
static void dp_aux_hex_dump(struct drm_dp_aux *drm_aux,
struct drm_dp_aux_msg *msg)
{
}
#endif
static char *dp_aux_get_error(u32 aux_error)
{
switch (aux_error) {
case DP_AUX_ERR_NONE:
return DP_AUX_ENUM_STR(DP_AUX_ERR_NONE);
case DP_AUX_ERR_ADDR:
return DP_AUX_ENUM_STR(DP_AUX_ERR_ADDR);
case DP_AUX_ERR_TOUT:
return DP_AUX_ENUM_STR(DP_AUX_ERR_TOUT);
case DP_AUX_ERR_NACK:
return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK);
case DP_AUX_ERR_DEFER:
return DP_AUX_ENUM_STR(DP_AUX_ERR_DEFER);
case DP_AUX_ERR_NACK_DEFER:
return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK_DEFER);
default:
return "unknown";
}
}
static u32 dp_aux_write(struct dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
u32 data[4], reg, len;
u8 *msgdata = msg->buffer;
int const aux_cmd_fifo_len = 128;
int i = 0;
if (aux->read)
len = 4;
else
len = msg->size + 4;
/*
* cmd fifo only has depth of 144 bytes
* limit buf length to 128 bytes here
*/
if (len > aux_cmd_fifo_len) {
pr_err("buf len error\n");
return 0;
}
/* Pack cmd and write to HW */
data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
if (aux->read)
data[0] |= BIT(4); /* R/W */
data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */
data[2] = msg->address & 0xff; /* addr[7:0] */
data[3] = (msg->size - 1) & 0xff; /* len[7:0] */
for (i = 0; i < len; i++) {
reg = (i < 4) ? data[i] : msgdata[i - 4];
reg = ((reg) << 8) & 0x0000ff00; /* index = 0, write */
if (i == 0)
reg |= DP_AUX_DATA_INDEX_WRITE;
aux->catalog->data = reg;
aux->catalog->write_data(aux->catalog);
}
aux->catalog->clear_trans(aux->catalog, false);
aux->catalog->clear_hw_interrupts(aux->catalog);
reg = 0; /* Transaction number == 1 */
if (!aux->native) { /* i2c */
reg |= BIT(8);
if (aux->no_send_addr)
reg |= BIT(10);
if (aux->no_send_stop)
reg |= BIT(11);
}
reg |= BIT(9);
aux->catalog->data = reg;
aux->catalog->write_trans(aux->catalog);
return len;
}
static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
u32 ret = 0, len = 0, timeout;
int const aux_timeout_ms = HZ/4;
reinit_completion(&aux->comp);
len = dp_aux_write(aux, msg);
if (len == 0) {
pr_err("DP AUX write failed\n");
return -EINVAL;
}
timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms);
if (!timeout) {
pr_err("aux %s timeout\n", (aux->read ? "read" : "write"));
return -ETIMEDOUT;
}
if (aux->aux_error_num == DP_AUX_ERR_NONE) {
ret = len;
} else {
pr_err_ratelimited("aux err: %s\n",
dp_aux_get_error(aux->aux_error_num));
ret = -EINVAL;
}
return ret;
}
static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
u32 data;
u8 *dp;
u32 i, actual_i;
u32 len = msg->size;
aux->catalog->clear_trans(aux->catalog, true);
data = 0;
data |= DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
data |= BIT(0); /* read */
aux->catalog->data = data;
aux->catalog->write_data(aux->catalog);
dp = msg->buffer;
/* discard first byte */
data = aux->catalog->read_data(aux->catalog);
for (i = 0; i < len; i++) {
data = aux->catalog->read_data(aux->catalog);
*dp++ = (u8)((data >> 8) & 0xff);
actual_i = (data >> 16) & 0xFF;
if (i != actual_i)
pr_warn("Index mismatch: expected %d, found %d\n",
i, actual_i);
}
}
static void dp_aux_native_handler(struct dp_aux_private *aux)
{
u32 isr = aux->catalog->isr;
if (isr & DP_INTR_AUX_I2C_DONE)
aux->aux_error_num = DP_AUX_ERR_NONE;
else if (isr & DP_INTR_WRONG_ADDR)
aux->aux_error_num = DP_AUX_ERR_ADDR;
else if (isr & DP_INTR_TIMEOUT)
aux->aux_error_num = DP_AUX_ERR_TOUT;
if (isr & DP_INTR_NACK_DEFER)
aux->aux_error_num = DP_AUX_ERR_NACK;
if (isr & DP_INTR_AUX_ERROR) {
aux->aux_error_num = DP_AUX_ERR_PHY;
aux->catalog->clear_hw_interrupts(aux->catalog);
}
complete(&aux->comp);
}
static void dp_aux_i2c_handler(struct dp_aux_private *aux)
{
u32 isr = aux->catalog->isr;
if (isr & DP_INTR_AUX_I2C_DONE) {
if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER))
aux->aux_error_num = DP_AUX_ERR_NACK;
else
aux->aux_error_num = DP_AUX_ERR_NONE;
} else {
if (isr & DP_INTR_WRONG_ADDR)
aux->aux_error_num = DP_AUX_ERR_ADDR;
else if (isr & DP_INTR_TIMEOUT)
aux->aux_error_num = DP_AUX_ERR_TOUT;
if (isr & DP_INTR_NACK_DEFER)
aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
if (isr & DP_INTR_I2C_NACK)
aux->aux_error_num = DP_AUX_ERR_NACK;
if (isr & DP_INTR_I2C_DEFER)
aux->aux_error_num = DP_AUX_ERR_DEFER;
if (isr & DP_INTR_AUX_ERROR) {
aux->aux_error_num = DP_AUX_ERR_PHY;
aux->catalog->clear_hw_interrupts(aux->catalog);
}
}
complete(&aux->comp);
}
static void dp_aux_isr(struct dp_aux *dp_aux)
{
struct dp_aux_private *aux;
if (!dp_aux) {
pr_err("invalid input\n");
return;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
aux->catalog->get_irq(aux->catalog, aux->cmd_busy);
if (!aux->cmd_busy)
return;
if (aux->native)
dp_aux_native_handler(aux);
else
dp_aux_i2c_handler(aux);
}
static void dp_aux_reconfig(struct dp_aux *dp_aux)
{
struct dp_aux_private *aux;
if (!dp_aux) {
pr_err("invalid input\n");
return;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
aux->catalog->update_aux_cfg(aux->catalog,
aux->cfg, PHY_AUX_CFG1);
aux->catalog->reset(aux->catalog);
}
static void dp_aux_abort_transaction(struct dp_aux *dp_aux)
{
struct dp_aux_private *aux;
if (!dp_aux) {
pr_err("invalid input\n");
return;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
atomic_set(&aux->aborted, 1);
}
static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
struct drm_dp_aux_msg *input_msg)
{
u32 const edid_address = 0x50;
u32 const segment_address = 0x30;
bool i2c_read = input_msg->request &
(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
u8 *data = NULL;
if (aux->native || i2c_read || ((input_msg->address != edid_address) &&
(input_msg->address != segment_address)))
return;
data = input_msg->buffer;
if (input_msg->address == segment_address)
aux->segment = *data;
else
aux->offset = *data;
}
/**
* dp_aux_transfer_helper() - helper function for EDID read transactions
*
* @aux: DP AUX private structure
* @input_msg: input message from DRM upstream APIs
* @send_seg: send the seg to sink
*
* return: void
*
* This helper function is used to fix EDID reads for non-compliant
* sinks that do not handle the i2c middle-of-transaction flag correctly.
*/
static void dp_aux_transfer_helper(struct dp_aux_private *aux,
struct drm_dp_aux_msg *input_msg, bool send_seg)
{
struct drm_dp_aux_msg helper_msg;
u32 const message_size = 0x10;
u32 const segment_address = 0x30;
u32 const edid_block_length = 0x80;
bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
bool i2c_read = input_msg->request &
(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
if (!i2c_mot || !i2c_read || (input_msg->size == 0))
return;
/*
* Sending the segment value and EDID offset will be performed
* from the DRM upstream EDID driver for each block. Avoid
* duplicate AUX transactions related to this while reading the
* first 16 bytes of each block.
*/
if (!(aux->offset % edid_block_length) || !send_seg)
goto end;
aux->read = false;
aux->cmd_busy = true;
aux->no_send_addr = true;
aux->no_send_stop = true;
/*
* Send the segment address for i2c reads for segment > 0 and for which
* the middle-of-transaction flag is set. This is required to support
* EDID reads of more than 2 blocks as the segment address is reset to 0
* since we are overriding the middle-of-transaction flag for read
* transactions.
*/
if (aux->segment) {
memset(&helper_msg, 0, sizeof(helper_msg));
helper_msg.address = segment_address;
helper_msg.buffer = &aux->segment;
helper_msg.size = 1;
dp_aux_cmd_fifo_tx(aux, &helper_msg);
}
/*
* Send the offset address for every i2c read in which the
* middle-of-transaction flag is set. This will ensure that the sink
* will update its read pointer and return the correct portion of the
* EDID buffer in the subsequent i2c read transaction triggered in the
* native AUX transfer function.
*/
memset(&helper_msg, 0, sizeof(helper_msg));
helper_msg.address = input_msg->address;
helper_msg.buffer = &aux->offset;
helper_msg.size = 1;
dp_aux_cmd_fifo_tx(aux, &helper_msg);
end:
aux->offset += message_size;
if (aux->offset == 0x80 || aux->offset == 0x100)
aux->segment = 0x0; /* reset segment at end of block */
}
static int dp_aux_transfer_ready(struct dp_aux_private *aux,
struct drm_dp_aux_msg *msg, bool send_seg)
{
int ret = 0;
int const aux_cmd_native_max = 16;
int const aux_cmd_i2c_max = 128;
if (atomic_read(&aux->aborted)) {
ret = -ETIMEDOUT;
goto error;
}
aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
/* Ignore address only message */
if ((msg->size == 0) || (msg->buffer == NULL)) {
msg->reply = aux->native ?
DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
goto error;
}
/* msg sanity check */
if ((aux->native && (msg->size > aux_cmd_native_max)) ||
(msg->size > aux_cmd_i2c_max)) {
pr_err("%s: invalid msg: size(%zu), request(%x)\n",
__func__, msg->size, msg->request);
ret = -EINVAL;
goto error;
}
dp_aux_update_offset_and_segment(aux, msg);
dp_aux_transfer_helper(aux, msg, send_seg);
aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
if (aux->read) {
aux->no_send_addr = true;
aux->no_send_stop = false;
} else {
aux->no_send_addr = true;
aux->no_send_stop = true;
}
aux->cmd_busy = true;
error:
return ret;
}
static ssize_t dp_aux_transfer_debug(struct drm_dp_aux *drm_aux,
struct drm_dp_aux_msg *msg)
{
u32 timeout;
ssize_t ret;
struct dp_aux_private *aux = container_of(drm_aux,
struct dp_aux_private, drm_aux);
mutex_lock(&aux->mutex);
ret = dp_aux_transfer_ready(aux, msg, false);
if (ret)
goto end;
aux->aux_error_num = DP_AUX_ERR_NONE;
if (!aux->dpcd || !aux->edid) {
pr_err("invalid aux/dpcd structure\n");
goto end;
}
if ((msg->address + msg->size) > SZ_4K) {
pr_debug("invalid dpcd access: addr=0x%x, size=0x%lx\n",
msg->address, msg->size);
goto address_error;
}
if (aux->native) {
aux->dp_aux.reg = msg->address;
aux->dp_aux.read = aux->read;
aux->dp_aux.size = msg->size;
reinit_completion(&aux->comp);
if (aux->read) {
timeout = wait_for_completion_timeout(&aux->comp, HZ);
if (!timeout) {
pr_err("aux timeout for 0x%x\n", msg->address);
atomic_set(&aux->aborted, 1);
ret = -ETIMEDOUT;
goto end;
}
memcpy(msg->buffer, aux->dpcd + msg->address,
msg->size);
} else {
memcpy(aux->dpcd + msg->address, msg->buffer,
msg->size);
timeout = wait_for_completion_timeout(&aux->comp, HZ);
if (!timeout) {
pr_err("aux timeout for 0x%x\n", msg->address);
atomic_set(&aux->aborted, 1);
ret = -ETIMEDOUT;
goto end;
}
}
aux->aux_error_num = DP_AUX_ERR_NONE;
} else {
if (aux->read && msg->address == 0x50) {
memcpy(msg->buffer,
aux->edid + aux->offset - 16,
msg->size);
}
}
if (aux->aux_error_num == DP_AUX_ERR_NONE) {
dp_aux_hex_dump(drm_aux, msg);
if (!aux->read)
memset(msg->buffer, 0, msg->size);
msg->reply = aux->native ?
DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
} else {
/* Reply defer to retry */
msg->reply = aux->native ?
DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
}
ret = msg->size;
goto end;
address_error:
memset(msg->buffer, 0, msg->size);
ret = msg->size;
end:
aux->dp_aux.reg = 0xFFFF;
aux->dp_aux.read = true;
aux->dp_aux.size = 0;
mutex_unlock(&aux->mutex);
return ret;
}
/*
* This function does the real job to process an AUX transaction.
* It will call aux_reset() function to reset the AUX channel,
* if the waiting is timeout.
*/
static ssize_t dp_aux_transfer(struct drm_dp_aux *drm_aux,
struct drm_dp_aux_msg *msg)
{
ssize_t ret;
int const retry_count = 5;
struct dp_aux_private *aux = container_of(drm_aux,
struct dp_aux_private, drm_aux);
mutex_lock(&aux->mutex);
ret = dp_aux_transfer_ready(aux, msg, true);
if (ret)
goto unlock_exit;
if (!aux->cmd_busy) {
ret = msg->size;
goto unlock_exit;
}
ret = dp_aux_cmd_fifo_tx(aux, msg);
if ((ret < 0) && !atomic_read(&aux->aborted)) {
aux->retry_cnt++;
if (!(aux->retry_cnt % retry_count))
aux->catalog->update_aux_cfg(aux->catalog,
aux->cfg, PHY_AUX_CFG1);
aux->catalog->reset(aux->catalog);
goto unlock_exit;
} else if (ret < 0) {
goto unlock_exit;
}
if (aux->aux_error_num == DP_AUX_ERR_NONE) {
if (aux->read)
dp_aux_cmd_fifo_rx(aux, msg);
dp_aux_hex_dump(drm_aux, msg);
msg->reply = aux->native ?
DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
} else {
/* Reply defer to retry */
msg->reply = aux->native ?
DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
}
/* Return requested size for success or retry */
ret = msg->size;
aux->retry_cnt = 0;
unlock_exit:
aux->cmd_busy = false;
mutex_unlock(&aux->mutex);
return ret;
}
static void dp_aux_reset_phy_config_indices(struct dp_aux_cfg *aux_cfg)
{
int i = 0;
for (i = 0; i < PHY_AUX_CFG_MAX; i++)
aux_cfg[i].current_index = 0;
}
static void dp_aux_init(struct dp_aux *dp_aux, struct dp_aux_cfg *aux_cfg)
{
struct dp_aux_private *aux;
if (!dp_aux || !aux_cfg) {
pr_err("invalid input\n");
return;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
if (aux->enabled)
return;
dp_aux_reset_phy_config_indices(aux_cfg);
aux->catalog->setup(aux->catalog, aux_cfg);
aux->catalog->reset(aux->catalog);
aux->catalog->enable(aux->catalog, true);
atomic_set(&aux->aborted, 0);
aux->retry_cnt = 0;
aux->enabled = true;
}
static void dp_aux_deinit(struct dp_aux *dp_aux)
{
struct dp_aux_private *aux;
if (!dp_aux) {
pr_err("invalid input\n");
return;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
if (!aux->enabled)
return;
atomic_set(&aux->aborted, 1);
aux->catalog->enable(aux->catalog, false);
aux->enabled = false;
}
static int dp_aux_register(struct dp_aux *dp_aux)
{
struct dp_aux_private *aux;
int ret = 0;
if (!dp_aux) {
pr_err("invalid input\n");
ret = -EINVAL;
goto exit;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
aux->drm_aux.name = "sde_dp_aux";
aux->drm_aux.dev = aux->dev;
aux->drm_aux.transfer = dp_aux_transfer;
ret = drm_dp_aux_register(&aux->drm_aux);
if (ret) {
pr_err("%s: failed to register drm aux: %d\n", __func__, ret);
goto exit;
}
dp_aux->drm_aux = &aux->drm_aux;
exit:
return ret;
}
static void dp_aux_deregister(struct dp_aux *dp_aux)
{
struct dp_aux_private *aux;
if (!dp_aux) {
pr_err("invalid input\n");
return;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
drm_dp_aux_unregister(&aux->drm_aux);
}
static void dp_aux_dpcd_updated(struct dp_aux *dp_aux)
{
struct dp_aux_private *aux;
if (!dp_aux) {
pr_err("invalid input\n");
return;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
complete(&aux->comp);
}
static void dp_aux_set_sim_mode(struct dp_aux *dp_aux, bool en,
u8 *edid, u8 *dpcd)
{
struct dp_aux_private *aux;
if (!dp_aux) {
pr_err("invalid input\n");
return;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
mutex_lock(&aux->mutex);
aux->edid = edid;
aux->dpcd = dpcd;
if (en) {
atomic_set(&aux->aborted, 0);
aux->drm_aux.transfer = dp_aux_transfer_debug;
} else {
aux->drm_aux.transfer = dp_aux_transfer;
}
mutex_unlock(&aux->mutex);
}
static int dp_aux_configure_aux_switch(struct dp_aux *dp_aux,
bool enable, int orientation)
{
struct dp_aux_private *aux;
int rc = 0;
enum fsa_function event = FSA_USBC_DISPLAYPORT_DISCONNECTED;
if (!dp_aux) {
pr_err("invalid input\n");
rc = -EINVAL;
goto end;
}
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
if (!aux->aux_switch_node) {
pr_debug("undefined fsa4480 handle\n");
rc = -EINVAL;
goto end;
}
if (enable) {
switch (orientation) {
case ORIENTATION_CC1:
event = FSA_USBC_ORIENTATION_CC1;
break;
case ORIENTATION_CC2:
event = FSA_USBC_ORIENTATION_CC2;
break;
default:
pr_err("invalid orientation\n");
rc = -EINVAL;
goto end;
}
}
pr_debug("enable=%d, orientation=%d, event=%d\n",
enable, orientation, event);
rc = fsa4480_switch_event(aux->aux_switch_node, event);
if (rc)
pr_err("failed to configure fsa4480 i2c device (%d)\n", rc);
end:
return rc;
}
struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
struct dp_parser *parser, struct device_node *aux_switch)
{
int rc = 0;
struct dp_aux_private *aux;
struct dp_aux *dp_aux;
if (!catalog || !parser ||
(!parser->no_aux_switch &&
!aux_switch &&
!parser->gpio_aux_switch)) {
pr_err("invalid input\n");
rc = -ENODEV;
goto error;
}
aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
if (!aux) {
rc = -ENOMEM;
goto error;
}
init_completion(&aux->comp);
aux->cmd_busy = false;
mutex_init(&aux->mutex);
aux->dev = dev;
aux->catalog = catalog;
aux->cfg = parser->aux_cfg;
aux->aux_switch_node = aux_switch;
dp_aux = &aux->dp_aux;
aux->retry_cnt = 0;
aux->dp_aux.reg = 0xFFFF;
dp_aux->isr = dp_aux_isr;
dp_aux->init = dp_aux_init;
dp_aux->deinit = dp_aux_deinit;
dp_aux->drm_aux_register = dp_aux_register;
dp_aux->drm_aux_deregister = dp_aux_deregister;
dp_aux->reconfig = dp_aux_reconfig;
dp_aux->abort = dp_aux_abort_transaction;
dp_aux->dpcd_updated = dp_aux_dpcd_updated;
dp_aux->set_sim_mode = dp_aux_set_sim_mode;
dp_aux->aux_switch = dp_aux_configure_aux_switch;
return dp_aux;
error:
return ERR_PTR(rc);
}
void dp_aux_put(struct dp_aux *dp_aux)
{
struct dp_aux_private *aux;
if (!dp_aux)
return;
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
mutex_destroy(&aux->mutex);
devm_kfree(aux->dev, aux);
}
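For context, once drm_aux_register() hooks drm_aux.transfer, standard DRM DPCD helpers route through dp_aux_transfer() above. A minimal hedged sketch (the function name is illustrative, not from this commit):

/*
 * Illustrative only: read the sink's DPCD revision through the DRM
 * helper, which lands in dp_aux_transfer() via drm_aux.transfer.
 */
static int example_read_sink_rev(struct dp_aux *dp_aux)
{
	u8 rev = 0;
	ssize_t len;

	len = drm_dp_dpcd_readb(dp_aux->drm_aux, DP_DPCD_REV, &rev);
	if (len < 0)
		return (int)len;

	return rev;
}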

msm/dp/dp_aux.h (new file)

@@ -0,0 +1,59 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_AUX_H_
#define _DP_AUX_H_
#include "dp_catalog.h"
#include "drm_dp_helper.h"
#define DP_STATE_NOTIFICATION_SENT BIT(0)
#define DP_STATE_TRAIN_1_STARTED BIT(1)
#define DP_STATE_TRAIN_1_SUCCEEDED BIT(2)
#define DP_STATE_TRAIN_1_FAILED BIT(3)
#define DP_STATE_TRAIN_2_STARTED BIT(4)
#define DP_STATE_TRAIN_2_SUCCEEDED BIT(5)
#define DP_STATE_TRAIN_2_FAILED BIT(6)
#define DP_STATE_CTRL_POWERED_ON BIT(7)
#define DP_STATE_CTRL_POWERED_OFF BIT(8)
#define DP_STATE_LINK_MAINTENANCE_STARTED BIT(9)
#define DP_STATE_LINK_MAINTENANCE_COMPLETED BIT(10)
#define DP_STATE_LINK_MAINTENANCE_FAILED BIT(11)
enum dp_aux_error {
DP_AUX_ERR_NONE = 0,
DP_AUX_ERR_ADDR = -1,
DP_AUX_ERR_TOUT = -2,
DP_AUX_ERR_NACK = -3,
DP_AUX_ERR_DEFER = -4,
DP_AUX_ERR_NACK_DEFER = -5,
DP_AUX_ERR_PHY = -6,
};
struct dp_aux {
u32 reg;
u32 size;
u32 state;
bool read;
struct drm_dp_aux *drm_aux;
int (*drm_aux_register)(struct dp_aux *aux);
void (*drm_aux_deregister)(struct dp_aux *aux);
void (*isr)(struct dp_aux *aux);
void (*init)(struct dp_aux *aux, struct dp_aux_cfg *aux_cfg);
void (*deinit)(struct dp_aux *aux);
void (*reconfig)(struct dp_aux *aux);
void (*abort)(struct dp_aux *aux);
void (*dpcd_updated)(struct dp_aux *aux);
void (*set_sim_mode)(struct dp_aux *aux, bool en, u8 *edid, u8 *dpcd);
int (*aux_switch)(struct dp_aux *aux, bool enable, int orientation);
};
struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
struct dp_parser *parser, struct device_node *aux_switch);
void dp_aux_put(struct dp_aux *aux);
#endif /* _DP_AUX_H_ */

msm/dp/dp_catalog.c (new file; diff suppressed because it is too large)

msm/dp/dp_catalog.h (new file)

@@ -0,0 +1,359 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_CATALOG_H_
#define _DP_CATALOG_H_
#include <drm/msm_drm.h>
#include "dp_parser.h"
/* interrupts */
#define DP_INTR_HPD BIT(0)
#define DP_INTR_AUX_I2C_DONE BIT(3)
#define DP_INTR_WRONG_ADDR BIT(6)
#define DP_INTR_TIMEOUT BIT(9)
#define DP_INTR_NACK_DEFER BIT(12)
#define DP_INTR_WRONG_DATA_CNT BIT(15)
#define DP_INTR_I2C_NACK BIT(18)
#define DP_INTR_I2C_DEFER BIT(21)
#define DP_INTR_PLL_UNLOCKED BIT(24)
#define DP_INTR_AUX_ERROR BIT(27)
#define DP_INTR_READY_FOR_VIDEO BIT(0)
#define DP_INTR_IDLE_PATTERN_SENT BIT(3)
#define DP_INTR_FRAME_END BIT(6)
#define DP_INTR_CRC_UPDATED BIT(9)
#define DP_INTR_MST_DP0_VCPF_SENT BIT(0)
#define DP_INTR_MST_DP1_VCPF_SENT BIT(3)
#define DP_MAX_TIME_SLOTS 64
/* stream id */
enum dp_stream_id {
DP_STREAM_0,
DP_STREAM_1,
DP_STREAM_MAX,
};
struct dp_catalog_hdr_data {
u32 vsc_header_byte0;
u32 vsc_header_byte1;
u32 vsc_header_byte2;
u32 vsc_header_byte3;
u32 vscext_header_byte0;
u32 vscext_header_byte1;
u32 vscext_header_byte2;
u32 vscext_header_byte3;
u32 shdr_header_byte0;
u32 shdr_header_byte1;
u32 shdr_header_byte2;
u32 shdr_header_byte3;
u32 bpc;
u32 version;
u32 length;
u32 pixel_encoding;
u32 colorimetry;
u32 dynamic_range;
u32 content_type;
struct drm_msm_ext_hdr_metadata hdr_meta;
};
struct dp_catalog_aux {
u32 data;
u32 isr;
u32 (*read_data)(struct dp_catalog_aux *aux);
int (*write_data)(struct dp_catalog_aux *aux);
int (*write_trans)(struct dp_catalog_aux *aux);
int (*clear_trans)(struct dp_catalog_aux *aux, bool read);
void (*reset)(struct dp_catalog_aux *aux);
void (*enable)(struct dp_catalog_aux *aux, bool enable);
void (*update_aux_cfg)(struct dp_catalog_aux *aux,
struct dp_aux_cfg *cfg, enum dp_phy_aux_config_type type);
void (*setup)(struct dp_catalog_aux *aux,
struct dp_aux_cfg *aux_cfg);
void (*get_irq)(struct dp_catalog_aux *aux, bool cmd_busy);
void (*clear_hw_interrupts)(struct dp_catalog_aux *aux);
};
struct dp_catalog_ctrl {
u32 isr;
u32 isr5;
void (*state_ctrl)(struct dp_catalog_ctrl *ctrl, u32 state);
void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u8 ln_cnt);
void (*lane_mapping)(struct dp_catalog_ctrl *ctrl, bool flipped,
char *lane_map);
void (*lane_pnswap)(struct dp_catalog_ctrl *ctrl, u8 ln_pnswap);
void (*mainlink_ctrl)(struct dp_catalog_ctrl *ctrl, bool enable);
void (*set_pattern)(struct dp_catalog_ctrl *ctrl, u32 pattern);
void (*reset)(struct dp_catalog_ctrl *ctrl);
void (*usb_reset)(struct dp_catalog_ctrl *ctrl, bool flip);
bool (*mainlink_ready)(struct dp_catalog_ctrl *ctrl);
void (*enable_irq)(struct dp_catalog_ctrl *ctrl, bool enable);
void (*phy_reset)(struct dp_catalog_ctrl *ctrl);
void (*phy_lane_cfg)(struct dp_catalog_ctrl *ctrl, bool flipped,
u8 lane_cnt);
void (*update_vx_px)(struct dp_catalog_ctrl *ctrl, u8 v_level,
u8 p_level);
void (*get_interrupt)(struct dp_catalog_ctrl *ctrl);
u32 (*read_hdcp_status)(struct dp_catalog_ctrl *ctrl);
void (*send_phy_pattern)(struct dp_catalog_ctrl *ctrl,
u32 pattern);
u32 (*read_phy_pattern)(struct dp_catalog_ctrl *ctrl);
void (*mst_config)(struct dp_catalog_ctrl *ctrl, bool enable);
void (*trigger_act)(struct dp_catalog_ctrl *ctrl);
void (*read_act_complete_sts)(struct dp_catalog_ctrl *ctrl, bool *sts);
void (*channel_alloc)(struct dp_catalog_ctrl *ctrl,
u32 ch, u32 ch_start_timeslot, u32 tot_ch_cnt);
void (*update_rg)(struct dp_catalog_ctrl *ctrl, u32 ch, u32 x_int,
u32 y_frac_enum);
void (*channel_dealloc)(struct dp_catalog_ctrl *ctrl,
u32 ch, u32 ch_start_timeslot, u32 tot_ch_cnt);
void (*fec_config)(struct dp_catalog_ctrl *ctrl, bool enable);
void (*mainlink_levels)(struct dp_catalog_ctrl *ctrl, u8 lane_cnt);
};
struct dp_catalog_hpd {
void (*config_hpd)(struct dp_catalog_hpd *hpd, bool en);
u32 (*get_interrupt)(struct dp_catalog_hpd *hpd);
};
#define HEADER_BYTE_2_BIT 0
#define PARITY_BYTE_2_BIT 8
#define HEADER_BYTE_1_BIT 16
#define PARITY_BYTE_1_BIT 24
#define HEADER_BYTE_3_BIT 16
#define PARITY_BYTE_3_BIT 24
enum dp_catalog_audio_sdp_type {
DP_AUDIO_SDP_STREAM,
DP_AUDIO_SDP_TIMESTAMP,
DP_AUDIO_SDP_INFOFRAME,
DP_AUDIO_SDP_COPYMANAGEMENT,
DP_AUDIO_SDP_ISRC,
DP_AUDIO_SDP_MAX,
};
enum dp_catalog_audio_header_type {
DP_AUDIO_SDP_HEADER_1,
DP_AUDIO_SDP_HEADER_2,
DP_AUDIO_SDP_HEADER_3,
DP_AUDIO_SDP_HEADER_MAX,
};
struct dp_catalog_audio {
enum dp_catalog_audio_sdp_type sdp_type;
enum dp_catalog_audio_header_type sdp_header;
u32 data;
enum dp_stream_id stream_id;
void (*init)(struct dp_catalog_audio *audio);
void (*enable)(struct dp_catalog_audio *audio);
void (*config_acr)(struct dp_catalog_audio *audio);
void (*config_sdp)(struct dp_catalog_audio *audio);
void (*set_header)(struct dp_catalog_audio *audio);
void (*get_header)(struct dp_catalog_audio *audio);
};
struct dp_dsc_cfg_data {
bool dsc_en;
char pps[128];
u32 pps_len;
u32 pps_word[32];
u32 pps_word_len;
u8 parity[32];
u8 parity_len;
u32 parity_word[8];
u32 parity_word_len;
u32 slice_per_pkt;
u32 bytes_per_pkt;
u32 eol_byte_num;
u32 be_in_lane;
u32 dto_en;
u32 dto_n;
u32 dto_d;
u32 dto_count;
};
struct dp_catalog_panel {
u32 total;
u32 sync_start;
u32 width_blanking;
u32 dp_active;
u8 *spd_vendor_name;
u8 *spd_product_description;
struct dp_catalog_hdr_data hdr_data;
/* TPG */
u32 hsync_period;
u32 vsync_period;
u32 display_v_start;
u32 display_v_end;
u32 v_sync_width;
u32 hsync_ctl;
u32 display_hctl;
/* TU */
u32 dp_tu;
u32 valid_boundary;
u32 valid_boundary2;
u32 misc_val;
enum dp_stream_id stream_id;
bool widebus_en;
struct dp_dsc_cfg_data dsc;
int (*timing_cfg)(struct dp_catalog_panel *panel);
void (*config_hdr)(struct dp_catalog_panel *panel, bool en,
u32 dhdr_max_pkts);
void (*tpg_config)(struct dp_catalog_panel *panel, bool enable);
void (*config_spd)(struct dp_catalog_panel *panel);
void (*config_misc)(struct dp_catalog_panel *panel);
void (*config_msa)(struct dp_catalog_panel *panel,
u32 rate, u32 stream_rate_khz);
void (*update_transfer_unit)(struct dp_catalog_panel *panel);
void (*config_ctrl)(struct dp_catalog_panel *panel, u32 cfg);
void (*config_dto)(struct dp_catalog_panel *panel, bool ack);
void (*dsc_cfg)(struct dp_catalog_panel *panel);
void (*pps_flush)(struct dp_catalog_panel *panel);
void (*dhdr_flush)(struct dp_catalog_panel *panel);
bool (*dhdr_busy)(struct dp_catalog_panel *panel);
};
struct dp_catalog;
struct dp_catalog_priv {
void *data;
void (*put)(struct dp_catalog *catalog);
void (*set_exe_mode)(struct dp_catalog *dp_catalog, char *mode);
};
struct dp_catalog {
struct dp_catalog_aux aux;
struct dp_catalog_ctrl ctrl;
struct dp_catalog_audio audio;
struct dp_catalog_panel panel;
struct dp_catalog_priv priv;
struct dp_catalog_hpd hpd;
void (*set_exe_mode)(struct dp_catalog *dp_catalog, char *mode);
int (*get_reg_dump)(struct dp_catalog *dp_catalog,
char *mode, u8 **out_buf, u32 *out_buf_len);
};
static inline u8 dp_ecc_get_g0_value(u8 data)
{
u8 c[4];
u8 g[4];
u8 ret_data = 0;
u8 i;
for (i = 0; i < 4; i++)
c[i] = (data >> i) & 0x01;
g[0] = c[3];
g[1] = c[0] ^ c[3];
g[2] = c[1];
g[3] = c[2];
for (i = 0; i < 4; i++)
ret_data = ((g[i] & 0x01) << i) | ret_data;
return ret_data;
}
static inline u8 dp_ecc_get_g1_value(u8 data)
{
u8 c[4];
u8 g[4];
u8 ret_data = 0;
u8 i;
for (i = 0; i < 4; i++)
c[i] = (data >> i) & 0x01;
g[0] = c[0] ^ c[3];
g[1] = c[0] ^ c[1] ^ c[3];
g[2] = c[1] ^ c[2];
g[3] = c[2] ^ c[3];
for (i = 0; i < 4; i++)
ret_data = ((g[i] & 0x01) << i) | ret_data;
return ret_data;
}
static inline u8 dp_header_get_parity(u32 data)
{
u8 x0 = 0;
u8 x1 = 0;
u8 ci = 0;
u8 iData = 0;
u8 i = 0;
u8 parity_byte;
u8 num_byte = (data > 0xFF) ? 8 : 2;
for (i = 0; i < num_byte; i++) {
iData = (data >> i*4) & 0xF;
ci = iData ^ x1;
x1 = x0 ^ dp_ecc_get_g1_value(ci);
x0 = dp_ecc_get_g0_value(ci);
}
parity_byte = x1 | (x0 << 4);
return parity_byte;
}
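As a brief illustration of the ECC helpers above: each SDP header byte programmed in dp_audio.c is paired with the parity byte computed here. A hedged sketch (illustrative name; the resulting value is not asserted):

/*
 * Illustrative only: compute the parity byte for the audio stream SDP
 * type 0x02, as programmed in dp_audio_stream_sdp().
 */
static inline void example_parity_usage(void)
{
	u8 parity = dp_header_get_parity(0x02);

	pr_debug("parity for header byte 0x02 = 0x%x\n", parity);
}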
static inline u32 dp_read(char *exe_mode, struct dp_io_data *io_data,
u32 offset)
{
u32 data = 0;
if (!strcmp(exe_mode, "hw") || !strcmp(exe_mode, "all")) {
data = readl_relaxed(io_data->io.base + offset);
} else if (!strcmp(exe_mode, "sw")) {
if (io_data->buf)
memcpy(&data, io_data->buf + offset, sizeof(data));
}
return data;
}
static inline void dp_write(char *exe_mode, struct dp_io_data *io_data,
u32 offset, u32 data)
{
if (!strcmp(exe_mode, "hw") || !strcmp(exe_mode, "all"))
writel_relaxed(data, io_data->io.base + offset);
if (!strcmp(exe_mode, "sw") || !strcmp(exe_mode, "all")) {
if (io_data->buf)
memcpy(io_data->buf + offset, &data, sizeof(data));
}
}
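The exe_mode string in these accessors selects real register I/O ("hw"), a software shadow buffer ("sw"), or both ("all"); the shadow path backs the catalog's debug/simulation mode. A hedged sketch (illustrative name; assumes io_data->io.base is mapped):

/* Illustrative only: "hw" reads the mapped register directly */
static inline u32 example_read_reg_hw(struct dp_io_data *io_data,
		u32 offset)
{
	return dp_read("hw", io_data, offset);
}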
struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_parser *parser);
void dp_catalog_put(struct dp_catalog *catalog);
int dp_catalog_get_v420(struct device *dev, struct dp_catalog *catalog,
void *io);
int dp_catalog_get_v200(struct device *dev, struct dp_catalog *catalog,
void *io);
#endif /* _DP_CATALOG_H_ */

msm/dp/dp_catalog_v200.c (new file)

@@ -0,0 +1,304 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/delay.h>
#include "dp_catalog.h"
#include "dp_reg.h"
#define dp_catalog_get_priv_v200(x) ({ \
struct dp_catalog *dp_catalog; \
dp_catalog = container_of(x, struct dp_catalog, x); \
dp_catalog->priv.data; \
})
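/*
 * The accessor macro above leans on container_of(): given a pointer to
 * an embedded member (such as &dp_catalog->aux), it subtracts the
 * member's offset to recover the enclosing dp_catalog and then returns
 * priv.data. A minimal user-space illustration of the idea:
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct aux_stub { int dummy; };

struct catalog_stub {
	void *priv_data;
	struct aux_stub aux; /* embedded, like dp_catalog.aux */
};

int main(void)
{
	struct catalog_stub cat = { .priv_data = (void *)0x1234 };
	struct aux_stub *a = &cat.aux; /* only the member is passed around */
	struct catalog_stub *back = container_of(a, struct catalog_stub, aux);

	printf("recovered priv_data: %p\n", back->priv_data);
	return 0;
}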
struct dp_catalog_io {
struct dp_io_data *dp_ahb;
struct dp_io_data *dp_aux;
struct dp_io_data *dp_link;
struct dp_io_data *dp_p0;
struct dp_io_data *dp_phy;
struct dp_io_data *dp_ln_tx0;
struct dp_io_data *dp_ln_tx1;
struct dp_io_data *dp_mmss_cc;
struct dp_io_data *dp_pll;
struct dp_io_data *usb3_dp_com;
struct dp_io_data *hdcp_physical;
struct dp_io_data *dp_p1;
struct dp_io_data *dp_tcsr;
};
struct dp_catalog_private_v200 {
struct device *dev;
struct dp_catalog_io *io;
char exe_mode[SZ_4];
};
static void dp_catalog_aux_clear_hw_interrupts_v200(struct dp_catalog_aux *aux)
{
struct dp_catalog_private_v200 *catalog;
struct dp_io_data *io_data;
u32 data = 0;
if (!aux) {
pr_err("invalid input\n");
return;
}
catalog = dp_catalog_get_priv_v200(aux);
io_data = catalog->io->dp_phy;
data = dp_read(catalog->exe_mode, io_data,
DP_PHY_AUX_INTERRUPT_STATUS_V200);
dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR_V200,
0x1f);
wmb(); /* make sure 0x1f is written before next write */
dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR_V200,
0x9f);
wmb(); /* make sure 0x9f is written before next write */
dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_CLEAR_V200,
0);
wmb(); /* make sure register is cleared */
}
static void dp_catalog_aux_setup_v200(struct dp_catalog_aux *aux,
struct dp_aux_cfg *cfg)
{
struct dp_catalog_private_v200 *catalog;
struct dp_io_data *io_data;
int i = 0, sw_reset = 0;
if (!aux || !cfg) {
pr_err("invalid input\n");
return;
}
catalog = dp_catalog_get_priv_v200(aux);
io_data = catalog->io->dp_ahb;
sw_reset = dp_read(catalog->exe_mode, io_data, DP_SW_RESET);
sw_reset |= BIT(0);
dp_write(catalog->exe_mode, io_data, DP_SW_RESET, sw_reset);
usleep_range(1000, 1010); /* h/w recommended delay */
sw_reset &= ~BIT(0);
dp_write(catalog->exe_mode, io_data, DP_SW_RESET, sw_reset);
dp_write(catalog->exe_mode, io_data, DP_PHY_CTRL, 0x4); /* bit 2 */
udelay(1000);
dp_write(catalog->exe_mode, io_data, DP_PHY_CTRL, 0x0); /* bit 2 */
wmb(); /* make sure programming happened */
io_data = catalog->io->dp_tcsr;
dp_write(catalog->exe_mode, io_data, 0x4c, 0x1); /* bit 0 */
wmb(); /* make sure programming happened */
io_data = catalog->io->dp_phy;
dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x3c);
wmb(); /* make sure PD programming happened */
dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x3d);
wmb(); /* make sure PD programming happened */
/* DP AUX CFG register programming */
io_data = catalog->io->dp_phy;
for (i = 0; i < PHY_AUX_CFG_MAX; i++)
dp_write(catalog->exe_mode, io_data, cfg[i].offset,
cfg[i].lut[cfg[i].current_index]);
dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_MASK_V200,
0x1F);
wmb(); /* make sure AUX configuration is done before enabling it */
}
static void dp_catalog_panel_config_msa_v200(struct dp_catalog_panel *panel,
u32 rate, u32 stream_rate_khz)
{
u32 pixel_m, pixel_n;
u32 mvid, nvid;
u32 const nvid_fixed = 0x8000;
u32 const link_rate_hbr2 = 540000;
u32 const link_rate_hbr3 = 810000;
struct dp_catalog_private_v200 *catalog;
struct dp_io_data *io_data;
u32 strm_reg_off = 0;
u32 mvid_reg_off = 0, nvid_reg_off = 0;
if (!panel) {
pr_err("invalid input\n");
return;
}
if (panel->stream_id >= DP_STREAM_MAX) {
pr_err("invalid stream_id:%d\n", panel->stream_id);
return;
}
catalog = dp_catalog_get_priv_v200(panel);
io_data = catalog->io->dp_mmss_cc;
if (panel->stream_id == DP_STREAM_1)
strm_reg_off = MMSS_DP_PIXEL1_M_V200 -
MMSS_DP_PIXEL_M_V200;
pixel_m = dp_read(catalog->exe_mode, io_data,
MMSS_DP_PIXEL_M_V200 + strm_reg_off);
pixel_n = dp_read(catalog->exe_mode, io_data,
MMSS_DP_PIXEL_N_V200 + strm_reg_off);
pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
mvid = (pixel_m & 0xFFFF) * 5;
nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
if (nvid < nvid_fixed) {
u32 temp;
temp = (nvid_fixed / nvid) * nvid;
mvid = (nvid_fixed / nvid) * mvid;
nvid = temp;
}
pr_debug("rate = %d\n", rate);
if (panel->widebus_en)
mvid <<= 1;
if (link_rate_hbr2 == rate)
nvid *= 2;
if (link_rate_hbr3 == rate)
nvid *= 3;
io_data = catalog->io->dp_link;
if (panel->stream_id == DP_STREAM_1) {
mvid_reg_off = DP1_SOFTWARE_MVID - DP_SOFTWARE_MVID;
nvid_reg_off = DP1_SOFTWARE_NVID - DP_SOFTWARE_NVID;
}
pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_MVID + mvid_reg_off,
mvid);
dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_NVID + nvid_reg_off,
nvid);
}
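/*
 * A worked, user-space rerun of the MVID/NVID arithmetic above. The
 * pixel M/N values here are made up; the driver reads them back from
 * the MMSS clock controller after the pixel clock RCG is programmed.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t nvid_fixed = 0x8000;
	uint32_t pixel_m = 0x1770;          /* sample M: 6000 */
	uint32_t pixel_n = 0xFFFF - 0x3A98; /* sample N: 15000, read back inverted */
	uint32_t rate = 540000;             /* HBR2 link rate in kHz */
	int widebus_en = 0;
	uint32_t mvid, nvid;

	mvid = (pixel_m & 0xFFFF) * 5;
	nvid = (0xFFFF & ~pixel_n) + (pixel_m & 0xFFFF);

	if (nvid < nvid_fixed) { /* scale both toward the fixed NVID */
		uint32_t k = nvid_fixed / nvid;

		mvid *= k;
		nvid *= k;
	}
	if (widebus_en)
		mvid <<= 1; /* two pixels per clock */
	if (rate == 540000) /* HBR2 */
		nvid *= 2;
	if (rate == 810000) /* HBR3 */
		nvid *= 3;
	printf("mvid=0x%x nvid=0x%x\n", mvid, nvid);
	return 0;
}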
static void dp_catalog_ctrl_lane_mapping_v200(struct dp_catalog_ctrl *ctrl,
bool flipped, char *lane_map)
{
struct dp_catalog_private_v200 *catalog;
struct dp_io_data *io_data;
u8 l_map[4] = { 0 }, i = 0, j = 0;
u32 lane_map_reg = 0;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
catalog = dp_catalog_get_priv_v200(ctrl);
io_data = catalog->io->dp_link;
/* For flip case, swap phy lanes with ML0 and ML3, ML1 and ML2 */
if (flipped) {
for (i = 0; i < DP_MAX_PHY_LN; i++) {
if (lane_map[i] == DP_ML0) {
for (j = 0; j < DP_MAX_PHY_LN; j++) {
if (lane_map[j] == DP_ML3) {
l_map[i] = DP_ML3;
l_map[j] = DP_ML0;
break;
}
}
} else if (lane_map[i] == DP_ML1) {
for (j = 0; j < DP_MAX_PHY_LN; j++) {
if (lane_map[j] == DP_ML2) {
l_map[i] = DP_ML2;
l_map[j] = DP_ML1;
break;
}
}
}
}
} else {
/* Normal orientation */
for (i = 0; i < DP_MAX_PHY_LN; i++)
l_map[i] = lane_map[i];
}
lane_map_reg = ((l_map[3]&3)<<6)|((l_map[2]&3)<<4)|((l_map[1]&3)<<2)
|(l_map[0]&3);
dp_write(catalog->exe_mode, io_data, DP_LOGICAL2PHYSICAL_LANE_MAPPING,
lane_map_reg);
}
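/*
 * The flip path above swaps lane assignments ML0<->ML3 and ML1<->ML2
 * and then packs four 2-bit lane IDs into one register. Assuming
 * DP_ML0..DP_ML3 are the values 0..3 (matching the 2-bit fields), the
 * pairwise swap collapses to v -> 3 - v, as this sketch shows:
 */
#include <stdio.h>
#include <stdint.h>

enum { ML0, ML1, ML2, ML3, MAX_LN };

int main(void)
{
	uint8_t map[MAX_LN] = { ML0, ML1, ML2, ML3 };
	int i, flipped = 1;
	unsigned int reg;

	if (flipped) { /* equivalent to the ML0<->ML3 / ML1<->ML2 swap */
		for (i = 0; i < MAX_LN; i++)
			map[i] = (uint8_t)(ML3 - map[i]);
	}
	reg = (map[3] & 3) << 6 | (map[2] & 3) << 4 |
	      (map[1] & 3) << 2 | (map[0] & 3);
	printf("DP_LOGICAL2PHYSICAL_LANE_MAPPING = 0x%02x\n", reg);
	return 0;
}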
static void dp_catalog_ctrl_usb_reset_v200(struct dp_catalog_ctrl *ctrl,
bool flip)
{
}
static void dp_catalog_put_v200(struct dp_catalog *catalog)
{
struct dp_catalog_private_v200 *catalog_priv;
if (!catalog || !catalog->priv.data)
return;
catalog_priv = catalog->priv.data;
devm_kfree(catalog_priv->dev, catalog_priv);
}
static void dp_catalog_set_exe_mode_v200(struct dp_catalog *catalog, char *mode)
{
struct dp_catalog_private_v200 *catalog_priv;
if (!catalog || !catalog->priv.data)
return;
catalog_priv = catalog->priv.data;
strlcpy(catalog_priv->exe_mode, mode, sizeof(catalog_priv->exe_mode));
}
int dp_catalog_get_v200(struct device *dev, struct dp_catalog *catalog,
void *io)
{
struct dp_catalog_private_v200 *catalog_priv;
if (!dev || !catalog) {
pr_err("invalid input\n");
return -EINVAL;
}
catalog_priv = devm_kzalloc(dev, sizeof(*catalog_priv), GFP_KERNEL);
if (!catalog_priv)
return -ENOMEM;
catalog_priv->dev = dev;
catalog_priv->io = io;
catalog->priv.data = catalog_priv;
catalog->priv.put = dp_catalog_put_v200;
catalog->priv.set_exe_mode = dp_catalog_set_exe_mode_v200;
catalog->aux.clear_hw_interrupts =
dp_catalog_aux_clear_hw_interrupts_v200;
catalog->aux.setup = dp_catalog_aux_setup_v200;
catalog->panel.config_msa = dp_catalog_panel_config_msa_v200;
catalog->ctrl.lane_mapping = dp_catalog_ctrl_lane_mapping_v200;
catalog->ctrl.usb_reset = dp_catalog_ctrl_usb_reset_v200;
/* Set the default execution mode to hardware mode */
dp_catalog_set_exe_mode_v200(catalog, "hw");
return 0;
}

349
msm/dp/dp_catalog_v420.c Normal file

@@ -0,0 +1,349 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include "dp_catalog.h"
#include "dp_reg.h"
#define dp_catalog_get_priv_v420(x) ({ \
struct dp_catalog *dp_catalog; \
dp_catalog = container_of(x, struct dp_catalog, x); \
dp_catalog->priv.data; \
})
#define MAX_VOLTAGE_LEVELS 4
#define MAX_PRE_EMP_LEVELS 4
static u8 const vm_pre_emphasis[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = {
{0x00, 0x0E, 0x16, 0xFF}, /* pe0, 0 db */
{0x00, 0x0E, 0x16, 0xFF}, /* pe1, 3.5 db */
{0x00, 0x0E, 0xFF, 0xFF}, /* pe2, 6.0 db */
{0xFF, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
};
/* voltage swing, 0.2v and 1.0v are not supported */
static u8 const vm_voltage_swing[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = {
{0x07, 0x0F, 0x16, 0xFF}, /* sw0, 0.4 v */
{0x11, 0x1E, 0x1F, 0xFF}, /* sw1, 0.6 v */
{0x1A, 0x1F, 0xFF, 0xFF}, /* sw2, 0.8 v */
{0xFF, 0xFF, 0xFF, 0xFF}  /* sw3, 1.2 v, optional */
};
struct dp_catalog_io {
struct dp_io_data *dp_ahb;
struct dp_io_data *dp_aux;
struct dp_io_data *dp_link;
struct dp_io_data *dp_p0;
struct dp_io_data *dp_phy;
struct dp_io_data *dp_ln_tx0;
struct dp_io_data *dp_ln_tx1;
struct dp_io_data *dp_mmss_cc;
struct dp_io_data *dp_pll;
struct dp_io_data *usb3_dp_com;
struct dp_io_data *hdcp_physical;
struct dp_io_data *dp_p1;
};
struct dp_catalog_private_v420 {
struct device *dev;
struct dp_catalog_io *io;
char exe_mode[SZ_4];
};
static void dp_catalog_aux_setup_v420(struct dp_catalog_aux *aux,
struct dp_aux_cfg *cfg)
{
struct dp_catalog_private_v420 *catalog;
struct dp_io_data *io_data;
int i = 0;
if (!aux || !cfg) {
pr_err("invalid input\n");
return;
}
catalog = dp_catalog_get_priv_v420(aux);
io_data = catalog->io->dp_phy;
dp_write(catalog->exe_mode, io_data, DP_PHY_PD_CTL, 0x67);
wmb(); /* make sure PD programming happened */
/* Turn on BIAS current for PHY/PLL */
io_data = catalog->io->dp_pll;
dp_write(catalog->exe_mode, io_data, QSERDES_COM_BIAS_EN_CLKBUFLR_EN,
0x17);
wmb(); /* make sure BIAS programming happened */
io_data = catalog->io->dp_phy;
/* DP AUX CFG register programming */
for (i = 0; i < PHY_AUX_CFG_MAX; i++) {
pr_debug("%s: offset=0x%08x, value=0x%08x\n",
dp_phy_aux_config_type_to_string(i),
cfg[i].offset, cfg[i].lut[cfg[i].current_index]);
dp_write(catalog->exe_mode, io_data, cfg[i].offset,
cfg[i].lut[cfg[i].current_index]);
}
wmb(); /* make sure DP AUX CFG programming happened */
dp_write(catalog->exe_mode, io_data, DP_PHY_AUX_INTERRUPT_MASK_V420,
0x1F);
}
static void dp_catalog_aux_clear_hw_interrupts_v420(struct dp_catalog_aux *aux)
{
struct dp_catalog_private_v420 *catalog;
struct dp_io_data *io_data;
u32 data = 0;
if (!aux) {
pr_err("invalid input\n");
return;
}
catalog = dp_catalog_get_priv_v420(aux);
io_data = catalog->io->dp_phy;
data = dp_read(catalog->exe_mode, io_data,
DP_PHY_AUX_INTERRUPT_STATUS_V420);
dp_write(catalog->exe_mode, io_data,
DP_PHY_AUX_INTERRUPT_CLEAR_V420, 0x1f);
wmb(); /* make sure 0x1f is written before next write */
dp_write(catalog->exe_mode, io_data,
DP_PHY_AUX_INTERRUPT_CLEAR_V420, 0x9f);
wmb(); /* make sure 0x9f is written before next write */
dp_write(catalog->exe_mode, io_data,
DP_PHY_AUX_INTERRUPT_CLEAR_V420, 0);
wmb(); /* make sure register is cleared */
}
static void dp_catalog_panel_config_msa_v420(struct dp_catalog_panel *panel,
u32 rate, u32 stream_rate_khz)
{
u32 pixel_m, pixel_n;
u32 mvid, nvid, reg_off = 0, mvid_off = 0, nvid_off = 0;
u32 const nvid_fixed = 0x8000;
u32 const link_rate_hbr2 = 540000;
u32 const link_rate_hbr3 = 810000;
struct dp_catalog_private_v420 *catalog;
struct dp_io_data *io_data;
if (!panel || !rate) {
pr_err("invalid input\n");
return;
}
if (panel->stream_id >= DP_STREAM_MAX) {
pr_err("invalid stream id:%d\n", panel->stream_id);
return;
}
catalog = dp_catalog_get_priv_v420(panel);
io_data = catalog->io->dp_mmss_cc;
if (panel->stream_id == DP_STREAM_1)
reg_off = MMSS_DP_PIXEL1_M_V420 - MMSS_DP_PIXEL_M_V420;
pixel_m = dp_read(catalog->exe_mode, io_data,
MMSS_DP_PIXEL_M_V420 + reg_off);
pixel_n = dp_read(catalog->exe_mode, io_data,
MMSS_DP_PIXEL_N_V420 + reg_off);
pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
mvid = (pixel_m & 0xFFFF) * 5;
nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
if (nvid < nvid_fixed) {
u32 temp;
temp = (nvid_fixed / nvid) * nvid;
mvid = (nvid_fixed / nvid) * mvid;
nvid = temp;
}
pr_debug("rate = %d\n", rate);
if (panel->widebus_en)
mvid <<= 1;
if (link_rate_hbr2 == rate)
nvid *= 2;
if (link_rate_hbr3 == rate)
nvid *= 3;
io_data = catalog->io->dp_link;
if (panel->stream_id == DP_STREAM_1) {
mvid_off = DP1_SOFTWARE_MVID - DP_SOFTWARE_MVID;
nvid_off = DP1_SOFTWARE_NVID - DP_SOFTWARE_NVID;
}
pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_MVID + mvid_off, mvid);
dp_write(catalog->exe_mode, io_data, DP_SOFTWARE_NVID + nvid_off, nvid);
}
static void dp_catalog_ctrl_phy_lane_cfg_v420(struct dp_catalog_ctrl *ctrl,
bool flipped, u8 ln_cnt)
{
u32 info = 0x0;
struct dp_catalog_private_v420 *catalog;
u8 orientation = BIT(!!flipped);
struct dp_io_data *io_data;
if (!ctrl) {
pr_err("invalid input\n");
return;
}
catalog = dp_catalog_get_priv_v420(ctrl);
io_data = catalog->io->dp_phy;
info |= (ln_cnt & 0x0F);
info |= ((orientation & 0x0F) << 4);
pr_debug("Shared Info = 0x%x\n", info);
dp_write(catalog->exe_mode, io_data, DP_PHY_SPARE0_V420, info);
}
static void dp_catalog_ctrl_update_vx_px_v420(struct dp_catalog_ctrl *ctrl,
u8 v_level, u8 p_level)
{
struct dp_catalog_private_v420 *catalog;
struct dp_io_data *io_data;
u8 value0, value1;
if (!ctrl || !((v_level < MAX_VOLTAGE_LEVELS)
&& (p_level < MAX_PRE_EMP_LEVELS))) {
pr_err("invalid input\n");
return;
}
catalog = dp_catalog_get_priv_v420(ctrl);
pr_debug("hw: v=%d p=%d\n", v_level, p_level);
value0 = vm_voltage_swing[v_level][p_level];
value1 = vm_pre_emphasis[v_level][p_level];
/* program default setting first */
io_data = catalog->io->dp_ln_tx0;
dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL_V420, 0x2A);
dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, 0x20);
io_data = catalog->io->dp_ln_tx1;
dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL_V420, 0x2A);
dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL, 0x20);
/* Enable MUX to use Cursor values from these registers */
value0 |= BIT(5);
value1 |= BIT(5);
/* Configure host and panel only if both values are allowed */
if (value0 != 0xFF && value1 != 0xFF) {
io_data = catalog->io->dp_ln_tx0;
dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL_V420,
value0);
dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL,
value1);
io_data = catalog->io->dp_ln_tx1;
dp_write(catalog->exe_mode, io_data, TXn_TX_DRV_LVL_V420,
value0);
dp_write(catalog->exe_mode, io_data, TXn_TX_EMP_POST1_LVL,
value1);
pr_debug("hw: vx_value=0x%x px_value=0x%x\n",
value0, value1);
} else {
pr_err("invalid vx (0x%x=0x%x), px (0x%x=0x%x\n",
v_level, value0, p_level, value1);
}
}
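/*
 * A sketch of the swing/pre-emphasis lookup above: 0xFF marks an
 * unsupported level combination, and bit 5 makes the PHY take the
 * cursor values written here instead of its defaults. The tables are
 * copied from this file; the requested levels are arbitrary samples.
 */
#include <stdio.h>
#include <stdint.h>

#define LEVELS 4

static const uint8_t swing[LEVELS][LEVELS] = {
	{0x07, 0x0F, 0x16, 0xFF},
	{0x11, 0x1E, 0x1F, 0xFF},
	{0x1A, 0x1F, 0xFF, 0xFF},
	{0xFF, 0xFF, 0xFF, 0xFF},
};

static const uint8_t preemp[LEVELS][LEVELS] = {
	{0x00, 0x0E, 0x16, 0xFF},
	{0x00, 0x0E, 0x16, 0xFF},
	{0x00, 0x0E, 0xFF, 0xFF},
	{0xFF, 0xFF, 0xFF, 0xFF},
};

int main(void)
{
	int v = 1, p = 2; /* sample levels requested by link training */
	uint8_t v0 = swing[v][p], v1 = preemp[v][p];

	if (v0 == 0xFF || v1 == 0xFF) {
		printf("combination v=%d p=%d unsupported\n", v, p);
		return 1;
	}
	v0 |= 1u << 5; /* BIT(5): use cursor values from the register */
	v1 |= 1u << 5;
	printf("TXn_TX_DRV_LVL=0x%02x TXn_TX_EMP_POST1_LVL=0x%02x\n", v0, v1);
	return 0;
}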
static void dp_catalog_ctrl_lane_pnswap_v420(struct dp_catalog_ctrl *ctrl,
u8 ln_pnswap)
{
struct dp_catalog_private_v420 *catalog;
struct dp_io_data *io_data;
u32 cfg0, cfg1;
catalog = dp_catalog_get_priv_v420(ctrl);
cfg0 = 0x0a;
cfg1 = 0x0a;
cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0;
cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2;
cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0;
cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2;
io_data = catalog->io->dp_ln_tx0;
dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV_V420, cfg0);
io_data = catalog->io->dp_ln_tx1;
dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV_V420, cfg1);
}
static void dp_catalog_put_v420(struct dp_catalog *catalog)
{
struct dp_catalog_private_v420 *catalog_priv;
if (!catalog || !catalog->priv.data)
return;
catalog_priv = catalog->priv.data;
devm_kfree(catalog_priv->dev, catalog_priv);
}
static void dp_catalog_set_exe_mode_v420(struct dp_catalog *catalog, char *mode)
{
struct dp_catalog_private_v420 *catalog_priv;
if (!catalog || !catalog->priv.data)
return;
catalog_priv = catalog->priv.data;
strlcpy(catalog_priv->exe_mode, mode, sizeof(catalog_priv->exe_mode));
}
int dp_catalog_get_v420(struct device *dev, struct dp_catalog *catalog,
void *io)
{
struct dp_catalog_private_v420 *catalog_priv;
if (!dev || !catalog) {
pr_err("invalid input\n");
return -EINVAL;
}
catalog_priv = devm_kzalloc(dev, sizeof(*catalog_priv), GFP_KERNEL);
if (!catalog_priv)
return -ENOMEM;
catalog_priv->dev = dev;
catalog_priv->io = io;
catalog->priv.data = catalog_priv;
catalog->priv.put = dp_catalog_put_v420;
catalog->priv.set_exe_mode = dp_catalog_set_exe_mode_v420;
catalog->aux.setup = dp_catalog_aux_setup_v420;
catalog->aux.clear_hw_interrupts =
dp_catalog_aux_clear_hw_interrupts_v420;
catalog->panel.config_msa = dp_catalog_panel_config_msa_v420;
catalog->ctrl.phy_lane_cfg = dp_catalog_ctrl_phy_lane_cfg_v420;
catalog->ctrl.update_vx_px = dp_catalog_ctrl_update_vx_px_v420;
catalog->ctrl.lane_pnswap = dp_catalog_ctrl_lane_pnswap_v420;
/* Set the default execution mode to hardware mode */
dp_catalog_set_exe_mode_v420(catalog, "hw");
return 0;
}

1317
msm/dp/dp_ctrl.c Normal file

File diff suppressed because it is too large

48
msm/dp/dp_ctrl.h Normal file

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_CTRL_H_
#define _DP_CTRL_H_
#include "dp_aux.h"
#include "dp_panel.h"
#include "dp_link.h"
#include "dp_parser.h"
#include "dp_power.h"
#include "dp_catalog.h"
struct dp_ctrl {
int (*init)(struct dp_ctrl *dp_ctrl, bool flip, bool reset);
void (*deinit)(struct dp_ctrl *dp_ctrl);
int (*on)(struct dp_ctrl *dp_ctrl, bool mst_mode, bool fec_en,
bool shallow);
void (*off)(struct dp_ctrl *dp_ctrl);
void (*abort)(struct dp_ctrl *dp_ctrl);
void (*isr)(struct dp_ctrl *dp_ctrl);
bool (*handle_sink_request)(struct dp_ctrl *dp_ctrl);
void (*process_phy_test_request)(struct dp_ctrl *dp_ctrl);
int (*link_maintenance)(struct dp_ctrl *dp_ctrl);
int (*stream_on)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel);
void (*stream_off)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel);
void (*stream_pre_off)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel);
void (*set_mst_channel_info)(struct dp_ctrl *dp_ctrl,
enum dp_stream_id strm,
u32 ch_start_slot, u32 ch_tot_slots);
};
struct dp_ctrl_in {
struct device *dev;
struct dp_panel *panel;
struct dp_aux *aux;
struct dp_link *link;
struct dp_parser *parser;
struct dp_power *power;
struct dp_catalog_ctrl *catalog;
};
struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in);
void dp_ctrl_put(struct dp_ctrl *dp_ctrl);
#endif /* _DP_CTRL_H_ */

2101
msm/dp/dp_debug.c Normal file

File diff suppressed because it is too large

89
msm/dp/dp_debug.h Normal file

@@ -0,0 +1,89 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_DEBUG_H_
#define _DP_DEBUG_H_
#include "dp_panel.h"
#include "dp_ctrl.h"
#include "dp_link.h"
#include "dp_usbpd.h"
#include "dp_aux.h"
#include "dp_display.h"
/**
 * struct dp_debug
 * @debug_en: specifies whether debug mode enabled
 * @sim_mode: specifies whether sink simulation mode is enabled
 * @psm_enabled: specifies whether power save mode is enabled
 * @hdcp_disabled: specifies whether HDCP is disabled
 * @aspect_ratio: used to filter out aspect_ratio value
 * @vdisplay: used to filter out vdisplay value
 * @hdisplay: used to filter out hdisplay value
 * @vrefresh: used to filter out vrefresh value
 * @tpg_state: specifies whether tpg feature is enabled
 * @max_pclk_khz: max pclk supported
 * @force_encryption: enable/disable forced encryption for HDCP 2.2
 * @hdcp_status: string holding the current HDCP status
 * @dp_mst_connector_list: list head for debug-managed MST connectors
 * @mst_hpd_sim: specifies whether MST hpd is simulated
 * @mst_port_cnt: number of simulated MST ports
 * @get_edid: returns the debug EDID buffer, if one is set
 * @abort: aborts any ongoing debug activity
 */
struct dp_debug {
bool debug_en;
bool sim_mode;
bool psm_enabled;
bool hdcp_disabled;
int aspect_ratio;
int vdisplay;
int hdisplay;
int vrefresh;
bool tpg_state;
u32 max_pclk_khz;
bool force_encryption;
char hdcp_status[SZ_128];
struct dp_mst_connector dp_mst_connector_list;
bool mst_hpd_sim;
u32 mst_port_cnt;
u8 *(*get_edid)(struct dp_debug *dp_debug);
void (*abort)(struct dp_debug *dp_debug);
};
/**
* struct dp_debug_in
* @dev: device instance of the caller
* @panel: instance of panel module
* @hpd: instance of hpd module
* @link: instance of link module
* @aux: instance of aux module
* @connector: double pointer to display connector
* @catalog: instance of catalog module
* @parser: instance of parser module
* @ctrl: instance of ctrl module
*/
struct dp_debug_in {
struct device *dev;
struct dp_panel *panel;
struct dp_hpd *hpd;
struct dp_link *link;
struct dp_aux *aux;
struct drm_connector **connector;
struct dp_catalog *catalog;
struct dp_parser *parser;
struct dp_ctrl *ctrl;
};
/**
* dp_debug_get() - configure and get the DisplayPort debug module data
*
* @in: input structure containing data to initialize the debug module
* return: pointer to allocated debug module data
*
* This function sets up the debug module and provides a way
* for debugfs input to be communicated with existing modules
*/
struct dp_debug *dp_debug_get(struct dp_debug_in *in);
/**
* dp_debug_put()
*
* Cleans up dp_debug instance
*
* @dp_debug: instance of dp_debug
*/
void dp_debug_put(struct dp_debug *dp_debug);
#endif /* _DP_DEBUG_H_ */

2635
msm/dp/dp_display.c Normal file

File diff suppressed because it is too large

145
msm/dp/dp_display.h Normal file

@@ -0,0 +1,145 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_DISPLAY_H_
#define _DP_DISPLAY_H_
#include <linux/list.h>
#include <drm/drmP.h>
#include <drm/msm_drm.h>
#include "dp_panel.h"
#define DP_MST_SIM_MAX_PORTS 2
enum dp_drv_state {
PM_DEFAULT,
PM_SUSPEND,
};
struct dp_mst_hpd_info {
bool mst_protocol;
bool mst_hpd_sim;
u32 mst_port_cnt;
u8 *edid;
};
struct dp_mst_drm_cbs {
void (*hpd)(void *display, bool hpd_status,
struct dp_mst_hpd_info *info);
void (*hpd_irq)(void *display, struct dp_mst_hpd_info *info);
void (*set_drv_state)(void *dp_display,
enum dp_drv_state mst_state);
};
struct dp_mst_drm_install_info {
void *dp_mst_prv_info;
const struct dp_mst_drm_cbs *cbs;
};
struct dp_mst_caps {
bool has_mst;
u32 max_streams_supported;
u32 max_dpcd_transaction_bytes;
struct drm_dp_aux *drm_aux;
};
struct dp_mst_connector {
bool debug_en;
int con_id;
int hdisplay;
int vdisplay;
int vrefresh;
int aspect_ratio;
struct drm_connector *conn;
struct mutex lock;
struct list_head list;
enum drm_connector_status state;
};
struct dp_display {
struct drm_device *drm_dev;
struct dp_bridge *bridge;
struct drm_connector *base_connector;
void *base_dp_panel;
bool is_sst_connected;
bool is_mst_supported;
u32 max_pclk_khz;
void *dp_mst_prv_info;
int (*enable)(struct dp_display *dp_display, void *panel);
int (*post_enable)(struct dp_display *dp_display, void *panel);
int (*pre_disable)(struct dp_display *dp_display, void *panel);
int (*disable)(struct dp_display *dp_display, void *panel);
int (*set_mode)(struct dp_display *dp_display, void *panel,
struct dp_display_mode *mode);
enum drm_mode_status (*validate_mode)(struct dp_display *dp_display,
void *panel, struct drm_display_mode *mode);
int (*get_modes)(struct dp_display *dp_display, void *panel,
struct dp_display_mode *dp_mode);
int (*prepare)(struct dp_display *dp_display, void *panel);
int (*unprepare)(struct dp_display *dp_display, void *panel);
int (*request_irq)(struct dp_display *dp_display);
struct dp_debug *(*get_debug)(struct dp_display *dp_display);
void (*post_open)(struct dp_display *dp_display);
int (*config_hdr)(struct dp_display *dp_display, void *panel,
struct drm_msm_ext_hdr_metadata *hdr_meta,
bool dhdr_update);
int (*post_init)(struct dp_display *dp_display);
int (*mst_install)(struct dp_display *dp_display,
struct dp_mst_drm_install_info *mst_install_info);
int (*mst_uninstall)(struct dp_display *dp_display);
int (*mst_connector_install)(struct dp_display *dp_display,
struct drm_connector *connector);
int (*mst_connector_uninstall)(struct dp_display *dp_display,
struct drm_connector *connector);
int (*mst_connector_update_edid)(struct dp_display *dp_display,
struct drm_connector *connector,
struct edid *edid);
int (*mst_connector_update_link_info)(struct dp_display *dp_display,
struct drm_connector *connector);
int (*mst_get_connector_info)(struct dp_display *dp_display,
struct drm_connector *connector,
struct dp_mst_connector *mst_conn);
int (*mst_get_fixed_topology_port)(struct dp_display *dp_display,
u32 strm_id, u32 *port_num);
int (*get_mst_caps)(struct dp_display *dp_display,
struct dp_mst_caps *mst_caps);
int (*set_stream_info)(struct dp_display *dp_display, void *panel,
u32 strm_id, u32 start_slot, u32 num_slots, u32 pbn,
int vcpi);
void (*convert_to_dp_mode)(struct dp_display *dp_display, void *panel,
const struct drm_display_mode *drm_mode,
struct dp_display_mode *dp_mode);
int (*update_pps)(struct dp_display *dp_display,
struct drm_connector *connector, char *pps_cmd);
};
#ifdef CONFIG_DRM_MSM_DP
int dp_display_get_num_of_displays(void);
int dp_display_get_displays(void **displays, int count);
int dp_display_get_num_of_streams(void);
#else
static inline int dp_display_get_num_of_displays(void)
{
return 0;
}
static inline int dp_display_get_displays(void **displays, int count)
{
return 0;
}
static inline int dp_display_get_num_of_streams(void)
{
return 0;
}
static inline int dp_connector_update_pps(struct drm_connector *connector,
char *pps_cmd, void *display)
{
return 0;
}
#endif
#endif /* _DP_DISPLAY_H_ */

624
msm/dp/dp_drm.c Normal file

@@ -0,0 +1,624 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp]: %s: " fmt, __func__
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include "msm_drv.h"
#include "msm_kms.h"
#include "sde_connector.h"
#include "dp_drm.h"
#include "dp_debug.h"
#define DP_MST_DEBUG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
#define to_dp_bridge(x) container_of((x), struct dp_bridge, base)
void convert_to_drm_mode(const struct dp_display_mode *dp_mode,
struct drm_display_mode *drm_mode)
{
u32 flags = 0;
memset(drm_mode, 0, sizeof(*drm_mode));
drm_mode->hdisplay = dp_mode->timing.h_active;
drm_mode->hsync_start = drm_mode->hdisplay +
dp_mode->timing.h_front_porch;
drm_mode->hsync_end = drm_mode->hsync_start +
dp_mode->timing.h_sync_width;
drm_mode->htotal = drm_mode->hsync_end + dp_mode->timing.h_back_porch;
drm_mode->hskew = dp_mode->timing.h_skew;
drm_mode->vdisplay = dp_mode->timing.v_active;
drm_mode->vsync_start = drm_mode->vdisplay +
dp_mode->timing.v_front_porch;
drm_mode->vsync_end = drm_mode->vsync_start +
dp_mode->timing.v_sync_width;
drm_mode->vtotal = drm_mode->vsync_end + dp_mode->timing.v_back_porch;
drm_mode->vrefresh = dp_mode->timing.refresh_rate;
drm_mode->clock = dp_mode->timing.pixel_clk_khz;
if (dp_mode->timing.h_active_low)
flags |= DRM_MODE_FLAG_NHSYNC;
else
flags |= DRM_MODE_FLAG_PHSYNC;
if (dp_mode->timing.v_active_low)
flags |= DRM_MODE_FLAG_NVSYNC;
else
flags |= DRM_MODE_FLAG_PVSYNC;
drm_mode->flags = flags;
drm_mode->type = 0x48; /* DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED */
drm_mode_set_name(drm_mode);
}
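/*
 * The conversion above is the standard accumulation of active width,
 * front porch, sync width and back porch into sync_start/sync_end/
 * total. A stand-alone check with CEA 1080p60 numbers (the struct is
 * local to this sketch):
 */
#include <stdio.h>

struct dp_timing_stub {
	int h_active, h_front_porch, h_sync_width, h_back_porch;
	int v_active, v_front_porch, v_sync_width, v_back_porch;
	int refresh_rate, pixel_clk_khz;
};

int main(void)
{
	struct dp_timing_stub t = {
		.h_active = 1920, .h_front_porch = 88,
		.h_sync_width = 44, .h_back_porch = 148,
		.v_active = 1080, .v_front_porch = 4,
		.v_sync_width = 5, .v_back_porch = 36,
		.refresh_rate = 60, .pixel_clk_khz = 148500,
	};
	int hsync_start = t.h_active + t.h_front_porch;
	int hsync_end = hsync_start + t.h_sync_width;
	int htotal = hsync_end + t.h_back_porch;
	int vsync_start = t.v_active + t.v_front_porch;
	int vsync_end = vsync_start + t.v_sync_width;
	int vtotal = vsync_end + t.v_back_porch;

	/* 2200 x 1125 x 60 Hz = 148.5 MHz, matching pixel_clk_khz */
	printf("htotal=%d vtotal=%d\n", htotal, vtotal);
	return 0;
}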
static int dp_bridge_attach(struct drm_bridge *dp_bridge)
{
struct dp_bridge *bridge;
if (!dp_bridge) {
pr_err("Invalid params\n");
return -EINVAL;
}
bridge = to_dp_bridge(dp_bridge);
pr_debug("[%d] attached\n", bridge->id);
return 0;
}
static void dp_bridge_pre_enable(struct drm_bridge *drm_bridge)
{
int rc = 0;
struct dp_bridge *bridge;
struct dp_display *dp;
if (!drm_bridge) {
pr_err("Invalid params\n");
return;
}
bridge = to_dp_bridge(drm_bridge);
dp = bridge->display;
if (!bridge->connector) {
pr_err("Invalid connector\n");
return;
}
if (!bridge->dp_panel) {
pr_err("Invalid dp_panel\n");
return;
}
/* By this point mode should have been validated through mode_fixup */
rc = dp->set_mode(dp, bridge->dp_panel, &bridge->dp_mode);
if (rc) {
pr_err("[%d] failed to perform a mode set, rc=%d\n",
bridge->id, rc);
return;
}
rc = dp->prepare(dp, bridge->dp_panel);
if (rc) {
pr_err("[%d] DP display prepare failed, rc=%d\n",
bridge->id, rc);
return;
}
/* for SST force stream id, start slot and total slots to 0 */
dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0, 0, 0);
rc = dp->enable(dp, bridge->dp_panel);
if (rc) {
pr_err("[%d] DP display enable failed, rc=%d\n",
bridge->id, rc);
dp->unprepare(dp, bridge->dp_panel);
}
}
static void dp_bridge_enable(struct drm_bridge *drm_bridge)
{
int rc = 0;
struct dp_bridge *bridge;
struct dp_display *dp;
if (!drm_bridge) {
pr_err("Invalid params\n");
return;
}
bridge = to_dp_bridge(drm_bridge);
if (!bridge->connector) {
pr_err("Invalid connector\n");
return;
}
if (!bridge->dp_panel) {
pr_err("Invalid dp_panel\n");
return;
}
dp = bridge->display;
rc = dp->post_enable(dp, bridge->dp_panel);
if (rc)
pr_err("[%d] DP display post enable failed, rc=%d\n",
bridge->id, rc);
}
static void dp_bridge_disable(struct drm_bridge *drm_bridge)
{
int rc = 0;
struct dp_bridge *bridge;
struct dp_display *dp;
if (!drm_bridge) {
pr_err("Invalid params\n");
return;
}
bridge = to_dp_bridge(drm_bridge);
if (!bridge->connector) {
pr_err("Invalid connector\n");
return;
}
if (!bridge->dp_panel) {
pr_err("Invalid dp_panel\n");
return;
}
dp = bridge->display;
if (!dp) {
pr_err("dp is null\n");
return;
}
sde_connector_helper_bridge_disable(bridge->connector);
rc = dp->pre_disable(dp, bridge->dp_panel);
if (rc) {
pr_err("[%d] DP display pre disable failed, rc=%d\n",
bridge->id, rc);
}
}
static void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
{
int rc = 0;
struct dp_bridge *bridge;
struct dp_display *dp;
if (!drm_bridge) {
pr_err("Invalid params\n");
return;
}
bridge = to_dp_bridge(drm_bridge);
if (!bridge->connector) {
pr_err("Invalid connector\n");
return;
}
if (!bridge->dp_panel) {
pr_err("Invalid dp_panel\n");
return;
}
dp = bridge->display;
rc = dp->disable(dp, bridge->dp_panel);
if (rc) {
pr_err("[%d] DP display disable failed, rc=%d\n",
bridge->id, rc);
return;
}
rc = dp->unprepare(dp, bridge->dp_panel);
if (rc) {
pr_err("[%d] DP display unprepare failed, rc=%d\n",
bridge->id, rc);
return;
}
}
static void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct dp_bridge *bridge;
struct dp_display *dp;
if (!drm_bridge || !mode || !adjusted_mode) {
pr_err("Invalid params\n");
return;
}
bridge = to_dp_bridge(drm_bridge);
if (!bridge->connector) {
pr_err("Invalid connector\n");
return;
}
if (!bridge->dp_panel) {
pr_err("Invalid dp_panel\n");
return;
}
dp = bridge->display;
dp->convert_to_dp_mode(dp, bridge->dp_panel, adjusted_mode,
&bridge->dp_mode);
}
static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
bool ret = true;
struct dp_display_mode dp_mode;
struct dp_bridge *bridge;
struct dp_display *dp;
if (!drm_bridge || !mode || !adjusted_mode) {
pr_err("Invalid params\n");
ret = false;
goto end;
}
bridge = to_dp_bridge(drm_bridge);
if (!bridge->connector) {
pr_err("Invalid connector\n");
ret = false;
goto end;
}
if (!bridge->dp_panel) {
pr_err("Invalid dp_panel\n");
ret = false;
goto end;
}
dp = bridge->display;
dp->convert_to_dp_mode(dp, bridge->dp_panel, mode, &dp_mode);
convert_to_drm_mode(&dp_mode, adjusted_mode);
end:
return ret;
}
static const struct drm_bridge_funcs dp_bridge_ops = {
.attach = dp_bridge_attach,
.mode_fixup = dp_bridge_mode_fixup,
.pre_enable = dp_bridge_pre_enable,
.enable = dp_bridge_enable,
.disable = dp_bridge_disable,
.post_disable = dp_bridge_post_disable,
.mode_set = dp_bridge_mode_set,
};
int dp_connector_config_hdr(struct drm_connector *connector, void *display,
struct sde_connector_state *c_state)
{
struct dp_display *dp = display;
struct sde_connector *sde_conn;
if (!display || !c_state || !connector) {
pr_err("invalid params\n");
return -EINVAL;
}
sde_conn = to_sde_connector(connector);
if (!sde_conn->drv_panel) {
pr_err("invalid dp panel\n");
return -EINVAL;
}
return dp->config_hdr(dp, sde_conn->drv_panel, &c_state->hdr_meta,
c_state->dyn_hdr_meta.dynamic_hdr_update);
}
int dp_connector_post_init(struct drm_connector *connector, void *display)
{
int rc;
struct dp_display *dp_display = display;
struct sde_connector *sde_conn;
if (!dp_display || !connector)
return -EINVAL;
dp_display->base_connector = connector;
dp_display->bridge->connector = connector;
if (dp_display->post_init) {
rc = dp_display->post_init(dp_display);
if (rc)
goto end;
}
sde_conn = to_sde_connector(connector);
dp_display->bridge->dp_panel = sde_conn->drv_panel;
rc = dp_mst_init(dp_display);
end:
return rc;
}
int dp_connector_get_mode_info(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
struct msm_mode_info *mode_info,
u32 max_mixer_width, void *display)
{
const u32 dual_lm = 2;
const u32 single_lm = 1;
const u32 single_intf = 1;
const u32 no_enc = 0;
struct msm_display_topology *topology;
struct sde_connector *sde_conn;
struct dp_panel *dp_panel;
struct dp_display_mode dp_mode;
struct dp_display *dp_disp = display;
if (!drm_mode || !mode_info || !max_mixer_width || !connector ||
!display) {
pr_err("invalid params\n");
return -EINVAL;
}
memset(mode_info, 0, sizeof(*mode_info));
sde_conn = to_sde_connector(connector);
dp_panel = sde_conn->drv_panel;
topology = &mode_info->topology;
topology->num_lm = (max_mixer_width <= drm_mode->hdisplay) ?
dual_lm : single_lm;
topology->num_enc = no_enc;
topology->num_intf = single_intf;
mode_info->frame_rate = drm_mode->vrefresh;
mode_info->vtotal = drm_mode->vtotal;
mode_info->wide_bus_en = dp_panel->widebus_en;
dp_disp->convert_to_dp_mode(dp_disp, dp_panel, drm_mode, &dp_mode);
if (dp_mode.timing.comp_info.comp_ratio) {
memcpy(&mode_info->comp_info,
&dp_mode.timing.comp_info,
sizeof(mode_info->comp_info));
topology->num_enc = topology->num_lm;
}
return 0;
}
int dp_connector_get_info(struct drm_connector *connector,
struct msm_display_info *info, void *data)
{
struct dp_display *display = data;
if (!info || !display || !display->drm_dev) {
pr_err("invalid params\n");
return -EINVAL;
}
info->intf_type = DRM_MODE_CONNECTOR_DisplayPort;
info->num_of_h_tiles = 1;
info->h_tile_instance[0] = 0;
info->is_connected = display->is_sst_connected;
info->capabilities = MSM_DISPLAY_CAP_VID_MODE | MSM_DISPLAY_CAP_EDID |
MSM_DISPLAY_CAP_HOT_PLUG;
return 0;
}
enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
bool force,
void *display)
{
enum drm_connector_status status = connector_status_unknown;
struct msm_display_info info;
int rc;
if (!conn || !display)
return status;
/* get display dp_info */
memset(&info, 0x0, sizeof(info));
rc = dp_connector_get_info(conn, &info, display);
if (rc) {
pr_err("failed to get display info, rc=%d\n", rc);
return connector_status_disconnected;
}
if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
status = (info.is_connected ? connector_status_connected :
connector_status_disconnected);
else
status = connector_status_connected;
conn->display_info.width_mm = info.width_mm;
conn->display_info.height_mm = info.height_mm;
return status;
}
void dp_connector_post_open(struct drm_connector *connector, void *display)
{
struct dp_display *dp;
if (!display) {
pr_err("invalid input\n");
return;
}
dp = display;
if (dp->post_open)
dp->post_open(dp);
}
int dp_connector_get_modes(struct drm_connector *connector,
void *display)
{
int rc = 0;
struct dp_display *dp;
struct dp_display_mode *dp_mode = NULL;
struct drm_display_mode *m, drm_mode;
struct sde_connector *sde_conn;
if (!connector || !display)
return 0;
sde_conn = to_sde_connector(connector);
if (!sde_conn->drv_panel) {
pr_err("invalid dp panel\n");
return 0;
}
dp = display;
dp_mode = kzalloc(sizeof(*dp_mode), GFP_KERNEL);
if (!dp_mode)
return 0;
/* pluggable case assumes EDID is read when HPD */
if (dp->is_sst_connected) {
rc = dp->get_modes(dp, sde_conn->drv_panel, dp_mode);
if (!rc)
pr_err("failed to get DP sink modes\n");
if (dp_mode->timing.pixel_clk_khz) { /* valid DP mode */
memset(&drm_mode, 0x0, sizeof(drm_mode));
convert_to_drm_mode(dp_mode, &drm_mode);
m = drm_mode_duplicate(connector->dev, &drm_mode);
if (!m) {
pr_err("failed to add mode %ux%u\n",
drm_mode.hdisplay,
drm_mode.vdisplay);
kfree(dp_mode);
return 0;
}
m->width_mm = connector->display_info.width_mm;
m->height_mm = connector->display_info.height_mm;
drm_mode_probed_add(connector, m);
}
} else {
pr_err("No sink connected\n");
}
kfree(dp_mode);
return rc;
}
int dp_drm_bridge_init(void *data, struct drm_encoder *encoder)
{
int rc = 0;
struct dp_bridge *bridge;
struct drm_device *dev;
struct dp_display *display = data;
struct msm_drm_private *priv = NULL;
bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
if (!bridge) {
rc = -ENOMEM;
goto error;
}
dev = display->drm_dev;
bridge->display = display;
bridge->base.funcs = &dp_bridge_ops;
bridge->base.encoder = encoder;
priv = dev->dev_private;
rc = drm_bridge_attach(encoder, &bridge->base, NULL);
if (rc) {
pr_err("failed to attach bridge, rc=%d\n", rc);
goto error_free_bridge;
}
rc = display->request_irq(display);
if (rc) {
pr_err("request_irq failed, rc=%d\n", rc);
goto error_free_bridge;
}
encoder->bridge = &bridge->base;
priv->bridges[priv->num_bridges++] = &bridge->base;
display->bridge = bridge;
return 0;
error_free_bridge:
kfree(bridge);
error:
return rc;
}
void dp_drm_bridge_deinit(void *data)
{
struct dp_display *display = data;
struct dp_bridge *bridge = display->bridge;
if (bridge && bridge->base.encoder)
bridge->base.encoder->bridge = NULL;
kfree(bridge);
}
enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode, void *display)
{
struct dp_display *dp_disp;
struct sde_connector *sde_conn;
if (!mode || !display || !connector) {
pr_err("invalid params\n");
return MODE_ERROR;
}
sde_conn = to_sde_connector(connector);
if (!sde_conn->drv_panel) {
pr_err("invalid dp panel\n");
return MODE_ERROR;
}
dp_disp = display;
mode->vrefresh = drm_mode_vrefresh(mode);
return dp_disp->validate_mode(dp_disp, sde_conn->drv_panel, mode);
}
int dp_connector_update_pps(struct drm_connector *connector,
char *pps_cmd, void *display)
{
struct dp_display *dp_disp;
struct sde_connector *sde_conn;
if (!display || !connector) {
pr_err("invalid params\n");
return -EINVAL;
}
sde_conn = to_sde_connector(connector);
if (!sde_conn->drv_panel) {
pr_err("invalid dp panel\n");
return -EINVAL;
}
dp_disp = display;
return dp_disp->update_pps(dp_disp, connector, pps_cmd);
}

247
msm/dp/dp_drm.h Normal file

@@ -0,0 +1,247 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_DRM_H_
#define _DP_DRM_H_
#include <linux/types.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include "msm_drv.h"
#include "dp_display.h"
struct dp_bridge {
struct drm_bridge base;
u32 id;
struct drm_connector *connector;
struct dp_display *display;
struct dp_display_mode dp_mode;
void *dp_panel;
};
#ifdef CONFIG_DRM_MSM_DP
/**
* dp_connector_config_hdr - callback to configure HDR
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
* @c_state: connect state data
* Returns: Zero on success
*/
int dp_connector_config_hdr(struct drm_connector *connector,
void *display,
struct sde_connector_state *c_state);
/**
* dp_connector_post_init - callback to perform additional initialization steps
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
* Returns: Zero on success
*/
int dp_connector_post_init(struct drm_connector *connector, void *display);
/**
* dp_connector_detect - callback to determine if connector is connected
* @conn: Pointer to drm connector structure
* @force: Force detect setting from drm framework
* @display: Pointer to private display handle
* Returns: Connector 'is connected' status
*/
enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
bool force,
void *display);
/**
* dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
* Returns: Number of modes added
*/
int dp_connector_get_modes(struct drm_connector *connector,
void *display);
/**
* dp_connector_mode_valid - callback to determine if specified mode is valid
* @connector: Pointer to drm connector structure
* @mode: Pointer to drm mode structure
* @display: Pointer to private display handle
* Returns: Validity status for specified mode
*/
enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode,
void *display);
/**
* dp_connector_get_mode_info - retrieve information of the mode selected
* @connector: Pointer to drm connector structure
* @drm_mode: Display mode set for the display
* @mode_info: Out parameter. Information of the mode
* @max_mixer_width: max width supported by HW layer mixer
* @display: Pointer to private display structure
* Returns: zero on success
*/
int dp_connector_get_mode_info(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
struct msm_mode_info *mode_info,
u32 max_mixer_width, void *display);
/**
* dp_connector_get_info - retrieve connector display info
* @connector: Pointer to drm connector structure
* @info: Out parameter. Information of the connected display
* @display: Pointer to private display structure
* Returns: zero on success
*/
int dp_connector_get_info(struct drm_connector *connector,
struct msm_display_info *info, void *display);
/**
* dp_connector_post_open - handle the post open functionality
* @connector: Pointer to drm connector structure
* @display: Pointer to private display structure
*/
void dp_connector_post_open(struct drm_connector *connector, void *display);
int dp_drm_bridge_init(void *display,
struct drm_encoder *encoder);
void dp_drm_bridge_deinit(void *display);
/**
* convert_to_drm_mode - convert dp mode to drm mode
* @dp_mode: Pointer to dp mode
* @drm_mode: Pointer to drm mode
*/
void convert_to_drm_mode(const struct dp_display_mode *dp_mode,
struct drm_display_mode *drm_mode);
/**
* dp_connector_update_pps - update pps for given connector
* @connector: Pointer to drm connector structure
* @pps_cmd: PPS packet
* @display: Pointer to private display structure
*/
int dp_connector_update_pps(struct drm_connector *connector,
char *pps_cmd, void *display);
/**
* dp_mst_drm_bridge_init - initialize mst bridge
* @display: Pointer to private display structure
* @encoder: Pointer to encoder for mst bridge mapping
*/
int dp_mst_drm_bridge_init(void *display,
struct drm_encoder *encoder);
/**
* dp_mst_drm_bridge_deinit - de-initialize mst bridges
* @display: Pointer to private display structure
*/
void dp_mst_drm_bridge_deinit(void *display);
/**
* dp_mst_init - initialize mst objects for the given display
* @display: Pointer to private display structure
*/
int dp_mst_init(struct dp_display *dp_display);
/**
* dp_mst_deinit - de-initialize mst objects for the given display
* @display: Pointer to private display structure
*/
void dp_mst_deinit(struct dp_display *dp_display);
#else
static inline int dp_connector_config_hdr(struct drm_connector *connector,
void *display, struct sde_connector_state *c_state)
{
return 0;
}
static inline int dp_connector_post_init(struct drm_connector *connector,
void *display)
{
return 0;
}
static inline enum drm_connector_status dp_connector_detect(
struct drm_connector *conn,
bool force,
void *display)
{
return connector_status_disconnected;
}
static inline int dp_connector_get_modes(struct drm_connector *connector,
void *display)
{
return 0;
}
static inline enum drm_mode_status dp_connector_mode_valid(
struct drm_connector *connector,
struct drm_display_mode *mode,
void *display)
{
return MODE_OK;
}
static inline int dp_connector_get_mode_info(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
struct msm_mode_info *mode_info,
u32 max_mixer_width, void *display)
{
return 0;
}
static inline int dp_connector_get_info(struct drm_connector *connector,
struct msm_display_info *info, void *display)
{
return 0;
}
static inline void dp_connector_post_open(struct drm_connector *connector,
void *display)
{
}
static inline int dp_drm_bridge_init(void *display, struct drm_encoder *encoder)
{
return 0;
}
static inline void dp_drm_bridge_deinit(void *display)
{
}
static inline void convert_to_drm_mode(const struct dp_display_mode *dp_mode,
struct drm_display_mode *drm_mode)
{
}
static inline int dp_mst_drm_bridge_init(void *display,
struct drm_encoder *encoder)
{
return 0;
}
static inline void dp_mst_drm_bridge_deinit(void *display)
{
}
static inline int dp_mst_init(struct dp_display *dp_display)
{
return 0;
}
static inline void dp_mst_deinit(struct dp_display *dp_display)
{
}
#endif
#endif /* _DP_DRM_H_ */

297
msm/dp/dp_gpio_hpd.c Normal file

@@ -0,0 +1,297 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/sde_io_util.h>
#include <linux/of_gpio.h>
#include "dp_gpio_hpd.h"
struct dp_gpio_hpd_private {
struct device *dev;
struct dp_hpd base;
struct dss_gpio gpio_cfg;
struct delayed_work work;
struct dp_hpd_cb *cb;
int irq;
bool hpd;
};
static int dp_gpio_hpd_connect(struct dp_gpio_hpd_private *gpio_hpd, bool hpd)
{
int rc = 0;
if (!gpio_hpd) {
pr_err("invalid input\n");
rc = -EINVAL;
goto error;
}
gpio_hpd->base.hpd_high = hpd;
gpio_hpd->base.alt_mode_cfg_done = hpd;
gpio_hpd->base.hpd_irq = false;
if (!gpio_hpd->cb ||
!gpio_hpd->cb->configure ||
!gpio_hpd->cb->disconnect) {
pr_err("invalid cb\n");
rc = -EINVAL;
goto error;
}
if (hpd)
rc = gpio_hpd->cb->configure(gpio_hpd->dev);
else
rc = gpio_hpd->cb->disconnect(gpio_hpd->dev);
error:
return rc;
}
static int dp_gpio_hpd_attention(struct dp_gpio_hpd_private *gpio_hpd)
{
int rc = 0;
if (!gpio_hpd) {
pr_err("invalid input\n");
rc = -EINVAL;
goto error;
}
gpio_hpd->base.hpd_irq = true;
if (gpio_hpd->cb && gpio_hpd->cb->attention)
rc = gpio_hpd->cb->attention(gpio_hpd->dev);
error:
return rc;
}
static irqreturn_t dp_gpio_isr(int unused, void *data)
{
struct dp_gpio_hpd_private *gpio_hpd = data;
u32 const disconnect_timeout_retry = 50;
bool hpd;
int i;
if (!gpio_hpd)
return IRQ_NONE;
hpd = gpio_get_value_cansleep(gpio_hpd->gpio_cfg.gpio);
if (!gpio_hpd->hpd && hpd) {
gpio_hpd->hpd = true;
queue_delayed_work(system_wq, &gpio_hpd->work, 0);
return IRQ_HANDLED;
}
if (!gpio_hpd->hpd)
return IRQ_HANDLED;
/*
 * The DP 1.2 spec recommends 100ms to distinguish a real
 * disconnect from an HPD_IRQ pulse. Poll the HPD status for
 * 50 x 2ms = 100ms: if HPD stays low, DP is disconnected;
 * if HPD goes high, the pending HPD_IRQ will be handled.
 */
for (i = 0; i < disconnect_timeout_retry; i++) {
if (hpd) {
dp_gpio_hpd_attention(gpio_hpd);
return IRQ_HANDLED;
}
usleep_range(2000, 2100);
hpd = gpio_get_value_cansleep(gpio_hpd->gpio_cfg.gpio);
}
gpio_hpd->hpd = false;
queue_delayed_work(system_wq, &gpio_hpd->work, 0);
return IRQ_HANDLED;
}
static void dp_gpio_hpd_work(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
struct dp_gpio_hpd_private *gpio_hpd = container_of(dw,
struct dp_gpio_hpd_private, work);
int ret;
if (gpio_hpd->hpd) {
devm_free_irq(gpio_hpd->dev,
gpio_hpd->irq, gpio_hpd);
ret = devm_request_threaded_irq(gpio_hpd->dev,
gpio_hpd->irq, NULL,
dp_gpio_isr,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"dp-gpio-intp", gpio_hpd);
dp_gpio_hpd_connect(gpio_hpd, true);
} else {
devm_free_irq(gpio_hpd->dev,
gpio_hpd->irq, gpio_hpd);
ret = devm_request_threaded_irq(gpio_hpd->dev,
gpio_hpd->irq, NULL,
dp_gpio_isr,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"dp-gpio-intp", gpio_hpd);
dp_gpio_hpd_connect(gpio_hpd, false);
}
if (ret < 0)
pr_err("Cannot claim IRQ dp-gpio-intp\n");
}
static int dp_gpio_hpd_simulate_connect(struct dp_hpd *dp_hpd, bool hpd)
{
int rc = 0;
struct dp_gpio_hpd_private *gpio_hpd;
if (!dp_hpd) {
pr_err("invalid input\n");
rc = -EINVAL;
goto error;
}
gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base);
dp_gpio_hpd_connect(gpio_hpd, hpd);
error:
return rc;
}
static int dp_gpio_hpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo)
{
int rc = 0;
struct dp_gpio_hpd_private *gpio_hpd;
if (!dp_hpd) {
pr_err("invalid input\n");
rc = -EINVAL;
goto error;
}
gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base);
dp_gpio_hpd_attention(gpio_hpd);
error:
return rc;
}
int dp_gpio_hpd_register(struct dp_hpd *dp_hpd)
{
struct dp_gpio_hpd_private *gpio_hpd;
int edge;
int rc = 0;
if (!dp_hpd)
return -EINVAL;
gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base);
gpio_hpd->hpd = gpio_get_value_cansleep(gpio_hpd->gpio_cfg.gpio);
edge = gpio_hpd->hpd ? IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
rc = devm_request_threaded_irq(gpio_hpd->dev, gpio_hpd->irq, NULL,
dp_gpio_isr,
edge | IRQF_ONESHOT,
"dp-gpio-intp", gpio_hpd);
if (rc) {
pr_err("Failed to request INTP threaded IRQ: %d\n", rc);
return rc;
}
if (gpio_hpd->hpd)
queue_delayed_work(system_wq, &gpio_hpd->work, 0);
return rc;
}
struct dp_hpd *dp_gpio_hpd_get(struct device *dev,
struct dp_hpd_cb *cb)
{
int rc = 0;
const char *hpd_gpio_name = "qcom,dp-hpd-gpio";
struct dp_gpio_hpd_private *gpio_hpd;
struct dp_pinctrl pinctrl = {0};
if (!dev || !cb) {
pr_err("invalid device\n");
rc = -EINVAL;
goto error;
}
gpio_hpd = devm_kzalloc(dev, sizeof(*gpio_hpd), GFP_KERNEL);
if (!gpio_hpd) {
rc = -ENOMEM;
goto error;
}
pinctrl.pin = devm_pinctrl_get(dev);
if (!IS_ERR_OR_NULL(pinctrl.pin)) {
pinctrl.state_hpd_active = pinctrl_lookup_state(pinctrl.pin,
"mdss_dp_hpd_active");
if (!IS_ERR_OR_NULL(pinctrl.state_hpd_active)) {
rc = pinctrl_select_state(pinctrl.pin,
pinctrl.state_hpd_active);
if (rc) {
pr_err("failed to set hpd active state\n");
goto gpio_error;
}
}
}
gpio_hpd->gpio_cfg.gpio = of_get_named_gpio(dev->of_node,
hpd_gpio_name, 0);
if (!gpio_is_valid(gpio_hpd->gpio_cfg.gpio)) {
pr_err("%s gpio not specified\n", hpd_gpio_name);
rc = -EINVAL;
goto gpio_error;
}
strlcpy(gpio_hpd->gpio_cfg.gpio_name, hpd_gpio_name,
sizeof(gpio_hpd->gpio_cfg.gpio_name));
gpio_hpd->gpio_cfg.value = 0;
rc = gpio_request(gpio_hpd->gpio_cfg.gpio,
gpio_hpd->gpio_cfg.gpio_name);
if (rc) {
pr_err("%s: failed to request gpio\n", hpd_gpio_name);
goto gpio_error;
}
gpio_direction_input(gpio_hpd->gpio_cfg.gpio);
gpio_hpd->dev = dev;
gpio_hpd->cb = cb;
gpio_hpd->irq = gpio_to_irq(gpio_hpd->gpio_cfg.gpio);
INIT_DELAYED_WORK(&gpio_hpd->work, dp_gpio_hpd_work);
gpio_hpd->base.simulate_connect = dp_gpio_hpd_simulate_connect;
gpio_hpd->base.simulate_attention = dp_gpio_hpd_simulate_attention;
gpio_hpd->base.register_hpd = dp_gpio_hpd_register;
return &gpio_hpd->base;
gpio_error:
devm_kfree(dev, gpio_hpd);
error:
return ERR_PTR(rc);
}
void dp_gpio_hpd_put(struct dp_hpd *dp_hpd)
{
struct dp_gpio_hpd_private *gpio_hpd;
if (!dp_hpd)
return;
gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base);
gpio_free(gpio_hpd->gpio_cfg.gpio);
devm_kfree(gpio_hpd->dev, gpio_hpd);
}

32
msm/dp/dp_gpio_hpd.h Normal file

@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_GPIO_HPD_H_
#define _DP_GPIO_HPD_H_
#include "dp_hpd.h"
/**
* dp_gpio_hpd_get() - configure and get the DisplayPort HPD module data
*
* @dev: device instance of the caller
* @cb: callback functions for HPD events
* return: pointer to allocated gpio hpd module data
*
* This function sets up the gpio hpd module
*/
struct dp_hpd *dp_gpio_hpd_get(struct device *dev,
struct dp_hpd_cb *cb);
/**
* dp_gpio_hpd_put()
*
* Cleans up dp_hpd instance
*
* @hpd: instance of gpio_hpd
*/
void dp_gpio_hpd_put(struct dp_hpd *hpd);
#endif /* _DP_GPIO_HPD_H_ */

978
msm/dp/dp_hdcp2p2.c Normal file

@@ -0,0 +1,978 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[dp-hdcp2p2] %s: " fmt, __func__
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/kthread.h>
#include <linux/msm_hdcp.h>
#include <linux/kfifo.h>
#include <drm/drm_dp_helper.h>
#include "sde_hdcp_2x.h"
#define DP_INTR_STATUS2 (0x00000024)
#define DP_INTR_STATUS3 (0x00000028)
#define dp_read(offset) readl_relaxed((offset))
#define dp_write(offset, data) writel_relaxed((data), (offset))
#define DP_HDCP_RXCAPS_LENGTH 3
enum dp_hdcp2p2_sink_status {
SINK_DISCONNECTED,
SINK_CONNECTED
};
struct dp_hdcp2p2_ctrl {
DECLARE_KFIFO(cmd_q, enum hdcp_transport_wakeup_cmd, 8);
wait_queue_head_t wait_q;
atomic_t auth_state;
enum dp_hdcp2p2_sink_status sink_status; /* Is sink connected */
struct dp_hdcp2p2_interrupts *intr;
struct sde_hdcp_init_data init_data;
struct mutex mutex; /* mutex to protect access to ctrl */
struct mutex msg_lock; /* mutex to protect access to msg buffer */
struct sde_hdcp_ops *ops;
void *lib_ctx; /* Handle to HDCP 2.2 Trustzone library */
struct sde_hdcp_2x_ops *lib; /* Ops for driver to call into TZ */
struct task_struct *thread;
struct hdcp2_buffer response;
struct hdcp2_buffer request;
uint32_t total_message_length;
uint32_t timeout;
struct sde_hdcp_2x_msg_part msg_part[HDCP_MAX_MESSAGE_PARTS];
u8 sink_rx_status;
u8 rx_status;
char abort_mask;
bool polling;
};
struct dp_hdcp2p2_int_set {
u32 interrupt;
char *name;
void (*func)(struct dp_hdcp2p2_ctrl *ctrl);
};
struct dp_hdcp2p2_interrupts {
u32 reg;
struct dp_hdcp2p2_int_set *int_set;
};
static inline int dp_hdcp2p2_valid_handle(struct dp_hdcp2p2_ctrl *ctrl)
{
if (!ctrl) {
pr_err("invalid input\n");
return -EINVAL;
}
if (!ctrl->lib_ctx) {
pr_err("HDCP library needs to be acquired\n");
return -EINVAL;
}
if (!ctrl->lib) {
pr_err("invalid lib ops data\n");
return -EINVAL;
}
return 0;
}
static inline bool dp_hdcp2p2_is_valid_state(struct dp_hdcp2p2_ctrl *ctrl)
{
enum hdcp_transport_wakeup_cmd cmd;
if (kfifo_peek(&ctrl->cmd_q, &cmd) &&
cmd == HDCP_TRANSPORT_CMD_AUTHENTICATE)
return true;
if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE)
return true;
return false;
}
static int dp_hdcp2p2_copy_buf(struct dp_hdcp2p2_ctrl *ctrl,
struct hdcp_transport_wakeup_data *data)
{
int i = 0;
uint32_t num_messages = 0;
if (!data || !data->message_data)
return 0;
mutex_lock(&ctrl->msg_lock);
ctrl->timeout = data->timeout;
num_messages = data->message_data->num_messages;
ctrl->total_message_length = 0; /* Total length of all messages */
for (i = 0; i < num_messages; i++)
ctrl->total_message_length +=
data->message_data->messages[i].length;
memcpy(ctrl->msg_part, data->message_data->messages,
sizeof(data->message_data->messages));
ctrl->rx_status = data->message_data->rx_status;
ctrl->abort_mask = data->abort_mask;
if (!ctrl->total_message_length) {
mutex_unlock(&ctrl->msg_lock);
return 0;
}
ctrl->response.data = data->buf;
ctrl->response.length = ctrl->total_message_length;
ctrl->request.data = data->buf;
ctrl->request.length = ctrl->total_message_length;
mutex_unlock(&ctrl->msg_lock);
return 0;
}
static void dp_hdcp2p2_send_auth_status(struct dp_hdcp2p2_ctrl *ctrl)
{
ctrl->init_data.notify_status(ctrl->init_data.cb_data,
atomic_read(&ctrl->auth_state));
}
static void dp_hdcp2p2_set_interrupts(struct dp_hdcp2p2_ctrl *ctrl, bool enable)
{
void __iomem *base = ctrl->init_data.dp_ahb->base;
struct dp_hdcp2p2_interrupts *intr = ctrl->intr;
while (intr && intr->reg) {
struct dp_hdcp2p2_int_set *int_set = intr->int_set;
u32 interrupts = 0;
while (int_set && int_set->interrupt) {
interrupts |= int_set->interrupt;
int_set++;
}
if (enable)
dp_write(base + intr->reg,
dp_read(base + intr->reg) | interrupts);
else
dp_write(base + intr->reg,
dp_read(base + intr->reg) & ~interrupts);
intr++;
}
}
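/*
 * dp_hdcp2p2_set_interrupts() walks two sentinel-terminated tables: an
 * outer list of interrupt registers, each with an inner list of bits
 * that get OR-ed into a single enable mask. A user-space sketch of the
 * same walk; the register offsets and masks here are made up:
 */
#include <stdio.h>
#include <stdint.h>

struct int_set_stub {
	uint32_t mask;
	const char *name;
};

struct intr_stub {
	uint32_t reg;
	const struct int_set_stub *set;
};

static const struct int_set_stub status2[] = {
	{ 1u << 0, "rxstatus" },
	{ 1u << 2, "cp_irq" },
	{ 0, 0 }, /* sentinel */
};

static const struct intr_stub table[] = {
	{ 0x24, status2 },
	{ 0, 0 }, /* sentinel */
};

int main(void)
{
	const struct intr_stub *i;
	const struct int_set_stub *s;

	for (i = table; i->reg; i++) {
		uint32_t bits = 0;

		for (s = i->set; s->mask; s++)
			bits |= s->mask;
		printf("reg 0x%02x: enable mask 0x%08x\n",
			(unsigned int)i->reg, (unsigned int)bits);
	}
	return 0;
}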
static int dp_hdcp2p2_wakeup(struct hdcp_transport_wakeup_data *data)
{
struct dp_hdcp2p2_ctrl *ctrl;
u32 const default_timeout_us = 500;
if (!data) {
pr_err("invalid input\n");
return -EINVAL;
}
ctrl = data->context;
if (!ctrl) {
pr_err("invalid ctrl\n");
return -EINVAL;
}
if (data->timeout)
ctrl->timeout = (data->timeout) * 2;
else
ctrl->timeout = default_timeout_us;
if (dp_hdcp2p2_copy_buf(ctrl, data))
goto exit;
ctrl->polling = false;
switch (data->cmd) {
case HDCP_TRANSPORT_CMD_STATUS_SUCCESS:
atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED);
break;
case HDCP_TRANSPORT_CMD_STATUS_FAILED:
atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
break;
default:
break;
}
kfifo_put(&ctrl->cmd_q, data->cmd);
wake_up(&ctrl->wait_q);
exit:
return 0;
}
static inline void dp_hdcp2p2_wakeup_lib(struct dp_hdcp2p2_ctrl *ctrl,
struct sde_hdcp_2x_wakeup_data *data)
{
int rc = 0;
if (ctrl && ctrl->lib && ctrl->lib->wakeup &&
data && (data->cmd != HDCP_2X_CMD_INVALID)) {
rc = ctrl->lib->wakeup(data);
if (rc)
pr_err("error sending %s to lib\n",
sde_hdcp_2x_cmd_to_str(data->cmd));
}
}
static void dp_hdcp2p2_reset(struct dp_hdcp2p2_ctrl *ctrl)
{
if (!ctrl) {
pr_err("invalid input\n");
return;
}
ctrl->sink_status = SINK_DISCONNECTED;
atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
}
static int dp_hdcp2p2_register(void *input, bool mst_enabled)
{
int rc;
enum sde_hdcp_2x_device_type device_type;
struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
rc = dp_hdcp2p2_valid_handle(ctrl);
if (rc)
return rc;
if (mst_enabled)
device_type = HDCP_TXMTR_DP_MST;
else
device_type = HDCP_TXMTR_DP;
return sde_hdcp_2x_enable(ctrl->lib_ctx, device_type);
}
static int dp_hdcp2p2_on(void *input)
{
int rc = 0;
struct dp_hdcp2p2_ctrl *ctrl = input;
struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
rc = dp_hdcp2p2_valid_handle(ctrl);
if (rc)
return rc;
cdata.cmd = HDCP_2X_CMD_START;
cdata.context = ctrl->lib_ctx;
rc = ctrl->lib->wakeup(&cdata);
if (rc)
pr_err("Unable to start the HDCP 2.2 library (%d)\n", rc);
return rc;
}
static void dp_hdcp2p2_off(void *input)
{
int rc;
struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
rc = dp_hdcp2p2_valid_handle(ctrl);
if (rc)
return;
if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) {
cdata.cmd = HDCP_2X_CMD_STOP;
cdata.context = ctrl->lib_ctx;
dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
}
dp_hdcp2p2_set_interrupts(ctrl, false);
dp_hdcp2p2_reset(ctrl);
kthread_park(ctrl->thread);
sde_hdcp_2x_disable(ctrl->lib_ctx);
}
static int dp_hdcp2p2_authenticate(void *input)
{
int rc;
struct dp_hdcp2p2_ctrl *ctrl = input;
struct hdcp_transport_wakeup_data cdata = {
HDCP_TRANSPORT_CMD_AUTHENTICATE};
rc = dp_hdcp2p2_valid_handle(ctrl);
if (rc)
return rc;
dp_hdcp2p2_set_interrupts(ctrl, true);
ctrl->sink_status = SINK_CONNECTED;
atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATING);
kthread_unpark(ctrl->thread);
cdata.context = input;
dp_hdcp2p2_wakeup(&cdata);
return rc;
}
static int dp_hdcp2p2_reauthenticate(void *input)
{
struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
if (!ctrl) {
pr_err("invalid input\n");
return -EINVAL;
}
dp_hdcp2p2_reset((struct dp_hdcp2p2_ctrl *)input);
return dp_hdcp2p2_authenticate(input);
}
static void dp_hdcp2p2_min_level_change(void *client_ctx,
u8 min_enc_level)
{
struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)client_ctx;
struct sde_hdcp_2x_wakeup_data cdata = {
HDCP_2X_CMD_MIN_ENC_LEVEL};
if (!ctrl) {
pr_err("invalid input\n");
return;
}
if (!dp_hdcp2p2_is_valid_state(ctrl)) {
pr_err("invalid state\n");
return;
}
cdata.context = ctrl->lib_ctx;
cdata.min_enc_level = min_enc_level;
dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
}
static int dp_hdcp2p2_aux_read_message(struct dp_hdcp2p2_ctrl *ctrl)
{
int rc = 0, max_size = 16, read_size = 0, bytes_read = 0;
int size = ctrl->request.length, offset = ctrl->msg_part->offset;
u8 *buf = ctrl->request.data;
if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE ||
atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL) {
pr_err("invalid hdcp state\n");
rc = -EINVAL;
goto exit;
}
if (!buf) {
pr_err("invalid request buffer\n");
rc = -EINVAL;
goto exit;
}
pr_debug("offset(0x%x), size(%d)\n", offset, size);
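	/*
	 * Note: a DP AUX transaction carries at most 16 bytes of payload,
	 * so the message is read back from the sink in 16-byte chunks
	 * until the full request length has been transferred.
	 */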
do {
read_size = min(size, max_size);
bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux,
offset, buf, read_size);
if (bytes_read != read_size) {
pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
offset, read_size, bytes_read);
rc = -EINVAL;
break;
}
buf += read_size;
offset += read_size;
size -= read_size;
} while (size > 0);
exit:
return rc;
}
static int dp_hdcp2p2_aux_write_message(struct dp_hdcp2p2_ctrl *ctrl,
u8 *buf, int size, uint offset, uint timeout)
{
int const max_size = 16;
int rc = 0, write_size = 0, bytes_written = 0;
pr_debug("offset(0x%x), size(%d)\n", offset, size);
do {
write_size = min(size, max_size);
bytes_written = drm_dp_dpcd_write(ctrl->init_data.drm_aux,
offset, buf, write_size);
if (bytes_written != write_size) {
pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
offset, write_size, bytes_written);
rc = -EINVAL;
break;
}
buf += write_size;
offset += write_size;
size -= write_size;
} while (size > 0);
return rc;
}
static bool dp_hdcp2p2_feature_supported(void *input)
{
int rc;
struct dp_hdcp2p2_ctrl *ctrl = input;
struct sde_hdcp_2x_ops *lib = NULL;
bool supported = false;
rc = dp_hdcp2p2_valid_handle(ctrl);
if (rc)
return supported;
lib = ctrl->lib;
if (lib->feature_supported)
supported = lib->feature_supported(
ctrl->lib_ctx);
return supported;
}
static void dp_hdcp2p2_force_encryption(void *data, bool enable)
{
int rc;
struct dp_hdcp2p2_ctrl *ctrl = data;
struct sde_hdcp_2x_ops *lib = NULL;
rc = dp_hdcp2p2_valid_handle(ctrl);
if (rc)
return;
lib = ctrl->lib;
if (lib->force_encryption)
lib->force_encryption(ctrl->lib_ctx, enable);
}
static void dp_hdcp2p2_send_msg(struct dp_hdcp2p2_ctrl *ctrl)
{
int rc = 0;
struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
if (!ctrl) {
pr_err("invalid input\n");
rc = -EINVAL;
goto exit;
}
cdata.context = ctrl->lib_ctx;
if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
pr_err("hdcp is off\n");
goto exit;
}
mutex_lock(&ctrl->msg_lock);
rc = dp_hdcp2p2_aux_write_message(ctrl, ctrl->response.data,
ctrl->response.length, ctrl->msg_part->offset,
ctrl->timeout);
if (rc) {
pr_err("Error sending msg to sink %d\n", rc);
mutex_unlock(&ctrl->msg_lock);
goto exit;
}
cdata.cmd = HDCP_2X_CMD_MSG_SEND_SUCCESS;
cdata.timeout = ctrl->timeout;
mutex_unlock(&ctrl->msg_lock);
exit:
if (rc == -ETIMEDOUT)
cdata.cmd = HDCP_2X_CMD_MSG_SEND_TIMEOUT;
else if (rc)
cdata.cmd = HDCP_2X_CMD_MSG_SEND_FAILED;
dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
}
static int dp_hdcp2p2_get_msg_from_sink(struct dp_hdcp2p2_ctrl *ctrl)
{
int rc = 0;
struct sde_hdcp_2x_wakeup_data cdata = { HDCP_2X_CMD_INVALID };
cdata.context = ctrl->lib_ctx;
rc = dp_hdcp2p2_aux_read_message(ctrl);
if (rc) {
pr_err("error reading message %d\n", rc);
goto exit;
}
cdata.total_message_length = ctrl->total_message_length;
cdata.timeout = ctrl->timeout;
exit:
if (rc == -ETIMEDOUT)
cdata.cmd = HDCP_2X_CMD_MSG_RECV_TIMEOUT;
else if (rc)
cdata.cmd = HDCP_2X_CMD_MSG_RECV_FAILED;
else
cdata.cmd = HDCP_2X_CMD_MSG_RECV_SUCCESS;
dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
return rc;
}
static void dp_hdcp2p2_recv_msg(struct dp_hdcp2p2_ctrl *ctrl)
{
struct sde_hdcp_2x_wakeup_data cdata = { HDCP_2X_CMD_INVALID };
cdata.context = ctrl->lib_ctx;
if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
pr_err("hdcp is off\n");
return;
}
dp_hdcp2p2_get_msg_from_sink(ctrl);
}
static void dp_hdcp2p2_link_check(struct dp_hdcp2p2_ctrl *ctrl)
{
int rc = 0, retries = 10;
struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
if (!ctrl) {
pr_err("invalid input\n");
return;
}
if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL ||
atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
pr_err("invalid hdcp state\n");
return;
}
cdata.context = ctrl->lib_ctx;
if (ctrl->sink_rx_status & ctrl->abort_mask) {
if (ctrl->sink_rx_status & BIT(3))
pr_err("reauth_req set by sink\n");
if (ctrl->sink_rx_status & BIT(4))
pr_err("link failure reported by sink\n");
ctrl->sink_rx_status = 0;
ctrl->rx_status = 0;
rc = -ENOLINK;
cdata.cmd = HDCP_2X_CMD_LINK_FAILED;
atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
goto exit;
}
/* wait for polling to start till spec allowed timeout */
while (!ctrl->polling && retries--)
msleep(20);
/* check if sink has made a message available */
if (ctrl->polling && (ctrl->sink_rx_status & ctrl->rx_status)) {
ctrl->sink_rx_status = 0;
ctrl->rx_status = 0;
dp_hdcp2p2_get_msg_from_sink(ctrl);
ctrl->polling = false;
}
exit:
if (rc)
dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
}
static void dp_hdcp2p2_start_auth(struct dp_hdcp2p2_ctrl *ctrl)
{
struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_START_AUTH};
cdata.context = ctrl->lib_ctx;
if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING)
dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
}
static int dp_hdcp2p2_read_rx_status(struct dp_hdcp2p2_ctrl *ctrl,
u8 *rx_status)
{
u32 const cp_irq_dpcd_offset = 0x201;
u32 const rxstatus_dpcd_offset = 0x69493;
ssize_t const bytes_to_read = 1;
ssize_t bytes_read = 0;
u8 buf = 0;
int rc = 0;
bool cp_irq = false;
*rx_status = 0;
bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux,
cp_irq_dpcd_offset, &buf, bytes_to_read);
if (bytes_read != bytes_to_read) {
pr_err("cp irq read failed\n");
rc = bytes_read;
goto error;
}
cp_irq = buf & BIT(2);
pr_debug("cp_irq=0x%x\n", cp_irq);
buf = 0;
if (cp_irq) {
bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux,
rxstatus_dpcd_offset, &buf, bytes_to_read);
if (bytes_read != bytes_to_read) {
pr_err("rxstatus read failed\n");
rc = bytes_read;
goto error;
}
*rx_status = buf;
pr_debug("rx_status=0x%x\n", *rx_status);
}
error:
return rc;
}
static int dp_hdcp2p2_cp_irq(void *input)
{
int rc;
struct dp_hdcp2p2_ctrl *ctrl = input;
rc = dp_hdcp2p2_valid_handle(ctrl);
if (rc)
return rc;
if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL ||
atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
pr_err("invalid hdcp state\n");
return -EINVAL;
}
ctrl->sink_rx_status = 0;
rc = dp_hdcp2p2_read_rx_status(ctrl, &ctrl->sink_rx_status);
if (rc) {
pr_err("failed to read rx status\n");
return rc;
}
pr_debug("sink_rx_status=0x%x\n", ctrl->sink_rx_status);
if (!ctrl->sink_rx_status) {
pr_debug("not a hdcp 2.2 irq\n");
return -EINVAL;
}
kfifo_put(&ctrl->cmd_q, HDCP_TRANSPORT_CMD_LINK_CHECK);
wake_up(&ctrl->wait_q);
return 0;
}
static int dp_hdcp2p2_isr(void *input)
{
struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
int rc = 0;
struct dss_io_data *io;
struct dp_hdcp2p2_interrupts *intr;
u32 hdcp_int_val = 0;
if (!ctrl || !ctrl->init_data.dp_ahb) {
pr_err("invalid input\n");
rc = -EINVAL;
goto end;
}
io = ctrl->init_data.dp_ahb;
intr = ctrl->intr;
while (intr && intr->reg) {
struct dp_hdcp2p2_int_set *int_set = intr->int_set;
hdcp_int_val = dp_read(io->base + intr->reg);
while (int_set && int_set->interrupt) {
if (hdcp_int_val & (int_set->interrupt >> 2)) {
pr_debug("%s\n", int_set->name);
if (int_set->func)
int_set->func(ctrl);
dp_write(io->base + intr->reg, hdcp_int_val |
(int_set->interrupt >> 1));
}
int_set++;
}
intr++;
}
end:
return rc;
}
static bool dp_hdcp2p2_supported(void *input)
{
struct dp_hdcp2p2_ctrl *ctrl = input;
u32 const rxcaps_dpcd_offset = 0x6921d;
ssize_t bytes_read = 0;
u8 buf[DP_HDCP_RXCAPS_LENGTH];
pr_debug("Checking sink capability\n");
bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux,
rxcaps_dpcd_offset, &buf, DP_HDCP_RXCAPS_LENGTH);
if (bytes_read != DP_HDCP_RXCAPS_LENGTH) {
pr_err("RxCaps read failed\n");
goto error;
}
pr_debug("HDCP_CAPABLE=%lu\n", (buf[2] & BIT(1)) >> 1);
pr_debug("VERSION=%d\n", buf[0]);
if ((buf[2] & BIT(1)) && (buf[0] == 0x2))
return true;
error:
return false;
}
static int dp_hdcp2p2_change_streams(struct dp_hdcp2p2_ctrl *ctrl,
struct sde_hdcp_2x_wakeup_data *cdata)
{
if (!ctrl || cdata->num_streams == 0 || !cdata->streams) {
pr_err("invalid input\n");
return -EINVAL;
}
if (!ctrl->lib_ctx) {
pr_err("HDCP library needs to be acquired\n");
return -EINVAL;
}
if (!ctrl->lib) {
pr_err("invalid lib ops data\n");
return -EINVAL;
}
cdata->context = ctrl->lib_ctx;
return ctrl->lib->wakeup(cdata);
}
static int dp_hdcp2p2_register_streams(void *input, u8 num_streams,
struct stream_info *streams)
{
struct dp_hdcp2p2_ctrl *ctrl = input;
struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_OPEN_STREAMS};
cdata.streams = streams;
cdata.num_streams = num_streams;
return dp_hdcp2p2_change_streams(ctrl, &cdata);
}
static int dp_hdcp2p2_deregister_streams(void *input, u8 num_streams,
struct stream_info *streams)
{
struct dp_hdcp2p2_ctrl *ctrl = input;
struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_CLOSE_STREAMS};
cdata.streams = streams;
cdata.num_streams = num_streams;
return dp_hdcp2p2_change_streams(ctrl, &cdata);
}
void sde_dp_hdcp2p2_deinit(void *input)
{
struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
if (!ctrl) {
pr_err("invalid input\n");
return;
}
if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) {
cdata.cmd = HDCP_2X_CMD_STOP;
cdata.context = ctrl->lib_ctx;
dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
}
sde_hdcp_2x_deregister(ctrl->lib_ctx);
kthread_stop(ctrl->thread);
mutex_destroy(&ctrl->mutex);
mutex_destroy(&ctrl->msg_lock);
kfree(ctrl);
}
static int dp_hdcp2p2_main(void *data)
{
struct dp_hdcp2p2_ctrl *ctrl = data;
enum hdcp_transport_wakeup_cmd cmd;
while (1) {
wait_event(ctrl->wait_q,
!kfifo_is_empty(&ctrl->cmd_q) ||
kthread_should_stop() ||
kthread_should_park());
if (kthread_should_stop())
break;
if (kfifo_is_empty(&ctrl->cmd_q) && kthread_should_park()) {
kthread_parkme();
continue;
}
if (!kfifo_get(&ctrl->cmd_q, &cmd))
continue;
switch (cmd) {
case HDCP_TRANSPORT_CMD_SEND_MESSAGE:
dp_hdcp2p2_send_msg(ctrl);
break;
case HDCP_TRANSPORT_CMD_RECV_MESSAGE:
if (ctrl->rx_status)
ctrl->polling = true;
else
dp_hdcp2p2_recv_msg(ctrl);
break;
case HDCP_TRANSPORT_CMD_STATUS_SUCCESS:
dp_hdcp2p2_send_auth_status(ctrl);
break;
case HDCP_TRANSPORT_CMD_STATUS_FAILED:
dp_hdcp2p2_set_interrupts(ctrl, false);
dp_hdcp2p2_send_auth_status(ctrl);
break;
case HDCP_TRANSPORT_CMD_LINK_POLL:
ctrl->polling = true;
break;
case HDCP_TRANSPORT_CMD_LINK_CHECK:
dp_hdcp2p2_link_check(ctrl);
break;
case HDCP_TRANSPORT_CMD_AUTHENTICATE:
dp_hdcp2p2_start_auth(ctrl);
break;
default:
break;
}
}
return 0;
}
void *sde_dp_hdcp2p2_init(struct sde_hdcp_init_data *init_data)
{
int rc;
struct dp_hdcp2p2_ctrl *ctrl;
static struct sde_hdcp_ops ops = {
.isr = dp_hdcp2p2_isr,
.reauthenticate = dp_hdcp2p2_reauthenticate,
.authenticate = dp_hdcp2p2_authenticate,
.feature_supported = dp_hdcp2p2_feature_supported,
.force_encryption = dp_hdcp2p2_force_encryption,
.sink_support = dp_hdcp2p2_supported,
.set_mode = dp_hdcp2p2_register,
.on = dp_hdcp2p2_on,
.off = dp_hdcp2p2_off,
.cp_irq = dp_hdcp2p2_cp_irq,
.register_streams = dp_hdcp2p2_register_streams,
.deregister_streams = dp_hdcp2p2_deregister_streams,
};
static struct hdcp_transport_ops client_ops = {
.wakeup = dp_hdcp2p2_wakeup,
};
static struct dp_hdcp2p2_int_set int_set1[] = {
{BIT(17), "authentication successful", NULL},
{BIT(20), "authentication failed", NULL},
{BIT(24), "encryption enabled", NULL},
{BIT(27), "encryption disabled", NULL},
{0},
};
static struct dp_hdcp2p2_int_set int_set2[] = {
{BIT(2), "key fifo underflow", NULL},
{0},
};
static struct dp_hdcp2p2_interrupts intr[] = {
{DP_INTR_STATUS2, int_set1},
{DP_INTR_STATUS3, int_set2},
{0}
};
static struct sde_hdcp_2x_ops hdcp2x_ops;
struct sde_hdcp_2x_register_data register_data = {0};
if (!init_data || !init_data->cb_data ||
!init_data->notify_status || !init_data->drm_aux) {
pr_err("invalid input\n");
return ERR_PTR(-EINVAL);
}
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return ERR_PTR(-ENOMEM);
ctrl->init_data = *init_data;
ctrl->lib = &hdcp2x_ops;
ctrl->response.data = NULL;
ctrl->request.data = NULL;
ctrl->sink_status = SINK_DISCONNECTED;
ctrl->intr = intr;
INIT_KFIFO(ctrl->cmd_q);
init_waitqueue_head(&ctrl->wait_q);
atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
ctrl->ops = &ops;
mutex_init(&ctrl->mutex);
mutex_init(&ctrl->msg_lock);
register_data.hdcp_data = &ctrl->lib_ctx;
register_data.client_ops = &client_ops;
register_data.ops = &hdcp2x_ops;
register_data.client_data = ctrl;
rc = sde_hdcp_2x_register(&register_data);
if (rc) {
pr_err("Unable to register with HDCP 2.2 library\n");
goto error;
}
if (IS_ENABLED(CONFIG_HDCP_QSEECOM))
msm_hdcp_register_cb(init_data->msm_hdcp_dev, ctrl,
dp_hdcp2p2_min_level_change);
ctrl->thread = kthread_run(dp_hdcp2p2_main, ctrl, "dp_hdcp2p2");
if (IS_ERR(ctrl->thread)) {
pr_err("unable to start DP hdcp2p2 thread\n");
rc = PTR_ERR(ctrl->thread);
ctrl->thread = NULL;
goto error;
}
return ctrl;
error:
kfree(ctrl);
return ERR_PTR(rc);
}
struct sde_hdcp_ops *sde_dp_hdcp2p2_get(void *input)
{
return ((struct dp_hdcp2p2_ctrl *)input)->ops;
}
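For context, a minimal sketch of how a DP display driver might wire this module up; the dp_display_private structure, the dp_display_hdcp_status_cb callback, and the field names are hypothetical, and error handling is abbreviated:

/* Hypothetical caller-side wiring, for illustration only */
static int dp_display_hdcp2p2_setup(struct dp_display_private *dp)
{
	struct sde_hdcp_init_data init_data = {
		.drm_aux = dp->aux->drm_aux,
		.cb_data = dp,
		.notify_status = dp_display_hdcp_status_cb,
		.dp_ahb = dp->dp_ahb, /* struct dss_io_data *, hypothetical field */
	};
	void *hdcp = sde_dp_hdcp2p2_init(&init_data);

	if (IS_ERR(hdcp))
		return PTR_ERR(hdcp);

	dp->hdcp_ops = sde_dp_hdcp2p2_get(hdcp);
	return 0;
}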

99
msm/dp/dp_hpd.c Normal file

@@ -0,0 +1,99 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include "dp_hpd.h"
#include "dp_usbpd.h"
#include "dp_gpio_hpd.h"
#include "dp_lphw_hpd.h"
static void dp_hpd_host_init(struct dp_hpd *dp_hpd,
struct dp_catalog_hpd *catalog)
{
if (!catalog) {
pr_err("invalid input\n");
return;
}
catalog->config_hpd(catalog, true);
}
static void dp_hpd_host_deinit(struct dp_hpd *dp_hpd,
struct dp_catalog_hpd *catalog)
{
if (!catalog) {
pr_err("invalid input\n");
return;
}
catalog->config_hpd(catalog, false);
}
static void dp_hpd_isr(struct dp_hpd *dp_hpd)
{
}
struct dp_hpd *dp_hpd_get(struct device *dev, struct dp_parser *parser,
struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb)
{
struct dp_hpd *dp_hpd;
if (parser->no_aux_switch && parser->lphw_hpd) {
dp_hpd = dp_lphw_hpd_get(dev, parser, catalog, cb);
if (IS_ERR(dp_hpd)) {
pr_err("failed to get lphw hpd\n");
return dp_hpd;
}
dp_hpd->type = DP_HPD_LPHW;
} else if (parser->no_aux_switch) {
dp_hpd = dp_gpio_hpd_get(dev, cb);
if (IS_ERR(dp_hpd)) {
pr_err("failed to get gpio hpd\n");
return dp_hpd;
}
dp_hpd->type = DP_HPD_GPIO;
} else {
dp_hpd = dp_usbpd_get(dev, cb);
if (IS_ERR(dp_hpd)) {
pr_err("failed to get usbpd\n");
return dp_hpd;
}
dp_hpd->type = DP_HPD_USBPD;
}
if (!dp_hpd->host_init)
dp_hpd->host_init = dp_hpd_host_init;
if (!dp_hpd->host_deinit)
dp_hpd->host_deinit = dp_hpd_host_deinit;
if (!dp_hpd->isr)
dp_hpd->isr = dp_hpd_isr;
return dp_hpd;
}
void dp_hpd_put(struct dp_hpd *dp_hpd)
{
if (!dp_hpd)
return;
switch (dp_hpd->type) {
case DP_HPD_USBPD:
dp_usbpd_put(dp_hpd);
break;
case DP_HPD_GPIO:
dp_gpio_hpd_put(dp_hpd);
break;
case DP_HPD_LPHW:
dp_lphw_hpd_put(dp_hpd);
break;
default:
pr_err("unknown hpd type %d\n", dp_hpd->type);
break;
}
}

96
msm/dp/dp_hpd.h Normal file

@@ -0,0 +1,96 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_HPD_H_
#define _DP_HPD_H_
#include <linux/types.h>
#include "dp_parser.h"
#include "dp_catalog.h"
struct device;
/**
* enum dp_hpd_type - dp hpd type
* @DP_HPD_USBPD: USB type-c based HPD
* @DP_HPD_GPIO: GPIO based HPD
* @DP_HPD_LPHW: low-power hardware based HPD
* @DP_HPD_BUILTIN: Controller built-in HPD
*/
enum dp_hpd_type {
DP_HPD_USBPD,
DP_HPD_GPIO,
DP_HPD_LPHW,
DP_HPD_BUILTIN,
};
/**
* struct dp_hpd_cb - callback functions provided by the client
*
* @configure: called when dp connection is ready.
* @disconnect: notify the cable disconnect event.
* @attention: notify any attention message event.
*/
struct dp_hpd_cb {
int (*configure)(struct device *dev);
int (*disconnect)(struct device *dev);
int (*attention)(struct device *dev);
};
/**
* struct dp_hpd - DisplayPort HPD status
*
* @type: type of HPD
* @orientation: plug orientation configuration, USBPD type only.
* @hpd_high: Hot Plug Detect signal is high.
* @hpd_irq: Change in the status since last message
* @alt_mode_cfg_done: bool to specify alt mode status
* @multi_func: multi-function preferred, USBPD type only
* @isr: event interrupt, BUILTIN and LPHW type only
* @register_hpd: register hardware callback
* @host_init: source or host side setup for hpd
* @host_deinit: source or host side de-initializations
* @simulate_connect: simulate disconnect or connect for debug mode
* @simulate_attention: simulate attention messages for debug mode
*/
struct dp_hpd {
enum dp_hpd_type type;
u32 orientation;
bool hpd_high;
bool hpd_irq;
bool alt_mode_cfg_done;
bool multi_func;
void (*isr)(struct dp_hpd *dp_hpd);
int (*register_hpd)(struct dp_hpd *dp_hpd);
void (*host_init)(struct dp_hpd *hpd, struct dp_catalog_hpd *catalog);
void (*host_deinit)(struct dp_hpd *hpd, struct dp_catalog_hpd *catalog);
int (*simulate_connect)(struct dp_hpd *dp_hpd, bool hpd);
int (*simulate_attention)(struct dp_hpd *dp_hpd, int vdo);
};
/**
* dp_hpd_get() - configure and get the DisplayPort HPD module data
*
* @dev: device instance of the caller
* @parser: DP parser
* @catalog: DP catalog hpd module
* @cb: callback functions for HPD response
* return: pointer to allocated hpd module data
*
* This function sets up the hpd module
*/
struct dp_hpd *dp_hpd_get(struct device *dev, struct dp_parser *parser,
struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb);
/**
* dp_hpd_put()
*
* Cleans up dp_hpd instance
*
* @dp_hpd: instance of dp_hpd
*/
void dp_hpd_put(struct dp_hpd *dp_hpd);
#endif /* _DP_HPD_H_ */
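As a rough usage sketch (the dp_display_* callback names are hypothetical placeholders), a client implements struct dp_hpd_cb and then obtains and registers the HPD module:

/* Hypothetical client wiring, for illustration only */
static struct dp_hpd_cb dp_display_hpd_cb = {
	.configure  = dp_display_configure,
	.disconnect = dp_display_disconnect,
	.attention  = dp_display_attention,
};

static int dp_display_init_hpd(struct device *dev, struct dp_parser *parser,
		struct dp_catalog_hpd *catalog)
{
	struct dp_hpd *hpd = dp_hpd_get(dev, parser, catalog,
			&dp_display_hpd_cb);

	if (IS_ERR(hpd))
		return PTR_ERR(hpd);

	return hpd->register_hpd ? hpd->register_hpd(hpd) : 0;
}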

1526
msm/dp/dp_link.c Normal file

File diff suppressed because it is too large

205
msm/dp/dp_link.h Normal file

@@ -0,0 +1,205 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_LINK_H_
#define _DP_LINK_H_
#include "dp_aux.h"
#define DS_PORT_STATUS_CHANGED 0x200
#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
#define DP_LINK_ENUM_STR(x) #x
enum dp_link_voltage_level {
DP_LINK_VOLTAGE_LEVEL_0 = 0,
DP_LINK_VOLTAGE_LEVEL_1 = 1,
DP_LINK_VOLTAGE_LEVEL_2 = 2,
DP_LINK_VOLTAGE_MAX = DP_LINK_VOLTAGE_LEVEL_2,
};
enum dp_link_preemaphasis_level {
DP_LINK_PRE_EMPHASIS_LEVEL_0 = 0,
DP_LINK_PRE_EMPHASIS_LEVEL_1 = 1,
DP_LINK_PRE_EMPHASIS_LEVEL_2 = 2,
DP_LINK_PRE_EMPHASIS_MAX = DP_LINK_PRE_EMPHASIS_LEVEL_2,
};
struct dp_link_sink_count {
u32 count;
bool cp_ready;
};
struct dp_link_test_video {
u32 test_video_pattern;
u32 test_bit_depth;
u32 test_dyn_range;
u32 test_h_total;
u32 test_v_total;
u32 test_h_start;
u32 test_v_start;
u32 test_hsync_pol;
u32 test_hsync_width;
u32 test_vsync_pol;
u32 test_vsync_width;
u32 test_h_width;
u32 test_v_height;
u32 test_rr_d;
u32 test_rr_n;
};
struct dp_link_test_audio {
u32 test_audio_sampling_rate;
u32 test_audio_channel_count;
u32 test_audio_pattern_type;
u32 test_audio_period_ch_1;
u32 test_audio_period_ch_2;
u32 test_audio_period_ch_3;
u32 test_audio_period_ch_4;
u32 test_audio_period_ch_5;
u32 test_audio_period_ch_6;
u32 test_audio_period_ch_7;
u32 test_audio_period_ch_8;
};
struct dp_link_hdcp_status {
int hdcp_state;
int hdcp_version;
};
struct dp_link_phy_params {
u32 phy_test_pattern_sel;
u8 v_level;
u8 p_level;
};
struct dp_link_params {
u32 lane_count;
u32 bw_code;
};
static inline char *dp_link_get_test_name(u32 test_requested)
{
switch (test_requested) {
case DP_TEST_LINK_TRAINING:
return DP_LINK_ENUM_STR(DP_TEST_LINK_TRAINING);
case DP_TEST_LINK_VIDEO_PATTERN:
return DP_LINK_ENUM_STR(DP_TEST_LINK_VIDEO_PATTERN);
case DP_TEST_LINK_EDID_READ:
return DP_LINK_ENUM_STR(DP_TEST_LINK_EDID_READ);
case DP_TEST_LINK_PHY_TEST_PATTERN:
return DP_LINK_ENUM_STR(DP_TEST_LINK_PHY_TEST_PATTERN);
case DP_TEST_LINK_AUDIO_PATTERN:
return DP_LINK_ENUM_STR(DP_TEST_LINK_AUDIO_PATTERN);
case DS_PORT_STATUS_CHANGED:
return DP_LINK_ENUM_STR(DS_PORT_STATUS_CHANGED);
case DP_LINK_STATUS_UPDATED:
return DP_LINK_ENUM_STR(DP_LINK_STATUS_UPDATED);
default:
return "unknown";
}
}
struct dp_link {
u32 sink_request;
u32 test_response;
struct dp_link_sink_count sink_count;
struct dp_link_test_video test_video;
struct dp_link_test_audio test_audio;
struct dp_link_phy_params phy_params;
struct dp_link_params link_params;
struct dp_link_hdcp_status hdcp_status;
u32 (*get_test_bits_depth)(struct dp_link *dp_link, u32 bpp);
int (*process_request)(struct dp_link *dp_link);
int (*get_colorimetry_config)(struct dp_link *dp_link);
int (*adjust_levels)(struct dp_link *dp_link, u8 *link_status);
int (*send_psm_request)(struct dp_link *dp_link, bool req);
void (*send_test_response)(struct dp_link *dp_link);
int (*psm_config)(struct dp_link *dp_link,
struct drm_dp_link *link_info, bool enable);
void (*send_edid_checksum)(struct dp_link *dp_link, u8 checksum);
};
static inline char *dp_link_get_phy_test_pattern(u32 phy_test_pattern_sel)
{
switch (phy_test_pattern_sel) {
case DP_TEST_PHY_PATTERN_NONE:
return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_NONE);
case DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING:
return DP_LINK_ENUM_STR(
DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING);
case DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT:
return DP_LINK_ENUM_STR(
DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT);
case DP_TEST_PHY_PATTERN_PRBS7:
return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_PRBS7);
case DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN:
return DP_LINK_ENUM_STR(
DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN);
case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1:
return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_1);
case DP_TEST_PHY_PATTERN_CP2520_PATTERN_2:
return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_2);
case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3:
return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_3);
default:
return "unknown";
}
}
/**
* dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
* @tbd: test bit depth
*
* Returns the bits per pixel (bpp) to be used corresponding to the
* given bit depth value. This function assumes that bit depth has
* already been validated.
*/
static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
{
u32 bpp;
/*
* A few simplifying rules and assumptions are made here:
* 1. Bit depth is per color component
* 2. If bit depth is unknown return 0
* 3. Assume 3 color components
*/
switch (tbd) {
case DP_TEST_BIT_DEPTH_6:
bpp = 18;
break;
case DP_TEST_BIT_DEPTH_8:
bpp = 24;
break;
case DP_TEST_BIT_DEPTH_10:
bpp = 30;
break;
case DP_TEST_BIT_DEPTH_UNKNOWN:
default:
bpp = 0;
}
return bpp;
}
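/*
 * Worked example (illustrative): bit depth is per color component and
 * three components are assumed, so DP_TEST_BIT_DEPTH_10 maps to
 * 10 * 3 = 30 bpp:
 *
 *   u32 bpp = dp_link_bit_depth_to_bpp(DP_TEST_BIT_DEPTH_10); // 30
 */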
/**
* dp_link_get() - get the functionalities of dp test module
*
* @dev: device instance of the caller
* @aux: handle to the dp aux module
*
* return: a pointer to dp_link struct
*/
struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux);
/**
* dp_link_put() - releases the dp test module's resources
*
* @dp_link: an instance of dp_link module
*
*/
void dp_link_put(struct dp_link *dp_link);
#endif /* _DP_LINK_H_ */
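For illustration, a sketch of how a caller's HPD_IRQ path might drive this interface (the surrounding function is hypothetical): once process_request() has parsed the sink's DPCD, sink_request can be tested against the DP_TEST_* flags:

/* Hypothetical HPD_IRQ handling path, for illustration only */
static void dp_display_handle_sink_request(struct dp_link *link)
{
	if (link->process_request(link))
		return;

	if (link->sink_request & DP_TEST_LINK_TRAINING)
		pr_debug("sink requested %s\n",
			dp_link_get_test_name(DP_TEST_LINK_TRAINING));
}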

422
msm/dp/dp_lphw_hpd.c Normal file

@@ -0,0 +1,422 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/sde_io_util.h>
#include <linux/of_gpio.h>
#include "dp_lphw_hpd.h"
struct dp_lphw_hpd_private {
struct device *dev;
struct dp_hpd base;
struct dp_parser *parser;
struct dp_catalog_hpd *catalog;
struct dss_gpio gpio_cfg;
struct workqueue_struct *connect_wq;
struct delayed_work work;
struct work_struct connect;
struct work_struct disconnect;
struct work_struct attention;
struct dp_hpd_cb *cb;
int irq;
bool hpd;
};
static void dp_lphw_hpd_attention(struct work_struct *work)
{
struct dp_lphw_hpd_private *lphw_hpd = container_of(work,
struct dp_lphw_hpd_private, attention);
if (!lphw_hpd) {
pr_err("invalid input\n");
return;
}
lphw_hpd->base.hpd_irq = true;
if (lphw_hpd->cb && lphw_hpd->cb->attention)
lphw_hpd->cb->attention(lphw_hpd->dev);
}
static void dp_lphw_hpd_connect(struct work_struct *work)
{
struct dp_lphw_hpd_private *lphw_hpd = container_of(work,
struct dp_lphw_hpd_private, connect);
if (!lphw_hpd) {
pr_err("invalid input\n");
return;
}
lphw_hpd->base.hpd_high = true;
lphw_hpd->base.alt_mode_cfg_done = true;
lphw_hpd->base.hpd_irq = false;
if (lphw_hpd->cb && lphw_hpd->cb->configure)
lphw_hpd->cb->configure(lphw_hpd->dev);
}
static void dp_lphw_hpd_disconnect(struct work_struct *work)
{
struct dp_lphw_hpd_private *lphw_hpd = container_of(work,
struct dp_lphw_hpd_private, disconnect);
if (!lphw_hpd) {
pr_err("invalid input\n");
return;
}
lphw_hpd->base.hpd_high = false;
lphw_hpd->base.alt_mode_cfg_done = false;
lphw_hpd->base.hpd_irq = false;
if (lphw_hpd->cb && lphw_hpd->cb->disconnect)
lphw_hpd->cb->disconnect(lphw_hpd->dev);
}
static irqreturn_t dp_tlmm_isr(int unused, void *data)
{
struct dp_lphw_hpd_private *lphw_hpd = data;
bool hpd;
if (!lphw_hpd)
return IRQ_NONE;
/*
* According to the DP spec, an HPD high event can be confirmed only after
* the HPD line has been asserted continuously for more than 100ms
*/
usleep_range(99000, 100000);
hpd = gpio_get_value_cansleep(lphw_hpd->gpio_cfg.gpio);
pr_debug("lphw_hpd state = %d, new hpd state = %d\n",
lphw_hpd->hpd, hpd);
if (!lphw_hpd->hpd && hpd) {
lphw_hpd->hpd = true;
queue_work(lphw_hpd->connect_wq, &lphw_hpd->connect);
}
return IRQ_HANDLED;
}
static void dp_lphw_hpd_host_init(struct dp_hpd *dp_hpd,
struct dp_catalog_hpd *catalog)
{
struct dp_lphw_hpd_private *lphw_hpd;
if (!dp_hpd) {
pr_err("invalid input\n");
return;
}
lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
lphw_hpd->catalog->config_hpd(lphw_hpd->catalog, true);
/*
* Changing the gpio function of the hpd line over to the dp controller
* does not stop the tlmm from generating interrupts on function 0, so
* additionally disable the gpio interrupt irq here.
*/
disable_irq(lphw_hpd->irq);
}
static void dp_lphw_hpd_host_deinit(struct dp_hpd *dp_hpd,
struct dp_catalog_hpd *catalog)
{
struct dp_lphw_hpd_private *lphw_hpd;
if (!dp_hpd) {
pr_err("invalid input\n");
return;
}
lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
/* Enable the tlmm interrupt irq which was disabled in host_init */
enable_irq(lphw_hpd->irq);
lphw_hpd->catalog->config_hpd(lphw_hpd->catalog, false);
}
static void dp_lphw_hpd_isr(struct dp_hpd *dp_hpd)
{
struct dp_lphw_hpd_private *lphw_hpd;
u32 isr = 0;
int rc = 0;
if (!dp_hpd) {
pr_err("invalid input\n");
return;
}
lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
isr = lphw_hpd->catalog->get_interrupt(lphw_hpd->catalog);
if (isr & DP_HPD_UNPLUG_INT_STATUS) { /* disconnect interrupt */
pr_debug("disconnect interrupt, hpd isr state: 0x%x\n", isr);
if (lphw_hpd->base.hpd_high) {
lphw_hpd->hpd = false;
lphw_hpd->base.hpd_high = false;
lphw_hpd->base.alt_mode_cfg_done = false;
lphw_hpd->base.hpd_irq = false;
rc = queue_work(lphw_hpd->connect_wq,
&lphw_hpd->disconnect);
if (!rc)
pr_debug("disconnect not queued\n");
} else {
pr_err("already disconnected\n");
}
} else if (isr & DP_IRQ_HPD_INT_STATUS) { /* attention interrupt */
pr_debug("hpd_irq interrupt, hpd isr state: 0x%x\n", isr);
rc = queue_work(lphw_hpd->connect_wq, &lphw_hpd->attention);
if (!rc)
pr_debug("attention not queued\n");
}
}
static int dp_lphw_hpd_simulate_connect(struct dp_hpd *dp_hpd, bool hpd)
{
struct dp_lphw_hpd_private *lphw_hpd;
if (!dp_hpd) {
pr_err("invalid input\n");
return -EINVAL;
}
lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
lphw_hpd->base.hpd_high = hpd;
lphw_hpd->base.alt_mode_cfg_done = hpd;
lphw_hpd->base.hpd_irq = false;
if (!lphw_hpd->cb || !lphw_hpd->cb->configure ||
!lphw_hpd->cb->disconnect) {
pr_err("invalid callback\n");
return -EINVAL;
}
if (hpd)
lphw_hpd->cb->configure(lphw_hpd->dev);
else
lphw_hpd->cb->disconnect(lphw_hpd->dev);
return 0;
}
static int dp_lphw_hpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo)
{
struct dp_lphw_hpd_private *lphw_hpd;
if (!dp_hpd) {
pr_err("invalid input\n");
return -EINVAL;
}
lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
lphw_hpd->base.hpd_irq = true;
if (lphw_hpd->cb && lphw_hpd->cb->attention)
lphw_hpd->cb->attention(lphw_hpd->dev);
return 0;
}
int dp_lphw_hpd_register(struct dp_hpd *dp_hpd)
{
struct dp_lphw_hpd_private *lphw_hpd;
int rc = 0;
if (!dp_hpd)
return -EINVAL;
lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
lphw_hpd->hpd = gpio_get_value_cansleep(lphw_hpd->gpio_cfg.gpio);
rc = devm_request_threaded_irq(lphw_hpd->dev, lphw_hpd->irq, NULL,
dp_tlmm_isr,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"dp-gpio-intp", lphw_hpd);
if (rc) {
pr_err("Failed to request INTP threaded IRQ: %d\n", rc);
return rc;
}
enable_irq_wake(lphw_hpd->irq);
if (lphw_hpd->hpd)
queue_work(lphw_hpd->connect_wq, &lphw_hpd->connect);
return rc;
}
static void dp_lphw_hpd_deinit(struct dp_lphw_hpd_private *lphw_hpd)
{
struct dp_parser *parser = lphw_hpd->parser;
int i = 0;
for (i = 0; i < parser->mp[DP_PHY_PM].num_vreg; i++) {
if (!strcmp(parser->mp[DP_PHY_PM].vreg_config[i].vreg_name,
"hpd-pwr")) {
/* disable the hpd-pwr voltage regulator */
if (msm_dss_enable_vreg(
&parser->mp[DP_PHY_PM].vreg_config[i], 1,
false))
pr_err("hpd-pwr vreg not disabled\n");
break;
}
}
}
static void dp_lphw_hpd_init(struct dp_lphw_hpd_private *lphw_hpd)
{
struct dp_pinctrl pinctrl = {0};
struct dp_parser *parser = lphw_hpd->parser;
int i = 0, rc = 0;
for (i = 0; i < parser->mp[DP_PHY_PM].num_vreg; i++) {
if (!strcmp(parser->mp[DP_PHY_PM].vreg_config[i].vreg_name,
"hpd-pwr")) {
/* enable the hpd-pwr voltage regulator */
if (msm_dss_enable_vreg(
&parser->mp[DP_PHY_PM].vreg_config[i], 1,
true))
pr_err("hpd-pwr vreg not enabled\n");
break;
}
}
pinctrl.pin = devm_pinctrl_get(lphw_hpd->dev);
if (!IS_ERR_OR_NULL(pinctrl.pin)) {
pinctrl.state_hpd_active = pinctrl_lookup_state(pinctrl.pin,
"mdss_dp_hpd_active");
if (!IS_ERR_OR_NULL(pinctrl.state_hpd_active)) {
rc = pinctrl_select_state(pinctrl.pin,
pinctrl.state_hpd_active);
if (rc)
pr_err("failed to set hpd_active state\n");
}
}
}
static int dp_lphw_hpd_create_workqueue(struct dp_lphw_hpd_private *lphw_hpd)
{
lphw_hpd->connect_wq = create_singlethread_workqueue("dp_lphw_work");
if (IS_ERR_OR_NULL(lphw_hpd->connect_wq)) {
pr_err("Error creating connect_wq\n");
return -EPERM;
}
INIT_WORK(&lphw_hpd->connect, dp_lphw_hpd_connect);
INIT_WORK(&lphw_hpd->disconnect, dp_lphw_hpd_disconnect);
INIT_WORK(&lphw_hpd->attention, dp_lphw_hpd_attention);
return 0;
}
struct dp_hpd *dp_lphw_hpd_get(struct device *dev, struct dp_parser *parser,
struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb)
{
int rc = 0;
const char *hpd_gpio_name = "qcom,dp-hpd-gpio";
struct dp_lphw_hpd_private *lphw_hpd;
if (!dev || !parser || !cb) {
pr_err("invalid device\n");
rc = -EINVAL;
goto error;
}
lphw_hpd = devm_kzalloc(dev, sizeof(*lphw_hpd), GFP_KERNEL);
if (!lphw_hpd) {
rc = -ENOMEM;
goto error;
}
lphw_hpd->gpio_cfg.gpio = of_get_named_gpio(dev->of_node,
hpd_gpio_name, 0);
if (!gpio_is_valid(lphw_hpd->gpio_cfg.gpio)) {
pr_err("%s gpio not specified\n", hpd_gpio_name);
rc = -EINVAL;
goto gpio_error;
}
strlcpy(lphw_hpd->gpio_cfg.gpio_name, hpd_gpio_name,
sizeof(lphw_hpd->gpio_cfg.gpio_name));
lphw_hpd->gpio_cfg.value = 0;
rc = gpio_request(lphw_hpd->gpio_cfg.gpio,
lphw_hpd->gpio_cfg.gpio_name);
if (rc) {
pr_err("%s: failed to request gpio\n", hpd_gpio_name);
goto gpio_error;
}
gpio_direction_input(lphw_hpd->gpio_cfg.gpio);
lphw_hpd->dev = dev;
lphw_hpd->cb = cb;
lphw_hpd->irq = gpio_to_irq(lphw_hpd->gpio_cfg.gpio);
rc = dp_lphw_hpd_create_workqueue(lphw_hpd);
if (rc) {
pr_err("Failed to create a dp_hpd workqueue\n");
goto gpio_error;
}
lphw_hpd->parser = parser;
lphw_hpd->catalog = catalog;
lphw_hpd->base.isr = dp_lphw_hpd_isr;
lphw_hpd->base.host_init = dp_lphw_hpd_host_init;
lphw_hpd->base.host_deinit = dp_lphw_hpd_host_deinit;
lphw_hpd->base.simulate_connect = dp_lphw_hpd_simulate_connect;
lphw_hpd->base.simulate_attention = dp_lphw_hpd_simulate_attention;
lphw_hpd->base.register_hpd = dp_lphw_hpd_register;
dp_lphw_hpd_init(lphw_hpd);
return &lphw_hpd->base;
gpio_error:
devm_kfree(dev, lphw_hpd);
error:
return ERR_PTR(rc);
}
void dp_lphw_hpd_put(struct dp_hpd *dp_hpd)
{
struct dp_lphw_hpd_private *lphw_hpd;
if (!dp_hpd)
return;
lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base);
dp_lphw_hpd_deinit(lphw_hpd);
gpio_free(lphw_hpd->gpio_cfg.gpio);
devm_kfree(lphw_hpd->dev, lphw_hpd);
}

36
msm/dp/dp_lphw_hpd.h Normal file

@@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_LPHW_HPD_H_
#define _DP_LPHW_HPD_H_
#include "dp_hpd.h"
#define DP_HPD_PLUG_INT_STATUS BIT(0)
#define DP_IRQ_HPD_INT_STATUS BIT(1)
#define DP_HPD_REPLUG_INT_STATUS BIT(2)
#define DP_HPD_UNPLUG_INT_STATUS BIT(3)
/**
* dp_lphw_hpd_get() - configure and get the DisplayPort HPD module data
*
* @dev: device instance of the caller
* @parser: DP parser
* @catalog: DP catalog hpd module
* @cb: callback functions for HPD response
* return: pointer to allocated lphw hpd module data
*
* This function sets up the lphw hpd module
*/
struct dp_hpd *dp_lphw_hpd_get(struct device *dev, struct dp_parser *parser,
struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb);
/**
* dp_lphw_hpd_put()
*
* Cleans up dp_hpd instance
*
* @hpd: instance of lphw_hpd
*/
void dp_lphw_hpd_put(struct dp_hpd *hpd);
#endif /* _DP_LPHW_HPD_H_ */
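A small sketch of decoding the catalog HPD status word against these masks, assuming a dp_catalog_hpd handle with the get_interrupt() hook used by dp_lphw_hpd_isr() above:

/* Illustrative decode of the HPD interrupt status bits */
static void dp_display_log_hpd_status(struct dp_catalog_hpd *catalog)
{
	u32 isr = catalog->get_interrupt(catalog);

	if (isr & DP_HPD_PLUG_INT_STATUS)
		pr_debug("plug interrupt\n");
	if (isr & DP_HPD_UNPLUG_INT_STATUS)
		pr_debug("unplug interrupt\n");
	if (isr & DP_IRQ_HPD_INT_STATUS)
		pr_debug("attention interrupt\n");
}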

2014
msm/dp/dp_mst_drm.c Normal file

File diff suppressed because it is too large

3004
msm/dp/dp_panel.c Normal file

File diff suppressed because it is too large

229
msm/dp/dp_panel.h Normal file

@@ -0,0 +1,229 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_PANEL_H_
#define _DP_PANEL_H_
#include <drm/msm_drm.h>
#include "dp_aux.h"
#include "dp_link.h"
#include "dp_usbpd.h"
#include "sde_edid_parser.h"
#include "sde_connector.h"
#include "msm_drv.h"
#define DP_RECEIVER_DSC_CAP_SIZE 15
#define DP_RECEIVER_FEC_STATUS_SIZE 3
/*
* A source initiated power down flag is set
* when the DP is powered off while the physical
* DP cable is still connected, i.e. without an
* HPD disconnect and not initiated by the sink
* via HPD_IRQ. This can happen if the framework
* reboots or the device suspends.
*/
#define DP_PANEL_SRC_INITIATED_POWER_DOWN BIT(0)
enum dp_lane_count {
DP_LANE_COUNT_1 = 1,
DP_LANE_COUNT_2 = 2,
DP_LANE_COUNT_4 = 4,
};
#define DP_MAX_DOWNSTREAM_PORTS 0x10
struct dp_panel_info {
u32 h_active;
u32 v_active;
u32 h_back_porch;
u32 h_front_porch;
u32 h_sync_width;
u32 h_active_low;
u32 v_back_porch;
u32 v_front_porch;
u32 v_sync_width;
u32 v_active_low;
u32 h_skew;
u32 refresh_rate;
u32 pixel_clk_khz;
u32 bpp;
bool widebus_en;
struct msm_compression_info comp_info;
s64 dsc_overhead_fp;
};
struct dp_display_mode {
struct dp_panel_info timing;
u32 capabilities;
s64 fec_overhead_fp;
s64 dsc_overhead_fp;
};
struct dp_panel;
struct dp_panel_in {
struct device *dev;
struct dp_aux *aux;
struct dp_link *link;
struct dp_catalog_panel *catalog;
struct drm_connector *connector;
struct dp_panel *base_panel;
struct dp_parser *parser;
};
struct dp_dsc_caps {
bool dsc_capable;
u8 version;
bool block_pred_en;
};
struct dp_audio;
#define DP_PANEL_CAPS_DSC BIT(0)
struct dp_panel {
/* dpcd raw data */
u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
u8 ds_ports[DP_MAX_DOWNSTREAM_PORTS];
u8 dsc_dpcd[DP_RECEIVER_DSC_CAP_SIZE + 1];
u8 fec_dpcd;
u8 fec_sts_dpcd[DP_RECEIVER_FEC_STATUS_SIZE + 1];
struct drm_dp_link link_info;
struct sde_edid_ctrl *edid_ctrl;
struct dp_panel_info pinfo;
bool video_test;
bool spd_enabled;
u32 vic;
u32 max_pclk_khz;
s64 mst_target_sc;
/* debug */
u32 max_bw_code;
/* By default, stream_id is assigned to DP_INVALID_STREAM.
* Client sets the stream id value using set_stream_id interface.
*/
enum dp_stream_id stream_id;
int vcpi;
u32 channel_start_slot;
u32 channel_total_slots;
u32 pbn;
u32 tot_dsc_blks_in_use;
/* DRM connector associated with this panel */
struct drm_connector *connector;
struct dp_audio *audio;
bool audio_supported;
struct dp_dsc_caps sink_dsc_caps;
bool dsc_feature_enable;
bool fec_feature_enable;
bool dsc_en;
bool fec_en;
bool widebus_en;
bool mst_state;
s64 fec_overhead_fp;
int (*init)(struct dp_panel *dp_panel);
int (*deinit)(struct dp_panel *dp_panel, u32 flags);
int (*hw_cfg)(struct dp_panel *dp_panel, bool enable);
int (*read_sink_caps)(struct dp_panel *dp_panel,
struct drm_connector *connector, bool multi_func);
u32 (*get_min_req_link_rate)(struct dp_panel *dp_panel);
u32 (*get_mode_bpp)(struct dp_panel *dp_panel, u32 mode_max_bpp,
u32 mode_pclk_khz);
int (*get_modes)(struct dp_panel *dp_panel,
struct drm_connector *connector, struct dp_display_mode *mode);
void (*handle_sink_request)(struct dp_panel *dp_panel);
int (*set_edid)(struct dp_panel *dp_panel, u8 *edid);
int (*set_dpcd)(struct dp_panel *dp_panel, u8 *dpcd);
int (*setup_hdr)(struct dp_panel *dp_panel,
struct drm_msm_ext_hdr_metadata *hdr_meta,
bool dhdr_update, u64 core_clk_rate);
void (*tpg_config)(struct dp_panel *dp_panel, bool enable);
int (*spd_config)(struct dp_panel *dp_panel);
bool (*hdr_supported)(struct dp_panel *dp_panel);
int (*set_stream_info)(struct dp_panel *dp_panel,
enum dp_stream_id stream_id, u32 ch_start_slot,
u32 ch_tot_slots, u32 pbn, int vcpi);
int (*read_sink_status)(struct dp_panel *dp_panel, u8 *sts, u32 size);
int (*update_edid)(struct dp_panel *dp_panel, struct edid *edid);
bool (*read_mst_cap)(struct dp_panel *dp_panel);
void (*convert_to_dp_mode)(struct dp_panel *dp_panel,
const struct drm_display_mode *drm_mode,
struct dp_display_mode *dp_mode);
void (*update_pps)(struct dp_panel *dp_panel, char *pps_cmd);
};
struct dp_tu_calc_input {
u64 lclk; /* 162, 270, 540 and 810 */
u64 pclk_khz; /* in KHz */
u64 hactive; /* active h-width */
u64 hporch; /* bp + fp + pulse */
int nlanes; /* no.of.lanes */
int bpp; /* bits */
int pixel_enc; /* 444, 420, 422 */
int dsc_en; /* dsc on/off */
int async_en; /* async mode */
int fec_en; /* fec */
int compress_ratio; /* 2:1 = 200, 3:1 = 300, 3.75:1 = 375 */
int num_of_dsc_slices; /* number of slices per line */
};
struct dp_vc_tu_mapping_table {
u32 vic;
u8 lanes;
u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */
u8 bpp;
u32 valid_boundary_link;
u32 delay_start_link;
bool boundary_moderation_en;
u32 valid_lower_boundary_link;
u32 upper_boundary_count;
u32 lower_boundary_count;
u32 tu_size_minus1;
};
/**
* is_link_rate_valid() - validates the link rate
* @bw_code: link rate bandwidth code requested by the sink
*
* Returns true if the requested link rate is supported.
*/
static inline bool is_link_rate_valid(u32 bw_code)
{
return ((bw_code == DP_LINK_BW_1_62) ||
(bw_code == DP_LINK_BW_2_7) ||
(bw_code == DP_LINK_BW_5_4) ||
(bw_code == DP_LINK_BW_8_1));
}
/**
* is_lane_count_valid() - validates the lane count
* @lane_count: lane count requested by the sink
*
* Returns true if the requested lane count is supported.
*/
static inline bool is_lane_count_valid(u32 lane_count)
{
return (lane_count == DP_LANE_COUNT_1) ||
(lane_count == DP_LANE_COUNT_2) ||
(lane_count == DP_LANE_COUNT_4);
}
struct dp_panel *dp_panel_get(struct dp_panel_in *in);
void dp_panel_put(struct dp_panel *dp_panel);
void dp_panel_calc_tu_test(struct dp_tu_calc_input *in,
struct dp_vc_tu_mapping_table *tu_table);
#endif /* _DP_PANEL_H_ */
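As an illustrative helper (not part of this file), the two validators compose naturally when sanity-checking sink-requested link parameters:

static bool dp_panel_link_params_valid(u32 bw_code, u32 lane_count)
{
	return is_link_rate_valid(bw_code) && is_lane_count_valid(lane_count);
}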

933
msm/dp/dp_parser.c Normal file

@@ -0,0 +1,933 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include "dp_parser.h"
static void dp_parser_unmap_io_resources(struct dp_parser *parser)
{
int i = 0;
struct dp_io *io = &parser->io;
for (i = 0; i < io->len; i++)
msm_dss_iounmap(&io->data[i].io);
}
static int dp_parser_reg(struct dp_parser *parser)
{
int rc = 0, i = 0;
u32 reg_count;
struct platform_device *pdev = parser->pdev;
struct dp_io *io = &parser->io;
struct device *dev = &pdev->dev;
reg_count = of_property_count_strings(dev->of_node, "reg-names");
if (reg_count <= 0) {
pr_err("no reg defined\n");
return -EINVAL;
}
io->len = reg_count;
io->data = devm_kzalloc(dev, sizeof(struct dp_io_data) * reg_count,
GFP_KERNEL);
if (!io->data)
return -ENOMEM;
for (i = 0; i < reg_count; i++) {
of_property_read_string_index(dev->of_node,
"reg-names", i, &io->data[i].name);
rc = msm_dss_ioremap_byname(pdev, &io->data[i].io,
io->data[i].name);
if (rc) {
pr_err("unable to remap %s resources\n",
io->data[i].name);
goto err;
}
}
return 0;
err:
dp_parser_unmap_io_resources(parser);
return rc;
}
static const char *dp_get_phy_aux_config_property(u32 cfg_type)
{
switch (cfg_type) {
case PHY_AUX_CFG0:
return "qcom,aux-cfg0-settings";
case PHY_AUX_CFG1:
return "qcom,aux-cfg1-settings";
case PHY_AUX_CFG2:
return "qcom,aux-cfg2-settings";
case PHY_AUX_CFG3:
return "qcom,aux-cfg3-settings";
case PHY_AUX_CFG4:
return "qcom,aux-cfg4-settings";
case PHY_AUX_CFG5:
return "qcom,aux-cfg5-settings";
case PHY_AUX_CFG6:
return "qcom,aux-cfg6-settings";
case PHY_AUX_CFG7:
return "qcom,aux-cfg7-settings";
case PHY_AUX_CFG8:
return "qcom,aux-cfg8-settings";
case PHY_AUX_CFG9:
return "qcom,aux-cfg9-settings";
default:
return "unknown";
}
}
static void dp_parser_phy_aux_cfg_reset(struct dp_parser *parser)
{
int i = 0;
for (i = 0; i < PHY_AUX_CFG_MAX; i++)
parser->aux_cfg[i] = (const struct dp_aux_cfg){ 0 };
}
static int dp_parser_aux(struct dp_parser *parser)
{
struct device_node *of_node = parser->pdev->dev.of_node;
int len = 0, i = 0, j = 0, config_count = 0;
const char *data;
int const minimum_config_count = 1;
for (i = 0; i < PHY_AUX_CFG_MAX; i++) {
const char *property = dp_get_phy_aux_config_property(i);
data = of_get_property(of_node, property, &len);
if (!data) {
pr_err("Unable to read %s\n", property);
goto error;
}
config_count = len - 1;
if ((config_count < minimum_config_count) ||
(config_count > DP_AUX_CFG_MAX_VALUE_CNT)) {
pr_err("Invalid config count (%d) configs for %s\n",
config_count, property);
goto error;
}
parser->aux_cfg[i].offset = data[0];
parser->aux_cfg[i].cfg_cnt = config_count;
pr_debug("%s offset=0x%x, cfg_cnt=%d\n",
property,
parser->aux_cfg[i].offset,
parser->aux_cfg[i].cfg_cnt);
for (j = 1; j < len; j++) {
parser->aux_cfg[i].lut[j - 1] = data[j];
pr_debug("%s lut[%d]=0x%x\n",
property,
i,
parser->aux_cfg[i].lut[j - 1]);
}
}
return 0;
error:
dp_parser_phy_aux_cfg_reset(parser);
return -EINVAL;
}
static int dp_parser_misc(struct dp_parser *parser)
{
int rc = 0, len = 0, i = 0;
const char *data = NULL;
struct device_node *of_node = parser->pdev->dev.of_node;
data = of_get_property(of_node, "qcom,logical2physical-lane-map", &len);
if (data && (len == DP_MAX_PHY_LN)) {
for (i = 0; i < len; i++)
parser->l_map[i] = data[i];
}
data = of_get_property(of_node, "qcom,pn-swap-lane-map", &len);
if (data && (len == DP_MAX_PHY_LN)) {
for (i = 0; i < len; i++)
parser->l_pnswap |= (data[i] & 0x01) << i;
}
rc = of_property_read_u32(of_node,
"qcom,max-pclk-frequency-khz", &parser->max_pclk_khz);
if (rc)
parser->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
rc = of_property_read_u32(of_node,
"qcom,max-lclk-frequency-khz", &parser->max_lclk_khz);
if (rc)
parser->max_lclk_khz = DP_MAX_LINK_CLK_KHZ;
return 0;
}
static int dp_parser_msm_hdcp_dev(struct dp_parser *parser)
{
struct device_node *node;
struct platform_device *pdev;
node = of_find_compatible_node(NULL, NULL, "qcom,msm-hdcp");
if (!node) {
/* This is a non-fatal error; module initialization can proceed */
pr_warn("couldn't find msm-hdcp node\n");
return 0;
}
pdev = of_find_device_by_node(node);
if (!pdev) {
/* This is a non-fatal error; module initialization can proceed */
pr_warn("couldn't find msm-hdcp pdev\n");
return 0;
}
parser->msm_hdcp_dev = &pdev->dev;
return 0;
}
static int dp_parser_pinctrl(struct dp_parser *parser)
{
int rc = 0;
struct dp_pinctrl *pinctrl = &parser->pinctrl;
pinctrl->pin = devm_pinctrl_get(&parser->pdev->dev);
if (IS_ERR_OR_NULL(pinctrl->pin)) {
pr_debug("failed to get pinctrl, rc=%d\n", rc);
goto error;
}
if (parser->no_aux_switch && parser->lphw_hpd) {
pinctrl->state_hpd_tlmm = pinctrl->state_hpd_ctrl = NULL;
pinctrl->state_hpd_tlmm = pinctrl_lookup_state(pinctrl->pin,
"mdss_dp_hpd_tlmm");
if (!IS_ERR_OR_NULL(pinctrl->state_hpd_tlmm)) {
pinctrl->state_hpd_ctrl = pinctrl_lookup_state(
pinctrl->pin, "mdss_dp_hpd_ctrl");
}
if (!pinctrl->state_hpd_tlmm || !pinctrl->state_hpd_ctrl) {
pinctrl->state_hpd_tlmm = NULL;
pinctrl->state_hpd_ctrl = NULL;
pr_debug("tlmm or ctrl pinctrl state does not exist\n");
}
}
pinctrl->state_active = pinctrl_lookup_state(pinctrl->pin,
"mdss_dp_active");
if (IS_ERR_OR_NULL(pinctrl->state_active)) {
rc = PTR_ERR(pinctrl->state_active);
pr_err("failed to get pinctrl active state, rc=%d\n", rc);
goto error;
}
pinctrl->state_suspend = pinctrl_lookup_state(pinctrl->pin,
"mdss_dp_sleep");
if (IS_ERR_OR_NULL(pinctrl->state_suspend)) {
rc = PTR_ERR(pinctrl->state_suspend);
pr_err("failed to get pinctrl suspend state, rc=%d\n", rc);
goto error;
}
error:
return rc;
}
static int dp_parser_gpio(struct dp_parser *parser)
{
int i = 0;
struct device *dev = &parser->pdev->dev;
struct device_node *of_node = dev->of_node;
struct dss_module_power *mp = &parser->mp[DP_CORE_PM];
static const char * const dp_gpios[] = {
"qcom,aux-en-gpio",
"qcom,aux-sel-gpio",
"qcom,usbplug-cc-gpio",
};
if (of_find_property(of_node, "qcom,dp-hpd-gpio", NULL)) {
parser->no_aux_switch = true;
parser->lphw_hpd = of_find_property(of_node,
"qcom,dp-low-power-hw-hpd", NULL);
return 0;
}
if (of_find_property(of_node, "qcom,dp-gpio-aux-switch", NULL))
parser->gpio_aux_switch = true;
mp->gpio_config = devm_kzalloc(dev,
sizeof(struct dss_gpio) * ARRAY_SIZE(dp_gpios), GFP_KERNEL);
if (!mp->gpio_config)
return -ENOMEM;
mp->num_gpio = ARRAY_SIZE(dp_gpios);
for (i = 0; i < ARRAY_SIZE(dp_gpios); i++) {
mp->gpio_config[i].gpio = of_get_named_gpio(of_node,
dp_gpios[i], 0);
if (!gpio_is_valid(mp->gpio_config[i].gpio)) {
pr_debug("%s gpio not specified\n", dp_gpios[i]);
/* If any of these gpios is missing, assume the
 * gpio based aux switch is also absent.
 */
parser->gpio_aux_switch = false;
continue;
}
strlcpy(mp->gpio_config[i].gpio_name, dp_gpios[i],
sizeof(mp->gpio_config[i].gpio_name));
mp->gpio_config[i].value = 0;
}
return 0;
}
static const char *dp_parser_supply_node_name(enum dp_pm_type module)
{
switch (module) {
case DP_CORE_PM: return "qcom,core-supply-entries";
case DP_CTRL_PM: return "qcom,ctrl-supply-entries";
case DP_PHY_PM: return "qcom,phy-supply-entries";
default: return "???";
}
}
static int dp_parser_get_vreg(struct dp_parser *parser,
enum dp_pm_type module)
{
int i = 0, rc = 0;
u32 tmp = 0;
const char *pm_supply_name = NULL;
struct device_node *supply_node = NULL;
struct device_node *of_node = parser->pdev->dev.of_node;
struct device_node *supply_root_node = NULL;
struct dss_module_power *mp = &parser->mp[module];
mp->num_vreg = 0;
pm_supply_name = dp_parser_supply_node_name(module);
supply_root_node = of_get_child_by_name(of_node, pm_supply_name);
if (!supply_root_node) {
pr_err("no supply entry present: %s\n", pm_supply_name);
goto novreg;
}
mp->num_vreg = of_get_available_child_count(supply_root_node);
if (mp->num_vreg == 0) {
pr_debug("no vreg\n");
goto novreg;
} else {
pr_debug("vreg found. count=%d\n", mp->num_vreg);
}
mp->vreg_config = devm_kzalloc(&parser->pdev->dev,
sizeof(struct dss_vreg) * mp->num_vreg, GFP_KERNEL);
if (!mp->vreg_config) {
rc = -ENOMEM;
goto error;
}
for_each_child_of_node(supply_root_node, supply_node) {
const char *st = NULL;
/* vreg-name */
rc = of_property_read_string(supply_node,
"qcom,supply-name", &st);
if (rc) {
pr_err("error reading name. rc=%d\n",
rc);
goto error;
}
snprintf(mp->vreg_config[i].vreg_name,
ARRAY_SIZE((mp->vreg_config[i].vreg_name)), "%s", st);
/* vreg-min-voltage */
rc = of_property_read_u32(supply_node,
"qcom,supply-min-voltage", &tmp);
if (rc) {
pr_err("error reading min volt. rc=%d\n",
rc);
goto error;
}
mp->vreg_config[i].min_voltage = tmp;
/* vreg-max-voltage */
rc = of_property_read_u32(supply_node,
"qcom,supply-max-voltage", &tmp);
if (rc) {
pr_err("error reading max volt. rc=%d\n",
rc);
goto error;
}
mp->vreg_config[i].max_voltage = tmp;
/* enable-load */
rc = of_property_read_u32(supply_node,
"qcom,supply-enable-load", &tmp);
if (rc) {
pr_err("error reading enable load. rc=%d\n",
rc);
goto error;
}
mp->vreg_config[i].enable_load = tmp;
/* disable-load */
rc = of_property_read_u32(supply_node,
"qcom,supply-disable-load", &tmp);
if (rc) {
pr_err("error reading disable load. rc=%d\n",
rc);
goto error;
}
mp->vreg_config[i].disable_load = tmp;
pr_debug("%s min=%d, max=%d, enable=%d, disable=%d\n",
mp->vreg_config[i].vreg_name,
mp->vreg_config[i].min_voltage,
mp->vreg_config[i].max_voltage,
mp->vreg_config[i].enable_load,
mp->vreg_config[i].disable_load
);
++i;
}
return rc;
error:
if (mp->vreg_config) {
devm_kfree(&parser->pdev->dev, mp->vreg_config);
mp->vreg_config = NULL;
}
novreg:
mp->num_vreg = 0;
return rc;
}
static void dp_parser_put_vreg_data(struct device *dev,
struct dss_module_power *mp)
{
if (!mp) {
DEV_ERR("invalid input\n");
return;
}
if (mp->vreg_config) {
devm_kfree(dev, mp->vreg_config);
mp->vreg_config = NULL;
}
mp->num_vreg = 0;
}
static int dp_parser_regulator(struct dp_parser *parser)
{
int i, rc = 0;
struct platform_device *pdev = parser->pdev;
/* Parse the regulator information */
for (i = DP_CORE_PM; i < DP_MAX_PM; i++) {
rc = dp_parser_get_vreg(parser, i);
if (rc) {
pr_err("get_dt_vreg_data failed for %s. rc=%d\n",
dp_parser_pm_name(i), rc);
i--;
for (; i >= DP_CORE_PM; i--)
dp_parser_put_vreg_data(&pdev->dev,
&parser->mp[i]);
break;
}
}
return rc;
}
static bool dp_parser_check_prefix(const char *clk_prefix, const char *clk_name)
{
return !!strnstr(clk_name, clk_prefix, strlen(clk_name));
}
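/*
 * Example (illustrative): dp_parser_check_prefix("core", "core_aux_clk")
 * is true, so a clock named "core_aux_clk" in the clock-names property
 * would be counted against the DP_CORE_PM module below.
 */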
static void dp_parser_put_clk_data(struct device *dev,
struct dss_module_power *mp)
{
if (!mp) {
DEV_ERR("%s: invalid input\n", __func__);
return;
}
if (mp->clk_config) {
devm_kfree(dev, mp->clk_config);
mp->clk_config = NULL;
}
mp->num_clk = 0;
}
static void dp_parser_put_gpio_data(struct device *dev,
struct dss_module_power *mp)
{
if (!mp) {
DEV_ERR("%s: invalid input\n", __func__);
return;
}
if (mp->gpio_config) {
devm_kfree(dev, mp->gpio_config);
mp->gpio_config = NULL;
}
mp->num_gpio = 0;
}
static int dp_parser_init_clk_data(struct dp_parser *parser)
{
int num_clk = 0, i = 0, rc = 0;
int core_clk_count = 0, link_clk_count = 0;
int strm0_clk_count = 0, strm1_clk_count = 0;
const char *core_clk = "core";
const char *strm0_clk = "strm0";
const char *strm1_clk = "strm1";
const char *link_clk = "link";
const char *clk_name;
struct device *dev = &parser->pdev->dev;
struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
struct dss_module_power *strm0_power = &parser->mp[DP_STREAM0_PM];
struct dss_module_power *strm1_power = &parser->mp[DP_STREAM1_PM];
struct dss_module_power *link_power = &parser->mp[DP_LINK_PM];
num_clk = of_property_count_strings(dev->of_node, "clock-names");
if (num_clk <= 0) {
pr_err("no clocks are defined\n");
rc = -EINVAL;
goto exit;
}
for (i = 0; i < num_clk; i++) {
of_property_read_string_index(dev->of_node,
"clock-names", i, &clk_name);
if (dp_parser_check_prefix(core_clk, clk_name))
core_clk_count++;
if (dp_parser_check_prefix(strm0_clk, clk_name))
strm0_clk_count++;
if (dp_parser_check_prefix(strm1_clk, clk_name))
strm1_clk_count++;
if (dp_parser_check_prefix(link_clk, clk_name))
link_clk_count++;
}
/* Initialize the CORE power module */
if (core_clk_count <= 0) {
pr_err("no core clocks are defined\n");
rc = -EINVAL;
goto exit;
}
core_power->num_clk = core_clk_count;
core_power->clk_config = devm_kzalloc(dev,
sizeof(struct dss_clk) * core_power->num_clk,
GFP_KERNEL);
if (!core_power->clk_config) {
rc = -EINVAL;
goto exit;
}
/* Initialize the STREAM0 power module */
if (strm0_clk_count <= 0) {
pr_debug("no strm0 clocks are defined\n");
} else {
strm0_power->num_clk = strm0_clk_count;
strm0_power->clk_config = devm_kzalloc(dev,
sizeof(struct dss_clk) * strm0_power->num_clk,
GFP_KERNEL);
if (!strm0_power->clk_config) {
strm0_power->num_clk = 0;
rc = -EINVAL;
goto strm0_clock_error;
}
}
/* Initialize the STREAM1 power module */
if (strm1_clk_count <= 0) {
pr_debug("no strm1 clocks are defined\n");
} else {
strm1_power->num_clk = strm1_clk_count;
strm1_power->clk_config = devm_kzalloc(dev,
sizeof(struct dss_clk) * strm1_power->num_clk,
GFP_KERNEL);
if (!strm1_power->clk_config) {
strm1_power->num_clk = 0;
rc = -EINVAL;
goto strm1_clock_error;
}
}
/* Initialize the link power module */
if (link_clk_count <= 0) {
pr_err("no link clocks are defined\n");
rc = -EINVAL;
goto link_clock_error;
}
link_power->num_clk = link_clk_count;
link_power->clk_config = devm_kzalloc(dev,
sizeof(struct dss_clk) * link_power->num_clk,
GFP_KERNEL);
if (!link_power->clk_config) {
link_power->num_clk = 0;
rc = -EINVAL;
goto link_clock_error;
}
return rc;
link_clock_error:
dp_parser_put_clk_data(dev, strm1_power);
strm1_clock_error:
dp_parser_put_clk_data(dev, strm0_power);
strm0_clock_error:
dp_parser_put_clk_data(dev, core_power);
exit:
return rc;
}
static int dp_parser_clock(struct dp_parser *parser)
{
int rc = 0, i = 0;
int num_clk = 0;
int core_clk_index = 0, link_clk_index = 0;
int core_clk_count = 0, link_clk_count = 0;
int strm0_clk_index = 0, strm1_clk_index = 0;
int strm0_clk_count = 0, strm1_clk_count = 0;
const char *clk_name;
const char *core_clk = "core";
const char *strm0_clk = "strm0";
const char *strm1_clk = "strm1";
const char *link_clk = "link";
struct device *dev = &parser->pdev->dev;
struct dss_module_power *core_power;
struct dss_module_power *strm0_power;
struct dss_module_power *strm1_power;
struct dss_module_power *link_power;
core_power = &parser->mp[DP_CORE_PM];
strm0_power = &parser->mp[DP_STREAM0_PM];
strm1_power = &parser->mp[DP_STREAM1_PM];
link_power = &parser->mp[DP_LINK_PM];
rc = dp_parser_init_clk_data(parser);
if (rc) {
pr_err("failed to initialize power data\n");
rc = -EINVAL;
goto exit;
}
core_clk_count = core_power->num_clk;
link_clk_count = link_power->num_clk;
strm0_clk_count = strm0_power->num_clk;
strm1_clk_count = strm1_power->num_clk;
num_clk = of_property_count_strings(dev->of_node, "clock-names");
for (i = 0; i < num_clk; i++) {
of_property_read_string_index(dev->of_node, "clock-names",
i, &clk_name);
if (dp_parser_check_prefix(core_clk, clk_name) &&
core_clk_index < core_clk_count) {
struct dss_clk *clk =
&core_power->clk_config[core_clk_index];
strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
clk->type = DSS_CLK_AHB;
core_clk_index++;
} else if (dp_parser_check_prefix(link_clk, clk_name) &&
link_clk_index < link_clk_count) {
struct dss_clk *clk =
&link_power->clk_config[link_clk_index];
strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
link_clk_index++;
if (!strcmp(clk_name, "link_clk"))
clk->type = DSS_CLK_PCLK;
else
clk->type = DSS_CLK_AHB;
} else if (dp_parser_check_prefix(strm0_clk, clk_name) &&
strm0_clk_index < strm0_clk_count) {
struct dss_clk *clk =
&strm0_power->clk_config[strm0_clk_index];
strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
strm0_clk_index++;
clk->type = DSS_CLK_PCLK;
} else if (dp_parser_check_prefix(strm1_clk, clk_name) &&
strm1_clk_index < strm1_clk_count) {
struct dss_clk *clk =
&strm1_power->clk_config[strm1_clk_index];
strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
strm1_clk_index++;
clk->type = DSS_CLK_PCLK;
}
}
pr_debug("clock parsing successful\n");
exit:
return rc;
}
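/*
 * Illustrative device tree fragment for the prefix-based parsing above
 * (the exact clock names are platform specific; these are examples only):
 *
 *	clock-names = "core_aux_clk", "core_usb_ref_clk",
 *		      "link_clk", "link_iface_clk",
 *		      "strm0_pixel_clk", "strm1_pixel_clk";
 *
 * "core_*" entries land in DP_CORE_PM, "link_*" entries in DP_LINK_PM,
 * and "strm0_*"/"strm1_*" entries in the respective stream modules.
 */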
static int dp_parser_catalog(struct dp_parser *parser)
{
int rc;
u32 version;
struct device *dev = &parser->pdev->dev;
rc = of_property_read_u32(dev->of_node, "qcom,phy-version", &version);
if (!rc)
parser->hw_cfg.phy_version = version;
return 0;
}
static int dp_parser_mst(struct dp_parser *parser)
{
struct device *dev = &parser->pdev->dev;
int i;
parser->has_mst = of_property_read_bool(dev->of_node,
"qcom,mst-enable");
parser->has_mst_sideband = parser->has_mst;
pr_debug("mst parsing successful. mst:%d\n", parser->has_mst);
for (i = 0; i < MAX_DP_MST_STREAMS; i++) {
of_property_read_u32_index(dev->of_node,
"qcom,mst-fixed-topology-ports", i,
&parser->mst_fixed_port[i]);
}
return 0;
}
static void dp_parser_dsc(struct dp_parser *parser)
{
int rc;
struct device *dev = &parser->pdev->dev;
parser->dsc_feature_enable = of_property_read_bool(dev->of_node,
"qcom,dsc-feature-enable");
rc = of_property_read_u32(dev->of_node,
"qcom,max-dp-dsc-blks", &parser->max_dp_dsc_blks);
if (rc || !parser->max_dp_dsc_blks)
parser->dsc_feature_enable = false;
rc = of_property_read_u32(dev->of_node,
"qcom,max-dp-dsc-input-width-pixs",
&parser->max_dp_dsc_input_width_pixs);
if (rc || !parser->max_dp_dsc_input_width_pixs)
parser->dsc_feature_enable = false;
pr_debug("dsc parsing successful. dsc:%d, blks:%d, width:%d\n",
parser->dsc_feature_enable,
parser->max_dp_dsc_blks,
parser->max_dp_dsc_input_width_pixs);
}
static void dp_parser_fec(struct dp_parser *parser)
{
struct device *dev = &parser->pdev->dev;
parser->fec_feature_enable = of_property_read_bool(dev->of_node,
"qcom,fec-feature-enable");
pr_debug("fec parsing successful. fec:%d\n",
parser->fec_feature_enable);
}
static void dp_parser_widebus(struct dp_parser *parser)
{
struct device *dev = &parser->pdev->dev;
parser->has_widebus = of_property_read_bool(dev->of_node,
"qcom,widebus-enable");
pr_debug("widebus parsing successful. widebus:%d\n",
parser->has_widebus);
}
static int dp_parser_parse(struct dp_parser *parser)
{
int rc = 0;
if (!parser) {
pr_err("invalid input\n");
rc = -EINVAL;
goto err;
}
rc = dp_parser_reg(parser);
if (rc)
goto err;
rc = dp_parser_aux(parser);
if (rc)
goto err;
rc = dp_parser_misc(parser);
if (rc)
goto err;
rc = dp_parser_clock(parser);
if (rc)
goto err;
rc = dp_parser_regulator(parser);
if (rc)
goto err;
rc = dp_parser_gpio(parser);
if (rc)
goto err;
rc = dp_parser_catalog(parser);
if (rc)
goto err;
rc = dp_parser_pinctrl(parser);
if (rc)
goto err;
rc = dp_parser_msm_hdcp_dev(parser);
if (rc)
goto err;
rc = dp_parser_mst(parser);
if (rc)
goto err;
dp_parser_dsc(parser);
dp_parser_fec(parser);
dp_parser_widebus(parser);
err:
return rc;
}
static struct dp_io_data *dp_parser_get_io(struct dp_parser *dp_parser,
char *name)
{
int i = 0;
struct dp_io *io;
if (!dp_parser) {
pr_err("invalid input\n");
goto err;
}
io = &dp_parser->io;
for (i = 0; i < io->len; i++) {
struct dp_io_data *data = &io->data[i];
if (!strcmp(data->name, name))
return data;
}
err:
return NULL;
}
static void dp_parser_get_io_buf(struct dp_parser *dp_parser, char *name)
{
int i = 0;
struct dp_io *io;
if (!dp_parser) {
pr_err("invalid input\n");
return;
}
io = &dp_parser->io;
for (i = 0; i < io->len; i++) {
struct dp_io_data *data = &io->data[i];
if (!strcmp(data->name, name)) {
if (!data->buf)
data->buf = devm_kzalloc(&dp_parser->pdev->dev,
data->io.len, GFP_KERNEL);
}
}
}
static void dp_parser_clear_io_buf(struct dp_parser *dp_parser)
{
int i = 0;
struct dp_io *io;
if (!dp_parser) {
pr_err("invalid input\n");
return;
}
io = &dp_parser->io;
for (i = 0; i < io->len; i++) {
struct dp_io_data *data = &io->data[i];
if (data->buf)
devm_kfree(&dp_parser->pdev->dev, data->buf);
data->buf = NULL;
}
}
struct dp_parser *dp_parser_get(struct platform_device *pdev)
{
struct dp_parser *parser;
parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
if (!parser)
return ERR_PTR(-ENOMEM);
parser->parse = dp_parser_parse;
parser->get_io = dp_parser_get_io;
parser->get_io_buf = dp_parser_get_io_buf;
parser->clear_io_buf = dp_parser_clear_io_buf;
parser->pdev = pdev;
return parser;
}
void dp_parser_put(struct dp_parser *parser)
{
int i = 0;
struct dss_module_power *power = NULL;
if (!parser) {
pr_err("invalid parser module\n");
return;
}
power = parser->mp;
for (i = 0; i < DP_MAX_PM; i++) {
dp_parser_put_clk_data(&parser->pdev->dev, &power[i]);
dp_parser_put_vreg_data(&parser->pdev->dev, &power[i]);
dp_parser_put_gpio_data(&parser->pdev->dev, &power[i]);
}
dp_parser_clear_io_buf(parser);
devm_kfree(&parser->pdev->dev, parser->io.data);
devm_kfree(&parser->pdev->dev, parser);
}
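/*
 * A minimal usage sketch of the parser API above, assuming a typical
 * probe path (the function name, the error handling and the "dp_ahb"
 * IO region name are illustrative assumptions).
 */
static int example_dp_probe(struct platform_device *pdev)
{
	struct dp_parser *parser;
	int rc;

	parser = dp_parser_get(pdev);
	if (IS_ERR(parser))
		return PTR_ERR(parser);

	/* parse clocks, regulators, gpios, pinctrl and misc DT properties */
	rc = parser->parse(parser);
	if (rc) {
		dp_parser_put(parser);
		return rc;
	}

	/* look up a mapped IO region by name ("dp_ahb" is illustrative) */
	if (!parser->get_io(parser, "dp_ahb"))
		pr_warn("dp_ahb region not found\n");

	return 0;
}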

271
msm/dp/dp_parser.h Normal file

@@ -0,0 +1,271 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_PARSER_H_
#define _DP_PARSER_H_
#include <linux/sde_io_util.h>
#define DP_LABEL "MDSS DP DISPLAY"
#define AUX_CFG_LEN 10
#define DP_MAX_PIXEL_CLK_KHZ 675000
#define DP_MAX_LINK_CLK_KHZ 810000
#define MAX_DP_MST_STREAMS 2
enum dp_pm_type {
DP_CORE_PM,
DP_CTRL_PM,
DP_PHY_PM,
DP_STREAM0_PM,
DP_STREAM1_PM,
DP_LINK_PM,
DP_MAX_PM
};
static inline const char *dp_parser_pm_name(enum dp_pm_type module)
{
switch (module) {
case DP_CORE_PM: return "DP_CORE_PM";
case DP_CTRL_PM: return "DP_CTRL_PM";
case DP_PHY_PM: return "DP_PHY_PM";
case DP_STREAM0_PM: return "DP_STREAM0_PM";
case DP_STREAM1_PM: return "DP_STREAM1_PM";
case DP_LINK_PM: return "DP_LINK_PM";
default: return "???";
}
}
/**
* struct dp_display_data - display related device tree data.
*
 * @ctrl_node: reference to controller device
* @phy_node: reference to phy device
* @is_active: is the controller currently active
* @name: name of the display
* @display_type: type of the display
*/
struct dp_display_data {
struct device_node *ctrl_node;
struct device_node *phy_node;
bool is_active;
const char *name;
const char *display_type;
};
/**
* struct dp_io_data - data structure to store DP IO related info
* @name: name of the IO
* @buf: buffer corresponding to IO for debugging
 * @io: io data which gives the length and mapped address
*/
struct dp_io_data {
const char *name;
u8 *buf;
struct dss_io_data io;
};
/**
* struct dp_io - data struct to store array of DP IO info
* @len: total number of IOs
* @data: pointer to an array of DP IO data structures.
*/
struct dp_io {
u32 len;
struct dp_io_data *data;
};
/**
* struct dp_pinctrl - DP's pin control
*
* @pin: pin-controller's instance
* @state_active: active state pin control
 * @state_hpd_active: hpd active state pin control
 * @state_hpd_tlmm: pin state with the HPD line routed to TLMM (gpio)
 * @state_hpd_ctrl: pin state with the HPD line routed to the DP controller
 * @state_suspend: suspend state pin control
*/
struct dp_pinctrl {
struct pinctrl *pin;
struct pinctrl_state *state_active;
struct pinctrl_state *state_hpd_active;
struct pinctrl_state *state_hpd_tlmm;
struct pinctrl_state *state_hpd_ctrl;
struct pinctrl_state *state_suspend;
};
#define DP_ENUM_STR(x) #x
#define DP_AUX_CFG_MAX_VALUE_CNT 3
/**
* struct dp_aux_cfg - DP's AUX configuration settings
*
* @cfg_cnt: count of the configurable settings for the AUX register
* @current_index: current index of the AUX config lut
* @offset: register offset of the AUX config register
* @lut: look up table for the AUX config values for this register
*/
struct dp_aux_cfg {
u32 cfg_cnt;
u32 current_index;
u32 offset;
u32 lut[DP_AUX_CFG_MAX_VALUE_CNT];
};
/* PHY AUX config registers */
enum dp_phy_aux_config_type {
PHY_AUX_CFG0,
PHY_AUX_CFG1,
PHY_AUX_CFG2,
PHY_AUX_CFG3,
PHY_AUX_CFG4,
PHY_AUX_CFG5,
PHY_AUX_CFG6,
PHY_AUX_CFG7,
PHY_AUX_CFG8,
PHY_AUX_CFG9,
PHY_AUX_CFG_MAX,
};
/**
* enum dp_phy_version - version of the dp phy
* @DP_PHY_VERSION_UNKNOWN: Unknown controller version
 * @DP_PHY_VERSION_2_0_0: DP phy v2.0.0 controller
 * @DP_PHY_VERSION_4_2_0: DP phy v4.2.0 controller
* @DP_PHY_VERSION_MAX: max version
*/
enum dp_phy_version {
DP_PHY_VERSION_UNKNOWN,
DP_PHY_VERSION_2_0_0 = 0x200,
DP_PHY_VERSION_4_2_0 = 0x420,
DP_PHY_VERSION_MAX
};
/**
* struct dp_hw_cfg - DP HW specific configuration
*
* @phy_version: DP PHY HW version
*/
struct dp_hw_cfg {
enum dp_phy_version phy_version;
};
static inline char *dp_phy_aux_config_type_to_string(u32 cfg_type)
{
switch (cfg_type) {
case PHY_AUX_CFG0:
return DP_ENUM_STR(PHY_AUX_CFG0);
case PHY_AUX_CFG1:
return DP_ENUM_STR(PHY_AUX_CFG1);
case PHY_AUX_CFG2:
return DP_ENUM_STR(PHY_AUX_CFG2);
case PHY_AUX_CFG3:
return DP_ENUM_STR(PHY_AUX_CFG3);
case PHY_AUX_CFG4:
return DP_ENUM_STR(PHY_AUX_CFG4);
case PHY_AUX_CFG5:
return DP_ENUM_STR(PHY_AUX_CFG5);
case PHY_AUX_CFG6:
return DP_ENUM_STR(PHY_AUX_CFG6);
case PHY_AUX_CFG7:
return DP_ENUM_STR(PHY_AUX_CFG7);
case PHY_AUX_CFG8:
return DP_ENUM_STR(PHY_AUX_CFG8);
case PHY_AUX_CFG9:
return DP_ENUM_STR(PHY_AUX_CFG9);
default:
return "unknown";
}
}
/**
* struct dp_parser - DP parser's data exposed to clients
*
* @pdev: platform data of the client
* @msm_hdcp_dev: device pointer for the HDCP driver
* @mp: gpio, regulator and clock related data
 * @pinctrl: pin-control related data
 * @io: mapped IO regions for the DP controller
 * @disp_data: controller's display related data
 * @l_map: logical-to-physical lane mapping
 * @l_pnswap: P/N swap status on each lane
 * @aux_cfg: look-up tables for the PHY AUX configuration registers
* @max_pclk_khz: maximum pixel clock supported for the platform
* @max_lclk_khz: maximum link clock supported for the platform
* @hw_cfg: DP HW specific settings
* @has_mst: MST feature enable status
* @has_mst_sideband: MST sideband feature enable status
 * @no_aux_switch: indicates that no AUX switch is present
 * @gpio_aux_switch: indicates that a GPIO-based AUX switch is used
* @dsc_feature_enable: DSC feature enable status
* @fec_feature_enable: FEC feature enable status
* @max_dp_dsc_blks: maximum DSC blks for DP interface
* @max_dp_dsc_input_width_pixs: Maximum input width for DSC block
 * @has_widebus: widebus (2PPC) feature enable status
 * @lphw_hpd: low power hardware HPD feature enable status
 * @mst_fixed_port: MST port numbers reserved for fixed topology
* @parse: function to be called by client to parse device tree.
* @get_io: function to be called by client to get io data.
* @get_io_buf: function to be called by client to get io buffers.
* @clear_io_buf: function to be called by client to clear io buffers.
*/
struct dp_parser {
struct platform_device *pdev;
struct device *msm_hdcp_dev;
struct dss_module_power mp[DP_MAX_PM];
struct dp_pinctrl pinctrl;
struct dp_io io;
struct dp_display_data disp_data;
u8 l_map[4];
u8 l_pnswap;
struct dp_aux_cfg aux_cfg[AUX_CFG_LEN];
u32 max_pclk_khz;
u32 max_lclk_khz;
struct dp_hw_cfg hw_cfg;
bool has_mst;
bool has_mst_sideband;
bool no_aux_switch;
bool dsc_feature_enable;
bool fec_feature_enable;
bool has_widebus;
bool gpio_aux_switch;
u32 max_dp_dsc_blks;
u32 max_dp_dsc_input_width_pixs;
bool lphw_hpd;
u32 mst_fixed_port[MAX_DP_MST_STREAMS];
int (*parse)(struct dp_parser *parser);
struct dp_io_data *(*get_io)(struct dp_parser *parser, char *name);
void (*get_io_buf)(struct dp_parser *parser, char *name);
void (*clear_io_buf)(struct dp_parser *parser);
};
enum dp_phy_lane_num {
DP_PHY_LN0 = 0,
DP_PHY_LN1 = 1,
DP_PHY_LN2 = 2,
DP_PHY_LN3 = 3,
DP_MAX_PHY_LN = 4,
};
enum dp_mainlink_lane_num {
DP_ML0 = 0,
DP_ML1 = 1,
DP_ML2 = 2,
DP_ML3 = 3,
};
/**
* dp_parser_get() - get the DP's device tree parser module
*
* @pdev: platform data of the client
* return: pointer to dp_parser structure.
*
 * This function gives the client the ability to parse the
 * device tree and populate the data structures. Data related
 * to clocks, regulators, pin-control and more can be parsed
 * using this module.
*/
struct dp_parser *dp_parser_get(struct platform_device *pdev);
/**
* dp_parser_put() - cleans the dp_parser module
*
* @parser: pointer to the parser's data.
*/
void dp_parser_put(struct dp_parser *parser);
#endif

720
msm/dp/dp_power.c Normal file

@@ -0,0 +1,720 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/clk.h>
#include "dp_power.h"
#include "dp_catalog.h"
#define DP_CLIENT_NAME_SIZE 20
struct dp_power_private {
struct dp_parser *parser;
struct platform_device *pdev;
struct clk *pixel_clk_rcg;
struct clk *pixel_parent;
struct clk *pixel1_clk_rcg;
struct clk *pixel1_parent;
struct dp_power dp_power;
struct sde_power_client *dp_core_client;
struct sde_power_handle *phandle;
bool core_clks_on;
bool link_clks_on;
bool strm0_clks_on;
bool strm1_clks_on;
};
static int dp_power_regulator_init(struct dp_power_private *power)
{
int rc = 0, i = 0, j = 0;
struct platform_device *pdev;
struct dp_parser *parser;
parser = power->parser;
pdev = power->pdev;
for (i = DP_CORE_PM; !rc && (i < DP_MAX_PM); i++) {
rc = msm_dss_config_vreg(&pdev->dev,
parser->mp[i].vreg_config,
parser->mp[i].num_vreg, 1);
if (rc) {
pr_err("failed to init vregs for %s\n",
dp_parser_pm_name(i));
for (j = i - 1; j >= DP_CORE_PM; j--) {
msm_dss_config_vreg(&pdev->dev,
parser->mp[j].vreg_config,
parser->mp[j].num_vreg, 0);
}
goto error;
}
}
error:
return rc;
}
static void dp_power_regulator_deinit(struct dp_power_private *power)
{
int rc = 0, i = 0;
struct platform_device *pdev;
struct dp_parser *parser;
parser = power->parser;
pdev = power->pdev;
for (i = DP_CORE_PM; (i < DP_MAX_PM); i++) {
rc = msm_dss_config_vreg(&pdev->dev,
parser->mp[i].vreg_config,
parser->mp[i].num_vreg, 0);
if (rc)
pr_err("failed to deinit vregs for %s\n",
dp_parser_pm_name(i));
}
}
static int dp_power_regulator_ctrl(struct dp_power_private *power, bool enable)
{
int rc = 0, i = 0, j = 0;
struct dp_parser *parser;
parser = power->parser;
for (i = DP_CORE_PM; i < DP_MAX_PM; i++) {
rc = msm_dss_enable_vreg(
parser->mp[i].vreg_config,
parser->mp[i].num_vreg, enable);
if (rc) {
pr_err("failed to '%s' vregs for %s\n",
enable ? "enable" : "disable",
dp_parser_pm_name(i));
if (enable) {
for (j = i-1; j >= DP_CORE_PM; j--) {
msm_dss_enable_vreg(
parser->mp[j].vreg_config,
parser->mp[j].num_vreg, 0);
}
}
goto error;
}
}
error:
return rc;
}
static int dp_power_pinctrl_set(struct dp_power_private *power, bool active)
{
int rc = -EFAULT;
struct pinctrl_state *pin_state;
struct dp_parser *parser;
parser = power->parser;
if (IS_ERR_OR_NULL(parser->pinctrl.pin))
return 0;
if (parser->no_aux_switch && parser->lphw_hpd) {
pin_state = active ? parser->pinctrl.state_hpd_ctrl
: parser->pinctrl.state_hpd_tlmm;
if (!IS_ERR_OR_NULL(pin_state)) {
rc = pinctrl_select_state(parser->pinctrl.pin,
pin_state);
if (rc) {
pr_err("cannot direct hpd line to %s\n",
active ? "ctrl" : "tlmm");
return rc;
}
}
}
if (parser->no_aux_switch)
return 0;
pin_state = active ? parser->pinctrl.state_active
: parser->pinctrl.state_suspend;
if (!IS_ERR_OR_NULL(pin_state)) {
rc = pinctrl_select_state(parser->pinctrl.pin,
pin_state);
if (rc)
pr_err("can not set %s pins\n",
active ? "dp_active"
: "dp_sleep");
} else {
pr_err("invalid '%s' pinstate\n",
active ? "dp_active"
: "dp_sleep");
}
return rc;
}
static int dp_power_clk_init(struct dp_power_private *power, bool enable)
{
int rc = 0;
struct device *dev;
enum dp_pm_type module;
dev = &power->pdev->dev;
if (enable) {
for (module = DP_CORE_PM; module < DP_MAX_PM; module++) {
struct dss_module_power *pm =
&power->parser->mp[module];
if (!pm->num_clk)
continue;
rc = msm_dss_get_clk(dev, pm->clk_config, pm->num_clk);
if (rc) {
pr_err("failed to get %s clk. err=%d\n",
dp_parser_pm_name(module), rc);
goto exit;
}
}
power->pixel_clk_rcg = devm_clk_get(dev, "pixel_clk_rcg");
if (IS_ERR(power->pixel_clk_rcg)) {
pr_debug("Unable to get DP pixel clk RCG\n");
power->pixel_clk_rcg = NULL;
}
power->pixel_parent = devm_clk_get(dev, "pixel_parent");
if (IS_ERR(power->pixel_parent)) {
pr_debug("Unable to get DP pixel RCG parent\n");
power->pixel_parent = NULL;
}
power->pixel1_clk_rcg = devm_clk_get(dev, "pixel1_clk_rcg");
if (IS_ERR(power->pixel1_clk_rcg)) {
pr_debug("Unable to get DP pixel1 clk RCG\n");
power->pixel1_clk_rcg = NULL;
}
power->pixel1_parent = devm_clk_get(dev, "pixel1_parent");
if (IS_ERR(power->pixel1_parent)) {
pr_debug("Unable to get DP pixel1 RCG parent\n");
power->pixel1_parent = NULL;
}
} else {
if (power->pixel_parent)
devm_clk_put(dev, power->pixel_parent);
if (power->pixel_clk_rcg)
devm_clk_put(dev, power->pixel_clk_rcg);
if (power->pixel1_parent)
devm_clk_put(dev, power->pixel1_parent);
if (power->pixel1_clk_rcg)
devm_clk_put(dev, power->pixel1_clk_rcg);
for (module = DP_CORE_PM; module < DP_MAX_PM; module++) {
struct dss_module_power *pm =
&power->parser->mp[module];
if (!pm->num_clk)
continue;
msm_dss_put_clk(pm->clk_config, pm->num_clk);
}
}
exit:
return rc;
}
static int dp_power_clk_set_rate(struct dp_power_private *power,
enum dp_pm_type module, bool enable)
{
int rc = 0;
struct dss_module_power *mp;
if (!power) {
pr_err("invalid power data\n");
rc = -EINVAL;
goto exit;
}
mp = &power->parser->mp[module];
if (enable) {
rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
if (rc) {
pr_err("failed to set clks rate.\n");
goto exit;
}
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, 1);
if (rc) {
pr_err("failed to enable clks\n");
goto exit;
}
} else {
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, 0);
if (rc) {
pr_err("failed to disable clks\n");
goto exit;
}
}
exit:
return rc;
}
static int dp_power_clk_enable(struct dp_power *dp_power,
enum dp_pm_type pm_type, bool enable)
{
int rc = 0;
struct dss_module_power *mp;
struct dp_power_private *power;
if (!dp_power) {
pr_err("invalid power data\n");
rc = -EINVAL;
goto error;
}
power = container_of(dp_power, struct dp_power_private, dp_power);
	if (pm_type >= DP_MAX_PM) {
		pr_err("unsupported power module: %s\n",
				dp_parser_pm_name(pm_type));
		return -EINVAL;
	}
	mp = &power->parser->mp[pm_type];
if (enable) {
if (pm_type == DP_CORE_PM && power->core_clks_on) {
pr_debug("core clks already enabled\n");
return 0;
}
if ((pm_type == DP_STREAM0_PM) && (power->strm0_clks_on)) {
pr_debug("strm0 clks already enabled\n");
return 0;
}
if ((pm_type == DP_STREAM1_PM) && (power->strm1_clks_on)) {
pr_debug("strm1 clks already enabled\n");
return 0;
}
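		/*
		 * Link (CTRL) clocks depend on the core clocks; if the
		 * core clocks are not yet on, enable them here first.
		 */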
if ((pm_type == DP_CTRL_PM) && (!power->core_clks_on)) {
pr_debug("Need to enable core clks before link clks\n");
rc = dp_power_clk_set_rate(power, pm_type, enable);
if (rc) {
pr_err("failed to enable clks: %s. err=%d\n",
dp_parser_pm_name(DP_CORE_PM), rc);
goto error;
} else {
power->core_clks_on = true;
}
}
if (pm_type == DP_LINK_PM && power->link_clks_on) {
pr_debug("links clks already enabled\n");
return 0;
}
}
rc = dp_power_clk_set_rate(power, pm_type, enable);
if (rc) {
pr_err("failed to '%s' clks for: %s. err=%d\n",
enable ? "enable" : "disable",
dp_parser_pm_name(pm_type), rc);
goto error;
}
if (pm_type == DP_CORE_PM)
power->core_clks_on = enable;
else if (pm_type == DP_STREAM0_PM)
power->strm0_clks_on = enable;
else if (pm_type == DP_STREAM1_PM)
power->strm1_clks_on = enable;
else if (pm_type == DP_LINK_PM)
power->link_clks_on = enable;
/*
* This log is printed only when user connects or disconnects
* a DP cable. As this is a user-action and not a frequent
	 * usecase, it is not going to flood the kernel logs. It is
	 * also helpful in debugging NOC issues.
	 */
pr_info("core:%s link:%s strm0:%s strm1:%s\n",
power->core_clks_on ? "on" : "off",
power->link_clks_on ? "on" : "off",
power->strm0_clks_on ? "on" : "off",
power->strm1_clks_on ? "on" : "off");
error:
return rc;
}
static int dp_power_request_gpios(struct dp_power_private *power)
{
int rc = 0, i;
struct device *dev;
struct dss_module_power *mp;
static const char * const gpio_names[] = {
"aux_enable", "aux_sel", "usbplug_cc",
};
if (!power) {
pr_err("invalid power data\n");
return -EINVAL;
}
dev = &power->pdev->dev;
mp = &power->parser->mp[DP_CORE_PM];
	for (i = 0; i < ARRAY_SIZE(gpio_names) && i < mp->num_gpio; i++) {
		unsigned int gpio = mp->gpio_config[i].gpio;
if (gpio_is_valid(gpio)) {
rc = devm_gpio_request(dev, gpio, gpio_names[i]);
if (rc) {
pr_err("request %s gpio failed, rc=%d\n",
gpio_names[i], rc);
goto error;
}
}
}
return 0;
error:
	for (i = 0; i < ARRAY_SIZE(gpio_names) && i < mp->num_gpio; i++) {
		unsigned int gpio = mp->gpio_config[i].gpio;
if (gpio_is_valid(gpio))
gpio_free(gpio);
}
return rc;
}
static bool dp_power_find_gpio(const char *gpio1, const char *gpio2)
{
return !!strnstr(gpio1, gpio2, strlen(gpio1));
}
static void dp_power_set_gpio(struct dp_power_private *power, bool flip)
{
int i;
struct dss_module_power *mp = &power->parser->mp[DP_CORE_PM];
struct dss_gpio *config = mp->gpio_config;
for (i = 0; i < mp->num_gpio; i++) {
if (dp_power_find_gpio(config->gpio_name, "aux-sel"))
config->value = flip;
if (gpio_is_valid(config->gpio)) {
pr_debug("gpio %s, value %d\n", config->gpio_name,
config->value);
if (dp_power_find_gpio(config->gpio_name, "aux-en") ||
dp_power_find_gpio(config->gpio_name, "aux-sel"))
gpio_direction_output(config->gpio,
config->value);
else
gpio_set_value(config->gpio, config->value);
}
config++;
}
}
static int dp_power_config_gpios(struct dp_power_private *power, bool flip,
bool enable)
{
int rc = 0, i;
struct dss_module_power *mp;
struct dss_gpio *config;
if (power->parser->no_aux_switch)
return 0;
mp = &power->parser->mp[DP_CORE_PM];
config = mp->gpio_config;
if (enable) {
rc = dp_power_request_gpios(power);
if (rc) {
pr_err("gpio request failed\n");
return rc;
}
dp_power_set_gpio(power, flip);
} else {
for (i = 0; i < mp->num_gpio; i++) {
if (gpio_is_valid(config[i].gpio)) {
gpio_set_value(config[i].gpio, 0);
gpio_free(config[i].gpio);
}
}
}
return 0;
}
static int dp_power_client_init(struct dp_power *dp_power,
struct sde_power_handle *phandle)
{
int rc = 0;
struct dp_power_private *power;
char dp_client_name[DP_CLIENT_NAME_SIZE];
if (!dp_power) {
pr_err("invalid power data\n");
return -EINVAL;
}
power = container_of(dp_power, struct dp_power_private, dp_power);
rc = dp_power_regulator_init(power);
if (rc) {
pr_err("failed to init regulators\n");
goto error_power;
}
rc = dp_power_clk_init(power, true);
if (rc) {
pr_err("failed to init clocks\n");
goto error_clk;
}
power->phandle = phandle;
snprintf(dp_client_name, DP_CLIENT_NAME_SIZE, "dp_core_client");
power->dp_core_client = sde_power_client_create(phandle,
dp_client_name);
if (IS_ERR_OR_NULL(power->dp_core_client)) {
pr_err("[%s] client creation failed for DP\n", dp_client_name);
rc = -EINVAL;
goto error_client;
}
return 0;
error_client:
dp_power_clk_init(power, false);
error_clk:
dp_power_regulator_deinit(power);
error_power:
return rc;
}
static void dp_power_client_deinit(struct dp_power *dp_power)
{
struct dp_power_private *power;
if (!dp_power) {
pr_err("invalid power data\n");
return;
}
power = container_of(dp_power, struct dp_power_private, dp_power);
sde_power_client_destroy(power->phandle, power->dp_core_client);
dp_power_clk_init(power, false);
dp_power_regulator_deinit(power);
}
static int dp_power_set_pixel_clk_parent(struct dp_power *dp_power, u32 strm_id)
{
int rc = 0;
struct dp_power_private *power;
if (!dp_power || strm_id >= DP_STREAM_MAX) {
pr_err("invalid power data. stream %d\n", strm_id);
rc = -EINVAL;
goto exit;
}
power = container_of(dp_power, struct dp_power_private, dp_power);
if (strm_id == DP_STREAM_0) {
if (power->pixel_clk_rcg && power->pixel_parent)
clk_set_parent(power->pixel_clk_rcg,
power->pixel_parent);
} else if (strm_id == DP_STREAM_1) {
if (power->pixel1_clk_rcg && power->pixel1_parent)
clk_set_parent(power->pixel1_clk_rcg,
power->pixel1_parent);
}
exit:
return rc;
}
static u64 dp_power_clk_get_rate(struct dp_power *dp_power, char *clk_name)
{
size_t i;
enum dp_pm_type j;
struct dss_module_power *mp;
struct dp_power_private *power;
bool clk_found = false;
u64 rate = 0;
	if (!dp_power || !clk_name) {
		pr_err("invalid input\n");
		return 0;
	}
power = container_of(dp_power, struct dp_power_private, dp_power);
mp = &power->phandle->mp;
for (i = 0; i < mp->num_clk; i++) {
if (!strcmp(mp->clk_config[i].clk_name, clk_name)) {
rate = clk_get_rate(mp->clk_config[i].clk);
clk_found = true;
break;
}
}
for (j = DP_CORE_PM; j < DP_MAX_PM && !clk_found; j++) {
mp = &power->parser->mp[j];
for (i = 0; i < mp->num_clk; i++) {
if (!strcmp(mp->clk_config[i].clk_name, clk_name)) {
rate = clk_get_rate(mp->clk_config[i].clk);
clk_found = true;
break;
}
}
}
return rate;
}
static int dp_power_init(struct dp_power *dp_power, bool flip)
{
int rc = 0;
struct dp_power_private *power;
if (!dp_power) {
pr_err("invalid power data\n");
rc = -EINVAL;
goto exit;
}
power = container_of(dp_power, struct dp_power_private, dp_power);
rc = dp_power_regulator_ctrl(power, true);
if (rc) {
pr_err("failed to enable regulators\n");
goto exit;
}
rc = dp_power_pinctrl_set(power, true);
if (rc) {
pr_err("failed to set pinctrl state\n");
goto err_pinctrl;
}
rc = dp_power_config_gpios(power, flip, true);
if (rc) {
pr_err("failed to enable gpios\n");
goto err_gpio;
}
rc = sde_power_resource_enable(power->phandle,
power->dp_core_client, true);
if (rc) {
pr_err("Power resource enable failed\n");
goto err_sde_power;
}
rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
if (rc) {
pr_err("failed to enable DP core clocks\n");
goto err_clk;
}
return 0;
err_clk:
sde_power_resource_enable(power->phandle, power->dp_core_client, false);
err_sde_power:
dp_power_config_gpios(power, flip, false);
err_gpio:
dp_power_pinctrl_set(power, false);
err_pinctrl:
dp_power_regulator_ctrl(power, false);
exit:
return rc;
}
static int dp_power_deinit(struct dp_power *dp_power)
{
int rc = 0;
struct dp_power_private *power;
if (!dp_power) {
pr_err("invalid power data\n");
rc = -EINVAL;
goto exit;
}
power = container_of(dp_power, struct dp_power_private, dp_power);
if (power->link_clks_on)
dp_power_clk_enable(dp_power, DP_LINK_PM, false);
dp_power_clk_enable(dp_power, DP_CORE_PM, false);
rc = sde_power_resource_enable(power->phandle,
power->dp_core_client, false);
if (rc) {
pr_err("Power resource disable failed, rc=%d\n", rc);
goto exit;
}
dp_power_config_gpios(power, false, false);
dp_power_pinctrl_set(power, false);
dp_power_regulator_ctrl(power, false);
exit:
return rc;
}
struct dp_power *dp_power_get(struct dp_parser *parser)
{
int rc = 0;
struct dp_power_private *power;
struct dp_power *dp_power;
if (!parser) {
pr_err("invalid input\n");
rc = -EINVAL;
goto error;
}
power = devm_kzalloc(&parser->pdev->dev, sizeof(*power), GFP_KERNEL);
if (!power) {
rc = -ENOMEM;
goto error;
}
power->parser = parser;
power->pdev = parser->pdev;
dp_power = &power->dp_power;
dp_power->init = dp_power_init;
dp_power->deinit = dp_power_deinit;
dp_power->clk_enable = dp_power_clk_enable;
dp_power->set_pixel_clk_parent = dp_power_set_pixel_clk_parent;
dp_power->clk_get_rate = dp_power_clk_get_rate;
dp_power->power_client_init = dp_power_client_init;
dp_power->power_client_deinit = dp_power_client_deinit;
return dp_power;
error:
return ERR_PTR(rc);
}
void dp_power_put(struct dp_power *dp_power)
{
struct dp_power_private *power = NULL;
if (!dp_power)
return;
power = container_of(dp_power, struct dp_power_private, dp_power);
devm_kfree(&power->pdev->dev, power);
}

51
msm/dp/dp_power.h Normal file

@@ -0,0 +1,51 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_POWER_H_
#define _DP_POWER_H_
#include "dp_parser.h"
#include "sde_power_handle.h"
/**
 * struct dp_power - DisplayPort's power related data
*
* @init: initializes the regulators/core clocks/GPIOs/pinctrl
* @deinit: turns off the regulators/core clocks/GPIOs/pinctrl
* @clk_enable: enable/disable the DP clocks
* @set_pixel_clk_parent: set the parent of DP pixel clock
* @clk_get_rate: get the current rate for provided clk_name
*/
struct dp_power {
int (*init)(struct dp_power *power, bool flip);
int (*deinit)(struct dp_power *power);
int (*clk_enable)(struct dp_power *power, enum dp_pm_type pm_type,
bool enable);
int (*set_pixel_clk_parent)(struct dp_power *power, u32 stream_id);
u64 (*clk_get_rate)(struct dp_power *power, char *clk_name);
int (*power_client_init)(struct dp_power *power,
struct sde_power_handle *phandle);
void (*power_client_deinit)(struct dp_power *power);
};
/**
* dp_power_get() - configure and get the DisplayPort power module data
*
* @parser: instance of parser module
* return: pointer to allocated power module data
*
 * This API will configure the DisplayPort's power module and provide
 * methods to be called by the client to configure the power related
 * modules.
*/
struct dp_power *dp_power_get(struct dp_parser *parser);
/**
* dp_power_put() - release the power related resources
*
* @power: pointer to the power module's data
*/
void dp_power_put(struct dp_power *power);
#endif /* _DP_POWER_H_ */
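/*
 * A sketch of the bring-up order implied by the API above (the flip
 * value, the phandle source and the error handling are illustrative
 * assumptions).
 */
static int example_dp_power_on(struct dp_parser *parser,
		struct sde_power_handle *phandle)
{
	struct dp_power *power;
	int rc;

	power = dp_power_get(parser);
	if (IS_ERR(power))
		return PTR_ERR(power);

	rc = power->power_client_init(power, phandle);
	if (rc)
		return rc;

	/* enables regulators, pinctrl, gpios and the core clocks */
	rc = power->init(power, false);
	if (rc)
		return rc;

	/* link clocks are enabled separately, once rates are configured */
	return power->clk_enable(power, DP_LINK_PM, true);
}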

437
msm/dp/dp_reg.h Normal file

@@ -0,0 +1,437 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_REG_H_
#define _DP_REG_H_
/* DP_TX Registers */
#define DP_HW_VERSION (0x00000000)
#define DP_SW_RESET (0x00000010)
#define DP_PHY_CTRL (0x00000014)
#define DP_CLK_CTRL (0x00000018)
#define DP_CLK_ACTIVE (0x0000001C)
#define DP_INTR_STATUS (0x00000020)
#define DP_INTR_STATUS2 (0x00000024)
#define DP_INTR_STATUS3 (0x00000028)
#define DP_INTR_STATUS5 (0x00000034)
#define DP_DP_HPD_CTRL (0x00000000)
#define DP_DP_HPD_INT_STATUS (0x00000004)
#define DP_DP_HPD_INT_ACK (0x00000008)
#define DP_DP_HPD_INT_MASK (0x0000000C)
#define DP_DP_HPD_REFTIMER (0x00000018)
#define DP_DP_HPD_EVENT_TIME_0 (0x0000001C)
#define DP_DP_HPD_EVENT_TIME_1 (0x00000020)
#define DP_AUX_CTRL (0x00000030)
#define DP_AUX_DATA (0x00000034)
#define DP_AUX_TRANS_CTRL (0x00000038)
#define DP_TIMEOUT_COUNT (0x0000003C)
#define DP_AUX_LIMITS (0x00000040)
#define DP_AUX_STATUS (0x00000044)
#define DP_DPCD_CP_IRQ (0x201)
#define DP_DPCD_RXSTATUS (0x69493)
#define DP_INTERRUPT_TRANS_NUM (0x000000A0)
#define DP_MAINLINK_CTRL (0x00000000)
#define DP_STATE_CTRL (0x00000004)
#define DP_CONFIGURATION_CTRL (0x00000008)
#define DP_SOFTWARE_MVID (0x00000010)
#define DP_SOFTWARE_NVID (0x00000018)
#define DP_TOTAL_HOR_VER (0x0000001C)
#define DP_START_HOR_VER_FROM_SYNC (0x00000020)
#define DP_HSYNC_VSYNC_WIDTH_POLARITY (0x00000024)
#define DP_ACTIVE_HOR_VER (0x00000028)
#define DP_MISC1_MISC0 (0x0000002C)
#define DP_VALID_BOUNDARY (0x00000030)
#define DP_VALID_BOUNDARY_2 (0x00000034)
#define DP_LOGICAL2PHYSICAL_LANE_MAPPING (0x00000038)
#define DP1_CONFIGURATION_CTRL (0x00000400)
#define DP_DP0_TIMESLOT_1_32 (0x00000404)
#define DP_DP0_TIMESLOT_33_63 (0x00000408)
#define DP_DP1_TIMESLOT_1_32 (0x0000040C)
#define DP_DP1_TIMESLOT_33_63 (0x00000410)
#define DP1_SOFTWARE_MVID (0x00000414)
#define DP1_SOFTWARE_NVID (0x00000418)
#define DP1_TOTAL_HOR_VER (0x0000041C)
#define DP1_START_HOR_VER_FROM_SYNC (0x00000420)
#define DP1_HSYNC_VSYNC_WIDTH_POLARITY (0x00000424)
#define DP1_ACTIVE_HOR_VER (0x00000428)
#define DP1_MISC1_MISC0 (0x0000042C)
#define DP_DP0_RG (0x000004F8)
#define DP_DP1_RG (0x000004FC)
#define DP_MST_ACT (0x00000500)
#define DP_MST_MAINLINK_READY (0x00000504)
#define DP_MAINLINK_READY (0x00000040)
#define DP_MAINLINK_LEVELS (0x00000044)
#define DP_TU (0x0000004C)
#define DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000054)
#define DP_TEST_80BIT_CUSTOM_PATTERN_REG0 (0x000000C0)
#define DP_TEST_80BIT_CUSTOM_PATTERN_REG1 (0x000000C4)
#define DP_TEST_80BIT_CUSTOM_PATTERN_REG2 (0x000000C8)
#define MMSS_DP_MISC1_MISC0 (0x0000002C)
#define MMSS_DP_AUDIO_TIMING_GEN (0x00000080)
#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000084)
#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000088)
#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000008C)
#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000090)
#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000094)
#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000098)
#define MMSS_DP_PSR_CRC_RG (0x00000154)
#define MMSS_DP_PSR_CRC_B (0x00000158)
#define DP_COMPRESSION_MODE_CTRL (0x00000180)
#define DP_PPS_HB_0_3 (0x00000184)
#define DP_PPS_PB_0_3 (0x00000188)
#define DP_PPS_PB_4_7 (0x0000018C)
#define DP_PPS_PB_8_11 (0x00000190)
#define DP_PPS_PB_12_15 (0x00000194)
#define DP_PPS_PB_16_19 (0x00000198)
#define DP_PPS_PB_20_23 (0x0000019C)
#define DP_PPS_PB_24_27 (0x000001A0)
#define DP_PPS_PB_28_31 (0x000001A4)
#define DP_PPS_PPS_0_3 (0x000001A8)
#define DP_PPS_PPS_4_7 (0x000001AC)
#define DP_PPS_PPS_8_11 (0x000001B0)
#define DP_PPS_PPS_12_15 (0x000001B4)
#define DP_PPS_PPS_16_19 (0x000001B8)
#define DP_PPS_PPS_20_23 (0x000001BC)
#define DP_PPS_PPS_24_27 (0x000001C0)
#define DP_PPS_PPS_28_31 (0x000001C4)
#define DP_PPS_PPS_32_35 (0x000001C8)
#define DP_PPS_PPS_36_39 (0x000001CC)
#define DP_PPS_PPS_40_43 (0x000001D0)
#define DP_PPS_PPS_44_47 (0x000001D4)
#define DP_PPS_PPS_48_51 (0x000001D8)
#define DP_PPS_PPS_52_55 (0x000001DC)
#define DP_PPS_PPS_56_59 (0x000001E0)
#define DP_PPS_PPS_60_63 (0x000001E4)
#define DP_PPS_PPS_64_67 (0x000001E8)
#define DP_PPS_PPS_68_71 (0x000001EC)
#define DP_PPS_PPS_72_75 (0x000001F0)
#define DP_PPS_PPS_76_79 (0x000001F4)
#define DP_PPS_PPS_80_83 (0x000001F8)
#define DP_PPS_PPS_84_87 (0x000001FC)
#define MMSS_DP_AUDIO_CFG (0x00000200)
#define MMSS_DP_AUDIO_STATUS (0x00000204)
#define MMSS_DP_AUDIO_PKT_CTRL (0x00000208)
#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000020C)
#define MMSS_DP_AUDIO_ACR_CTRL (0x00000210)
#define MMSS_DP_AUDIO_CTRL_RESET (0x00000214)
#define MMSS_DP_SDP_CFG (0x00000228)
#define MMSS_DP_SDP_CFG2 (0x0000022C)
#define MMSS_DP_SDP_CFG3 (0x0000024C)
#define MMSS_DP_SDP_CFG4 (0x000004EC)
#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000230)
#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000234)
#define MMSS_DP_AUDIO_STREAM_0 (0x00000240)
#define MMSS_DP_AUDIO_STREAM_1 (0x00000244)
#define MMSS_DP_EXTENSION_0 (0x00000250)
#define MMSS_DP_EXTENSION_1 (0x00000254)
#define MMSS_DP_EXTENSION_2 (0x00000258)
#define MMSS_DP_EXTENSION_3 (0x0000025C)
#define MMSS_DP_EXTENSION_4 (0x00000260)
#define MMSS_DP_EXTENSION_5 (0x00000264)
#define MMSS_DP_EXTENSION_6 (0x00000268)
#define MMSS_DP_EXTENSION_7 (0x0000026C)
#define MMSS_DP_EXTENSION_8 (0x00000270)
#define MMSS_DP_EXTENSION_9 (0x00000274)
#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000278)
#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000027C)
#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000280)
#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000284)
#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000288)
#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000028C)
#define MMSS_DP_AUDIO_ISRC_0 (0x00000290)
#define MMSS_DP_AUDIO_ISRC_1 (0x00000294)
#define MMSS_DP_AUDIO_ISRC_2 (0x00000298)
#define MMSS_DP_AUDIO_ISRC_3 (0x0000029C)
#define MMSS_DP_AUDIO_ISRC_4 (0x000002A0)
#define MMSS_DP_AUDIO_ISRC_5 (0x000002A4)
#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000002A8)
#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000002AC)
#define MMSS_DP_AUDIO_INFOFRAME_2 (0x000002B0)
#define MMSS_DP_FLUSH (0x000002F8)
#define MMSS_DP1_FLUSH (0x000002FC)
#define MMSS_DP_GENERIC0_0 (0x00000300)
#define MMSS_DP_GENERIC0_1 (0x00000304)
#define MMSS_DP_GENERIC0_2 (0x00000308)
#define MMSS_DP_GENERIC0_3 (0x0000030C)
#define MMSS_DP_GENERIC0_4 (0x00000310)
#define MMSS_DP_GENERIC0_5 (0x00000314)
#define MMSS_DP_GENERIC0_6 (0x00000318)
#define MMSS_DP_GENERIC0_7 (0x0000031C)
#define MMSS_DP_GENERIC0_8 (0x00000320)
#define MMSS_DP_GENERIC0_9 (0x00000324)
#define MMSS_DP_GENERIC1_0 (0x00000328)
#define MMSS_DP_GENERIC1_1 (0x0000032C)
#define MMSS_DP_GENERIC1_2 (0x00000330)
#define MMSS_DP_GENERIC1_3 (0x00000334)
#define MMSS_DP_GENERIC1_4 (0x00000338)
#define MMSS_DP_GENERIC1_5 (0x0000033C)
#define MMSS_DP_GENERIC1_6 (0x00000340)
#define MMSS_DP_GENERIC1_7 (0x00000344)
#define MMSS_DP_GENERIC1_8 (0x00000348)
#define MMSS_DP_GENERIC1_9 (0x0000034C)
#define MMSS_DP1_GENERIC0_0 (0x00000490)
#define MMSS_DP1_GENERIC0_1 (0x00000494)
#define MMSS_DP1_GENERIC0_2 (0x00000498)
#define MMSS_DP1_GENERIC0_3 (0x0000049C)
#define MMSS_DP1_GENERIC0_4 (0x000004A0)
#define MMSS_DP1_GENERIC0_5 (0x000004A4)
#define MMSS_DP1_GENERIC0_6 (0x000004A8)
#define MMSS_DP1_GENERIC0_7 (0x000004AC)
#define MMSS_DP1_GENERIC0_8 (0x000004B0)
#define MMSS_DP1_GENERIC0_9 (0x000004B4)
#define MMSS_DP1_GENERIC1_0 (0x000004B8)
#define MMSS_DP1_GENERIC1_1 (0x000004BC)
#define MMSS_DP1_GENERIC1_2 (0x000004C0)
#define MMSS_DP1_GENERIC1_3 (0x000004C4)
#define MMSS_DP1_GENERIC1_4 (0x000004C8)
#define MMSS_DP1_GENERIC1_5 (0x000004CC)
#define MMSS_DP1_GENERIC1_6 (0x000004D0)
#define MMSS_DP1_GENERIC1_7 (0x000004D4)
#define MMSS_DP1_GENERIC1_8 (0x000004D8)
#define MMSS_DP1_GENERIC1_9 (0x000004DC)
#define MMSS_DP_GENERIC2_0 (0x000003d8)
#define MMSS_DP_GENERIC2_1 (0x000003dc)
#define MMSS_DP_GENERIC2_2 (0x000003e0)
#define MMSS_DP_GENERIC2_3 (0x000003e4)
#define MMSS_DP_GENERIC2_4 (0x000003e8)
#define MMSS_DP_GENERIC2_5 (0x000003ec)
#define MMSS_DP_GENERIC2_6 (0x000003f0)
#define MMSS_DP_GENERIC2_7 (0x000003f4)
#define MMSS_DP_GENERIC2_8 (0x000003f8)
#define MMSS_DP_GENERIC2_9 (0x000003fc)
#define MMSS_DP1_GENERIC2_0 (0x00000510)
#define MMSS_DP1_GENERIC2_1 (0x00000514)
#define MMSS_DP1_GENERIC2_2 (0x00000518)
#define MMSS_DP1_GENERIC2_3 (0x0000051c)
#define MMSS_DP1_GENERIC2_4 (0x00000520)
#define MMSS_DP1_GENERIC2_5 (0x00000524)
#define MMSS_DP1_GENERIC2_6 (0x00000528)
#define MMSS_DP1_GENERIC2_7 (0x0000052C)
#define MMSS_DP1_GENERIC2_8 (0x00000530)
#define MMSS_DP1_GENERIC2_9 (0x00000534)
#define MMSS_DP1_SDP_CFG (0x000004E0)
#define MMSS_DP1_SDP_CFG2 (0x000004E4)
#define MMSS_DP1_SDP_CFG3 (0x000004E8)
#define MMSS_DP1_SDP_CFG4 (0x000004F0)
#define DP1_COMPRESSION_MODE_CTRL (0x00000560)
#define DP1_PPS_HB_0_3 (0x00000564)
#define DP1_PPS_PB_0_3 (0x00000568)
#define DP1_PPS_PB_4_7 (0x0000056C)
#define DP1_PPS_PB_8_11 (0x00000570)
#define DP1_PPS_PB_12_15 (0x00000574)
#define DP1_PPS_PB_16_19 (0x00000578)
#define DP1_PPS_PB_20_23 (0x0000057C)
#define DP1_PPS_PB_24_27 (0x00000580)
#define DP1_PPS_PB_28_31 (0x00000584)
#define DP1_PPS_PPS_0_3 (0x00000588)
#define DP1_PPS_PPS_4_7 (0x0000058C)
#define DP1_PPS_PPS_8_11 (0x00000590)
#define DP1_PPS_PPS_12_15 (0x00000594)
#define DP1_PPS_PPS_16_19 (0x00000598)
#define DP1_PPS_PPS_20_23 (0x0000059C)
#define DP1_PPS_PPS_24_27 (0x000005A0)
#define DP1_PPS_PPS_28_31 (0x000005A4)
#define DP1_PPS_PPS_32_35 (0x000005A8)
#define DP1_PPS_PPS_36_39 (0x000005AC)
#define DP1_PPS_PPS_40_43 (0x000005B0)
#define DP1_PPS_PPS_44_47 (0x000005B4)
#define DP1_PPS_PPS_48_51 (0x000005B8)
#define DP1_PPS_PPS_52_55 (0x000005BC)
#define DP1_PPS_PPS_56_59 (0x000005C0)
#define DP1_PPS_PPS_60_63 (0x000005C4)
#define DP1_PPS_PPS_64_67 (0x000005C8)
#define DP1_PPS_PPS_68_71 (0x000005CC)
#define DP1_PPS_PPS_72_75 (0x000005D0)
#define DP1_PPS_PPS_76_79 (0x000005D4)
#define DP1_PPS_PPS_80_83 (0x000005D8)
#define DP1_PPS_PPS_84_87 (0x000005DC)
#define MMSS_DP_VSCEXT_0 (0x000002D0)
#define MMSS_DP_VSCEXT_1 (0x000002D4)
#define MMSS_DP_VSCEXT_2 (0x000002D8)
#define MMSS_DP_VSCEXT_3 (0x000002DC)
#define MMSS_DP_VSCEXT_4 (0x000002E0)
#define MMSS_DP_VSCEXT_5 (0x000002E4)
#define MMSS_DP_VSCEXT_6 (0x000002E8)
#define MMSS_DP_VSCEXT_7 (0x000002EC)
#define MMSS_DP_VSCEXT_8 (0x000002F0)
#define MMSS_DP_VSCEXT_9 (0x000002F4)
#define MMSS_DP1_VSCEXT_0 (0x00000468)
#define MMSS_DP1_VSCEXT_1 (0x0000046c)
#define MMSS_DP1_VSCEXT_2 (0x00000470)
#define MMSS_DP1_VSCEXT_3 (0x00000474)
#define MMSS_DP1_VSCEXT_4 (0x00000478)
#define MMSS_DP1_VSCEXT_5 (0x0000047c)
#define MMSS_DP1_VSCEXT_6 (0x00000480)
#define MMSS_DP1_VSCEXT_7 (0x00000484)
#define MMSS_DP1_VSCEXT_8 (0x00000488)
#define MMSS_DP1_VSCEXT_9 (0x0000048c)
#define MMSS_DP_BIST_ENABLE (0x00000000)
#define MMSS_DP_TIMING_ENGINE_EN (0x00000010)
#define MMSS_DP_INTF_CONFIG (0x00000014)
#define MMSS_DP_INTF_HSYNC_CTL (0x00000018)
#define MMSS_DP_INTF_VSYNC_PERIOD_F0 (0x0000001C)
#define MMSS_DP_INTF_VSYNC_PERIOD_F1 (0x00000020)
#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0 (0x00000024)
#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1 (0x00000028)
#define MMSS_INTF_DISPLAY_V_START_F0 (0x0000002C)
#define MMSS_INTF_DISPLAY_V_START_F1 (0x00000030)
#define MMSS_DP_INTF_DISPLAY_V_END_F0 (0x00000034)
#define MMSS_DP_INTF_DISPLAY_V_END_F1 (0x00000038)
#define MMSS_DP_INTF_ACTIVE_V_START_F0 (0x0000003C)
#define MMSS_DP_INTF_ACTIVE_V_START_F1 (0x00000040)
#define MMSS_DP_INTF_ACTIVE_V_END_F0 (0x00000044)
#define MMSS_DP_INTF_ACTIVE_V_END_F1 (0x00000048)
#define MMSS_DP_INTF_DISPLAY_HCTL (0x0000004C)
#define MMSS_DP_INTF_ACTIVE_HCTL (0x00000050)
#define MMSS_DP_INTF_POLARITY_CTL (0x00000058)
#define MMSS_DP_TPG_MAIN_CONTROL (0x00000060)
#define MMSS_DP_TPG_VIDEO_CONFIG (0x00000064)
#define MMSS_DP_DSC_DTO (0x0000007C)
#define MMSS_DP_DSC_DTO_COUNT (0x00000084)
#define MMSS_DP_ASYNC_FIFO_CONFIG (0x00000088)
#define MMSS_DP1_BIST_ENABLE (0x00000000)
#define MMSS_DP1_TIMING_ENGINE_EN (0x00000010)
#define MMSS_DP1_INTF_CONFIG (0x00000014)
#define MMSS_DP1_INTF_HSYNC_CTL (0x00000018)
#define MMSS_DP1_INTF_VSYNC_PERIOD_F0 (0x0000001C)
#define MMSS_DP1_INTF_VSYNC_PERIOD_F1 (0x00000020)
#define MMSS_DP1_INTF_VSYNC_PULSE_WIDTH_F0 (0x00000024)
#define MMSS_DP1_INTF_VSYNC_PULSE_WIDTH_F1 (0x00000028)
#define MMSS_DP1_INTF_DISPLAY_V_START_F0 (0x0000002C)
#define MMSS_DP1_INTF_DISPLAY_V_START_F1 (0x00000030)
#define MMSS_DP1_INTF_DISPLAY_V_END_F0 (0x00000034)
#define MMSS_DP1_INTF_DISPLAY_V_END_F1 (0x00000038)
#define MMSS_DP1_INTF_ACTIVE_V_START_F0 (0x0000003C)
#define MMSS_DP1_INTF_ACTIVE_V_START_F1 (0x00000040)
#define MMSS_DP1_INTF_ACTIVE_V_END_F0 (0x00000044)
#define MMSS_DP1_INTF_ACTIVE_V_END_F1 (0x00000048)
#define MMSS_DP1_INTF_DISPLAY_HCTL (0x0000004C)
#define MMSS_DP1_INTF_ACTIVE_HCTL (0x00000050)
#define MMSS_DP1_INTF_POLARITY_CTL (0x00000058)
#define MMSS_DP1_TPG_MAIN_CONTROL (0x00000060)
#define MMSS_DP1_TPG_VIDEO_CONFIG (0x00000064)
#define MMSS_DP1_DSC_DTO (0x0000007C)
#define MMSS_DP1_DSC_DTO_COUNT (0x00000084)
#define MMSS_DP1_ASYNC_FIFO_CONFIG (0x00000088)
/* DP PHY Register offsets */
#define DP_PHY_REVISION_ID0 (0x00000000)
#define DP_PHY_REVISION_ID1 (0x00000004)
#define DP_PHY_REVISION_ID2 (0x00000008)
#define DP_PHY_REVISION_ID3 (0x0000000C)
#define DP_PHY_CFG (0x00000010)
#define DP_PHY_PD_CTL (0x00000018)
#define DP_PHY_MODE (0x0000001C)
#define DP_PHY_AUX_CFG0 (0x00000020)
#define DP_PHY_AUX_CFG1 (0x00000024)
#define DP_PHY_AUX_CFG2 (0x00000028)
#define DP_PHY_AUX_CFG3 (0x0000002C)
#define DP_PHY_AUX_CFG4 (0x00000030)
#define DP_PHY_AUX_CFG5 (0x00000034)
#define DP_PHY_AUX_CFG6 (0x00000038)
#define DP_PHY_AUX_CFG7 (0x0000003C)
#define DP_PHY_AUX_CFG8 (0x00000040)
#define DP_PHY_AUX_CFG9 (0x00000044)
#define DP_PHY_AUX_INTERRUPT_MASK (0x00000048)
#define DP_PHY_AUX_INTERRUPT_CLEAR (0x0000004C)
#define DP_PHY_AUX_INTERRUPT_STATUS (0x000000BC)
#define DP_PHY_AUX_INTERRUPT_MASK_V200 (0x00000048)
#define DP_PHY_AUX_INTERRUPT_CLEAR_V200 (0x0000004C)
#define DP_PHY_AUX_INTERRUPT_STATUS_V200 (0x000000BC)
#define DP_PHY_SPARE0 (0x00AC)
#define TXn_TX_EMP_POST1_LVL (0x000C)
#define TXn_TX_DRV_LVL (0x001C)
#define TXn_TX_POL_INV (0x0064)
#define DP_PHY_AUX_INTERRUPT_MASK_V420 (0x0054)
#define DP_PHY_AUX_INTERRUPT_CLEAR_V420 (0x0058)
#define DP_PHY_AUX_INTERRUPT_STATUS_V420 (0x00D8)
#define DP_PHY_SPARE0_V420 (0x00C8)
#define TXn_TX_DRV_LVL_V420 (0x0014)
#define TXn_TX_POL_INV_V420 (0x005C)
#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN (0x004)
/* DP MMSS_CC registers */
#define MMSS_DP_LINK_CMD_RCGR (0x0138)
#define MMSS_DP_LINK_CFG_RCGR (0x013C)
#define MMSS_DP_PIXEL_M (0x01B4)
#define MMSS_DP_PIXEL_N (0x01B8)
#define MMSS_DP_PIXEL1_M (0x01CC)
#define MMSS_DP_PIXEL1_N (0x01D0)
#define MMSS_DP_PIXEL_M_V200 (0x0130)
#define MMSS_DP_PIXEL_N_V200 (0x0134)
#define MMSS_DP_PIXEL1_M_V200 (0x0148)
#define MMSS_DP_PIXEL1_N_V200 (0x014C)
#define MMSS_DP_PIXEL_M_V420 (0x01B4)
#define MMSS_DP_PIXEL_N_V420 (0x01B8)
#define MMSS_DP_PIXEL1_M_V420 (0x01CC)
#define MMSS_DP_PIXEL1_N_V420 (0x01D0)
/* DP HDCP 1.3 registers */
#define DP_HDCP_CTRL (0x0A0)
#define DP_HDCP_STATUS (0x0A4)
#define DP_HDCP_SW_UPPER_AKSV (0x098)
#define DP_HDCP_SW_LOWER_AKSV (0x09C)
#define DP_HDCP_ENTROPY_CTRL0 (0x350)
#define DP_HDCP_ENTROPY_CTRL1 (0x35C)
#define DP_HDCP_SHA_STATUS (0x0C8)
#define DP_HDCP_RCVPORT_DATA2_0 (0x0B0)
#define DP_HDCP_RCVPORT_DATA3 (0x0A4)
#define DP_HDCP_RCVPORT_DATA4 (0x0A8)
#define DP_HDCP_RCVPORT_DATA5 (0x0C0)
#define DP_HDCP_RCVPORT_DATA6 (0x0C4)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL (0x024)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA (0x028)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x004)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x008)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x00C)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x010)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x014)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x018)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x01C)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x020)
/* USB3 DP COM registers */
#define USB3_DP_COM_RESET_OVRD_CTRL (0x1C)
#define USB3_DP_COM_PHY_MODE_CTRL (0x00)
#define USB3_DP_COM_SW_RESET (0x04)
#define USB3_DP_COM_TYPEC_CTRL (0x10)
#define USB3_DP_COM_SWI_CTRL (0x0c)
#define USB3_DP_COM_POWER_DOWN_CTRL (0x08)
#endif /* _DP_REG_H_ */
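/*
 * These offsets are applied to the mapped base of the corresponding
 * register block; a minimal read sketch, assuming a valid dss_io_data
 * mapping obtained through the parser module.
 */
static u32 example_dp_read_hw_version(struct dss_io_data *io)
{
	/* io->base holds the mapped register base, io->len its size */
	return readl_relaxed(io->base + DP_HW_VERSION);
}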

563
msm/dp/dp_usbpd.c Normal file

@@ -0,0 +1,563 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/usb/usbpd.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/delay.h>
#include "dp_usbpd.h"
/* DP specific VDM commands */
#define DP_USBPD_VDM_STATUS 0x10
#define DP_USBPD_VDM_CONFIGURE 0x11
/* USBPD-TypeC specific Macros */
#define VDM_VERSION 0x0
#define USB_C_DP_SID 0xFF01
enum dp_usbpd_pin_assignment {
DP_USBPD_PIN_A,
DP_USBPD_PIN_B,
DP_USBPD_PIN_C,
DP_USBPD_PIN_D,
DP_USBPD_PIN_E,
DP_USBPD_PIN_F,
DP_USBPD_PIN_MAX,
};
enum dp_usbpd_events {
DP_USBPD_EVT_DISCOVER,
DP_USBPD_EVT_ENTER,
DP_USBPD_EVT_STATUS,
DP_USBPD_EVT_CONFIGURE,
DP_USBPD_EVT_CC_PIN_POLARITY,
DP_USBPD_EVT_EXIT,
DP_USBPD_EVT_ATTENTION,
};
enum dp_usbpd_alt_mode {
DP_USBPD_ALT_MODE_NONE = 0,
DP_USBPD_ALT_MODE_INIT = BIT(0),
DP_USBPD_ALT_MODE_DISCOVER = BIT(1),
DP_USBPD_ALT_MODE_ENTER = BIT(2),
DP_USBPD_ALT_MODE_STATUS = BIT(3),
DP_USBPD_ALT_MODE_CONFIGURE = BIT(4),
};
struct dp_usbpd_capabilities {
enum dp_usbpd_port port;
bool receptacle_state;
u8 ulink_pin_config;
u8 dlink_pin_config;
};
struct dp_usbpd_private {
bool forced_disconnect;
u32 vdo;
struct device *dev;
struct usbpd *pd;
struct usbpd_svid_handler svid_handler;
struct dp_hpd_cb *dp_cb;
struct dp_usbpd_capabilities cap;
struct dp_usbpd dp_usbpd;
enum dp_usbpd_alt_mode alt_mode;
u32 dp_usbpd_config;
};
static const char *dp_usbpd_pin_name(u8 pin)
{
switch (pin) {
case DP_USBPD_PIN_A: return "DP_USBPD_PIN_ASSIGNMENT_A";
case DP_USBPD_PIN_B: return "DP_USBPD_PIN_ASSIGNMENT_B";
case DP_USBPD_PIN_C: return "DP_USBPD_PIN_ASSIGNMENT_C";
case DP_USBPD_PIN_D: return "DP_USBPD_PIN_ASSIGNMENT_D";
case DP_USBPD_PIN_E: return "DP_USBPD_PIN_ASSIGNMENT_E";
case DP_USBPD_PIN_F: return "DP_USBPD_PIN_ASSIGNMENT_F";
default: return "UNKNOWN";
}
}
static const char *dp_usbpd_port_name(enum dp_usbpd_port port)
{
switch (port) {
case DP_USBPD_PORT_NONE: return "DP_USBPD_PORT_NONE";
case DP_USBPD_PORT_UFP_D: return "DP_USBPD_PORT_UFP_D";
case DP_USBPD_PORT_DFP_D: return "DP_USBPD_PORT_DFP_D";
case DP_USBPD_PORT_D_UFP_D: return "DP_USBPD_PORT_D_UFP_D";
default: return "DP_USBPD_PORT_NONE";
}
}
static const char *dp_usbpd_cmd_name(u8 cmd)
{
switch (cmd) {
case USBPD_SVDM_DISCOVER_MODES: return "USBPD_SVDM_DISCOVER_MODES";
case USBPD_SVDM_ENTER_MODE: return "USBPD_SVDM_ENTER_MODE";
case USBPD_SVDM_ATTENTION: return "USBPD_SVDM_ATTENTION";
case DP_USBPD_VDM_STATUS: return "DP_USBPD_VDM_STATUS";
case DP_USBPD_VDM_CONFIGURE: return "DP_USBPD_VDM_CONFIGURE";
default: return "DP_USBPD_VDM_ERROR";
}
}
static void dp_usbpd_init_port(enum dp_usbpd_port *port, u32 in_port)
{
switch (in_port) {
case 0:
*port = DP_USBPD_PORT_NONE;
break;
case 1:
*port = DP_USBPD_PORT_UFP_D;
break;
case 2:
*port = DP_USBPD_PORT_DFP_D;
break;
case 3:
*port = DP_USBPD_PORT_D_UFP_D;
break;
default:
*port = DP_USBPD_PORT_NONE;
}
pr_debug("port:%s\n", dp_usbpd_port_name(*port));
}
static void dp_usbpd_get_capabilities(struct dp_usbpd_private *pd)
{
struct dp_usbpd_capabilities *cap = &pd->cap;
u32 buf = pd->vdo;
int port = buf & 0x3;
cap->receptacle_state = (buf & BIT(6)) ? true : false;
cap->dlink_pin_config = (buf >> 8) & 0xff;
cap->ulink_pin_config = (buf >> 16) & 0xff;
dp_usbpd_init_port(&cap->port, port);
}
static void dp_usbpd_get_status(struct dp_usbpd_private *pd)
{
struct dp_usbpd *status = &pd->dp_usbpd;
u32 buf = pd->vdo;
int port = buf & 0x3;
status->low_pow_st = (buf & BIT(2)) ? true : false;
status->adaptor_dp_en = (buf & BIT(3)) ? true : false;
status->base.multi_func = (buf & BIT(4)) ? true : false;
status->usb_config_req = (buf & BIT(5)) ? true : false;
status->exit_dp_mode = (buf & BIT(6)) ? true : false;
status->base.hpd_high = (buf & BIT(7)) ? true : false;
status->base.hpd_irq = (buf & BIT(8)) ? true : false;
pr_debug("low_pow_st = %d, adaptor_dp_en = %d, multi_func = %d\n",
status->low_pow_st, status->adaptor_dp_en,
status->base.multi_func);
pr_debug("usb_config_req = %d, exit_dp_mode = %d, hpd_high =%d\n",
status->usb_config_req,
status->exit_dp_mode, status->base.hpd_high);
pr_debug("hpd_irq = %d\n", status->base.hpd_irq);
dp_usbpd_init_port(&status->port, port);
}
static u32 dp_usbpd_gen_config_pkt(struct dp_usbpd_private *pd)
{
u8 pin_cfg, pin;
u32 config = 0;
const u32 ufp_d_config = 0x2, dp_ver = 0x1;
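	/*
	 * Configure VDO layout, as assembled below: bits[1:0] carry the
	 * configuration select (0x2: configure for UFP_D), bits[5:2] the
	 * DP signaling version and bits[15:8] the chosen pin assignment.
	 */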
if (pd->cap.receptacle_state)
pin_cfg = pd->cap.ulink_pin_config;
else
pin_cfg = pd->cap.dlink_pin_config;
for (pin = DP_USBPD_PIN_A; pin < DP_USBPD_PIN_MAX; pin++) {
if (pin_cfg & BIT(pin)) {
if (pd->dp_usbpd.base.multi_func) {
if (pin == DP_USBPD_PIN_D)
break;
} else {
break;
}
}
}
if (pin == DP_USBPD_PIN_MAX)
pin = DP_USBPD_PIN_C;
pr_debug("pin assignment: %s\n", dp_usbpd_pin_name(pin));
config |= BIT(pin) << 8;
config |= (dp_ver << 2);
config |= ufp_d_config;
pr_debug("config = 0x%x\n", config);
return config;
}
static void dp_usbpd_send_event(struct dp_usbpd_private *pd,
enum dp_usbpd_events event)
{
u32 config;
switch (event) {
case DP_USBPD_EVT_DISCOVER:
usbpd_send_svdm(pd->pd, USB_C_DP_SID,
USBPD_SVDM_DISCOVER_MODES,
SVDM_CMD_TYPE_INITIATOR, 0x0, 0x0, 0x0);
break;
case DP_USBPD_EVT_ENTER:
usbpd_send_svdm(pd->pd, USB_C_DP_SID,
USBPD_SVDM_ENTER_MODE,
SVDM_CMD_TYPE_INITIATOR, 0x1, 0x0, 0x0);
break;
case DP_USBPD_EVT_EXIT:
usbpd_send_svdm(pd->pd, USB_C_DP_SID,
USBPD_SVDM_EXIT_MODE,
SVDM_CMD_TYPE_INITIATOR, 0x1, 0x0, 0x0);
break;
case DP_USBPD_EVT_STATUS:
config = 0x1; /* DFP_D connected */
usbpd_send_svdm(pd->pd, USB_C_DP_SID, DP_USBPD_VDM_STATUS,
SVDM_CMD_TYPE_INITIATOR, 0x1, &config, 0x1);
break;
case DP_USBPD_EVT_CONFIGURE:
config = dp_usbpd_gen_config_pkt(pd);
usbpd_send_svdm(pd->pd, USB_C_DP_SID, DP_USBPD_VDM_CONFIGURE,
SVDM_CMD_TYPE_INITIATOR, 0x1, &config, 0x1);
break;
default:
pr_err("unknown event:%d\n", event);
}
}
static void dp_usbpd_connect_cb(struct usbpd_svid_handler *hdlr)
{
	struct dp_usbpd_private *pd;

	if (!hdlr) {
		pr_err("invalid usbpd svid handler\n");
		return;
	}
	pd = container_of(hdlr, struct dp_usbpd_private, svid_handler);
pr_debug("\n");
dp_usbpd_send_event(pd, DP_USBPD_EVT_DISCOVER);
}
static void dp_usbpd_disconnect_cb(struct usbpd_svid_handler *hdlr)
{
	struct dp_usbpd_private *pd;

	if (!hdlr) {
		pr_err("invalid usbpd svid handler\n");
		return;
	}
	pd = container_of(hdlr, struct dp_usbpd_private, svid_handler);
pd->alt_mode = DP_USBPD_ALT_MODE_NONE;
pd->dp_usbpd.base.alt_mode_cfg_done = false;
pr_debug("\n");
if (pd->dp_cb && pd->dp_cb->disconnect)
pd->dp_cb->disconnect(pd->dev);
}
static int dp_usbpd_validate_callback(u8 cmd,
enum usbpd_svdm_cmd_type cmd_type, int num_vdos)
{
int ret = 0;
if (cmd_type == SVDM_CMD_TYPE_RESP_NAK) {
pr_err("error: NACK\n");
ret = -EINVAL;
goto end;
}
if (cmd_type == SVDM_CMD_TYPE_RESP_BUSY) {
pr_err("error: BUSY\n");
ret = -EBUSY;
goto end;
}
if (cmd == USBPD_SVDM_ATTENTION) {
if (cmd_type != SVDM_CMD_TYPE_INITIATOR) {
pr_err("error: invalid cmd type for attention\n");
ret = -EINVAL;
goto end;
}
if (!num_vdos) {
pr_err("error: no vdo provided\n");
ret = -EINVAL;
goto end;
}
} else {
if (cmd_type != SVDM_CMD_TYPE_RESP_ACK) {
pr_err("error: invalid cmd type\n");
ret = -EINVAL;
}
}
end:
return ret;
}
static int dp_usbpd_get_ss_lanes(struct dp_usbpd_private *pd)
{
int rc = 0;
int timeout = 250;
/*
	 * By default, USB reserves two lanes for Super Speed,
	 * which means DP has the remaining two lanes to operate on.
	 * If multi-function is not supported, request USB to
* release the Super Speed lanes so that DP can use
* all four lanes in case DPCD indicates support for
* four lanes.
*/
if (!pd->dp_usbpd.base.multi_func) {
while (timeout) {
rc = pd->svid_handler.request_usb_ss_lane(
pd->pd, &pd->svid_handler);
if (rc != -EBUSY)
break;
pr_warn("USB busy, retry\n");
/* wait for hw recommended delay for usb */
msleep(20);
timeout--;
}
}
return rc;
}
static void dp_usbpd_response_cb(struct usbpd_svid_handler *hdlr, u8 cmd,
enum usbpd_svdm_cmd_type cmd_type,
const u32 *vdos, int num_vdos)
{
struct dp_usbpd_private *pd;
int rc = 0;
pd = container_of(hdlr, struct dp_usbpd_private, svid_handler);
pr_debug("callback -> cmd: %s, *vdos = 0x%x, num_vdos = %d\n",
dp_usbpd_cmd_name(cmd), *vdos, num_vdos);
if (dp_usbpd_validate_callback(cmd, cmd_type, num_vdos)) {
pr_debug("invalid callback received\n");
return;
}
switch (cmd) {
case USBPD_SVDM_DISCOVER_MODES:
pd->vdo = *vdos;
dp_usbpd_get_capabilities(pd);
pd->alt_mode |= DP_USBPD_ALT_MODE_DISCOVER;
if (pd->cap.port & BIT(0))
dp_usbpd_send_event(pd, DP_USBPD_EVT_ENTER);
break;
case USBPD_SVDM_ENTER_MODE:
pd->alt_mode |= DP_USBPD_ALT_MODE_ENTER;
dp_usbpd_send_event(pd, DP_USBPD_EVT_STATUS);
break;
case USBPD_SVDM_ATTENTION:
if (pd->forced_disconnect)
break;
pd->vdo = *vdos;
dp_usbpd_get_status(pd);
if (!pd->dp_usbpd.base.alt_mode_cfg_done) {
if (pd->dp_usbpd.port & BIT(1))
dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE);
break;
}
if (pd->dp_cb && pd->dp_cb->attention)
pd->dp_cb->attention(pd->dev);
break;
case DP_USBPD_VDM_STATUS:
pd->vdo = *vdos;
dp_usbpd_get_status(pd);
if (!(pd->alt_mode & DP_USBPD_ALT_MODE_CONFIGURE)) {
pd->alt_mode |= DP_USBPD_ALT_MODE_STATUS;
if (pd->dp_usbpd.port & BIT(1))
dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE);
}
break;
case DP_USBPD_VDM_CONFIGURE:
pd->alt_mode |= DP_USBPD_ALT_MODE_CONFIGURE;
pd->dp_usbpd.base.alt_mode_cfg_done = true;
dp_usbpd_get_status(pd);
pd->dp_usbpd.base.orientation =
usbpd_get_plug_orientation(pd->pd);
rc = dp_usbpd_get_ss_lanes(pd);
if (rc) {
pr_err("failed to get SuperSpeed lanes\n");
break;
}
if (pd->dp_cb && pd->dp_cb->configure)
pd->dp_cb->configure(pd->dev);
break;
default:
pr_err("unknown cmd: %d\n", cmd);
break;
}
}
static int dp_usbpd_simulate_connect(struct dp_hpd *dp_hpd, bool hpd)
{
int rc = 0;
struct dp_usbpd *dp_usbpd;
struct dp_usbpd_private *pd;
if (!dp_hpd) {
pr_err("invalid input\n");
rc = -EINVAL;
goto error;
}
dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base);
pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
dp_usbpd->base.hpd_high = hpd;
pd->forced_disconnect = !hpd;
pd->dp_usbpd.base.alt_mode_cfg_done = hpd;
pr_debug("hpd_high=%d, forced_disconnect=%d, orientation=%d\n",
dp_usbpd->base.hpd_high, pd->forced_disconnect,
pd->dp_usbpd.base.orientation);
if (hpd)
pd->dp_cb->configure(pd->dev);
else
pd->dp_cb->disconnect(pd->dev);
error:
return rc;
}
static int dp_usbpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo)
{
int rc = 0;
struct dp_usbpd *dp_usbpd;
struct dp_usbpd_private *pd;
	if (!dp_hpd) {
		pr_err("invalid input\n");
		rc = -EINVAL;
		goto error;
	}
	dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base);
pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
pd->vdo = vdo;
dp_usbpd_get_status(pd);
if (pd->dp_cb && pd->dp_cb->attention)
pd->dp_cb->attention(pd->dev);
error:
return rc;
}
int dp_usbpd_register(struct dp_hpd *dp_hpd)
{
struct dp_usbpd *dp_usbpd;
struct dp_usbpd_private *usbpd;
int rc = 0;
if (!dp_hpd)
return -EINVAL;
dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base);
usbpd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
rc = usbpd_register_svid(usbpd->pd, &usbpd->svid_handler);
if (rc)
pr_err("pd registration failed\n");
return rc;
}
struct dp_hpd *dp_usbpd_get(struct device *dev, struct dp_hpd_cb *cb)
{
int rc = 0;
const char *pd_phandle = "qcom,dp-usbpd-detection";
struct usbpd *pd = NULL;
struct dp_usbpd_private *usbpd;
struct dp_usbpd *dp_usbpd;
struct usbpd_svid_handler svid_handler = {
.svid = USB_C_DP_SID,
.vdm_received = NULL,
.connect = &dp_usbpd_connect_cb,
.svdm_received = &dp_usbpd_response_cb,
.disconnect = &dp_usbpd_disconnect_cb,
};
if (!cb) {
pr_err("invalid cb data\n");
rc = -EINVAL;
goto error;
}
pd = devm_usbpd_get_by_phandle(dev, pd_phandle);
if (IS_ERR(pd)) {
pr_err("usbpd phandle failed (%ld)\n", PTR_ERR(pd));
rc = PTR_ERR(pd);
goto error;
}
usbpd = devm_kzalloc(dev, sizeof(*usbpd), GFP_KERNEL);
if (!usbpd) {
rc = -ENOMEM;
goto error;
}
usbpd->dev = dev;
usbpd->pd = pd;
usbpd->svid_handler = svid_handler;
usbpd->dp_cb = cb;
dp_usbpd = &usbpd->dp_usbpd;
dp_usbpd->base.simulate_connect = dp_usbpd_simulate_connect;
dp_usbpd->base.simulate_attention = dp_usbpd_simulate_attention;
dp_usbpd->base.register_hpd = dp_usbpd_register;
return &dp_usbpd->base;
error:
return ERR_PTR(rc);
}
void dp_usbpd_put(struct dp_hpd *dp_hpd)
{
struct dp_usbpd *dp_usbpd;
struct dp_usbpd_private *usbpd;
if (!dp_hpd)
return;
dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base);
usbpd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
usbpd_unregister_svid(usbpd->pd, &usbpd->svid_handler);
devm_kfree(usbpd->dev, usbpd);
}

64
msm/dp/dp_usbpd.h Normal file
View File

@@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DP_USBPD_H_
#define _DP_USBPD_H_
#include <linux/types.h>
#include "dp_hpd.h"
struct device;
/**
* enum dp_usbpd_port - usb/dp port type
* @DP_USBPD_PORT_NONE: port not configured
* @DP_USBPD_PORT_UFP_D: Upstream Facing Port - DisplayPort
* @DP_USBPD_PORT_DFP_D: Downstream Facing Port - DisplayPort
* @DP_USBPD_PORT_D_UFP_D: Both UFP & DFP - DisplayPort
*/
enum dp_usbpd_port {
DP_USBPD_PORT_NONE,
DP_USBPD_PORT_UFP_D,
DP_USBPD_PORT_DFP_D,
DP_USBPD_PORT_D_UFP_D,
};
/**
* struct dp_usbpd - DisplayPort status
*
* @base: DP HPD handle
* @port: port configured
* @low_pow_st: low power state
* @adaptor_dp_en: adaptor functionality enabled
* @usb_config_req: request to switch to usb
* @exit_dp_mode: request exit from displayport mode
* @debug_en: bool to specify debug mode
*/
struct dp_usbpd {
struct dp_hpd base;
enum dp_usbpd_port port;
bool low_pow_st;
bool adaptor_dp_en;
bool usb_config_req;
bool exit_dp_mode;
bool debug_en;
};
/**
* dp_usbpd_get() - setup usbpd module
*
* @dev: device instance of the caller
* @cb: struct containing callback function pointers.
*
* This function allows the client to initialize the usbpd
* module. The module communicates with the USB driver and
* handles the power delivery (PD) communication with the
* sink/usb device. It notifies the client about connection
* and status changes through the callback functions.
*/
struct dp_hpd *dp_usbpd_get(struct device *dev, struct dp_hpd_cb *cb);
void dp_usbpd_put(struct dp_hpd *pd);
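/*
 * Example (hypothetical sketch, not part of this driver): a DP display
 * client would typically populate its HPD callbacks, acquire the usbpd
 * module and then register for PD events. The callback names below are
 * illustrative only.
 *
 *	static struct dp_hpd_cb dp_example_cb = {
 *		.configure  = dp_example_configure,
 *		.disconnect = dp_example_disconnect,
 *		.attention  = dp_example_attention,
 *	};
 *
 *	struct dp_hpd *hpd = dp_usbpd_get(dev, &dp_example_cb);
 *
 *	if (IS_ERR(hpd))
 *		return PTR_ERR(hpd);
 *	rc = hpd->register_hpd(hpd);
 */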
#endif /* _DP_USBPD_H_ */

298
msm/dsi/dsi_catalog.c Normal file
View File

@@ -0,0 +1,298 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "msm-dsi-catalog:[%s] " fmt, __func__
#include <linux/errno.h>
#include "dsi_catalog.h"
/**
* dsi_catalog_cmn_init() - common catalog init for all DSI controller versions
*/
static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
enum dsi_ctrl_version version)
{
/* common functions */
ctrl->ops.host_setup = dsi_ctrl_hw_cmn_host_setup;
ctrl->ops.video_engine_en = dsi_ctrl_hw_cmn_video_engine_en;
ctrl->ops.video_engine_setup = dsi_ctrl_hw_cmn_video_engine_setup;
ctrl->ops.set_video_timing = dsi_ctrl_hw_cmn_set_video_timing;
ctrl->ops.set_timing_db = dsi_ctrl_hw_cmn_set_timing_db;
ctrl->ops.cmd_engine_setup = dsi_ctrl_hw_cmn_cmd_engine_setup;
ctrl->ops.setup_cmd_stream = dsi_ctrl_hw_cmn_setup_cmd_stream;
ctrl->ops.ctrl_en = dsi_ctrl_hw_cmn_ctrl_en;
ctrl->ops.cmd_engine_en = dsi_ctrl_hw_cmn_cmd_engine_en;
ctrl->ops.phy_sw_reset = dsi_ctrl_hw_cmn_phy_sw_reset;
ctrl->ops.soft_reset = dsi_ctrl_hw_cmn_soft_reset;
ctrl->ops.kickoff_command = dsi_ctrl_hw_cmn_kickoff_command;
ctrl->ops.kickoff_fifo_command = dsi_ctrl_hw_cmn_kickoff_fifo_command;
ctrl->ops.reset_cmd_fifo = dsi_ctrl_hw_cmn_reset_cmd_fifo;
ctrl->ops.trigger_command_dma = dsi_ctrl_hw_cmn_trigger_command_dma;
ctrl->ops.get_interrupt_status = dsi_ctrl_hw_cmn_get_interrupt_status;
ctrl->ops.get_error_status = dsi_ctrl_hw_cmn_get_error_status;
ctrl->ops.clear_error_status = dsi_ctrl_hw_cmn_clear_error_status;
ctrl->ops.clear_interrupt_status =
dsi_ctrl_hw_cmn_clear_interrupt_status;
ctrl->ops.enable_status_interrupts =
dsi_ctrl_hw_cmn_enable_status_interrupts;
ctrl->ops.enable_error_interrupts =
dsi_ctrl_hw_cmn_enable_error_interrupts;
ctrl->ops.video_test_pattern_setup =
dsi_ctrl_hw_cmn_video_test_pattern_setup;
ctrl->ops.cmd_test_pattern_setup =
dsi_ctrl_hw_cmn_cmd_test_pattern_setup;
ctrl->ops.test_pattern_enable = dsi_ctrl_hw_cmn_test_pattern_enable;
ctrl->ops.trigger_cmd_test_pattern =
dsi_ctrl_hw_cmn_trigger_cmd_test_pattern;
ctrl->ops.clear_phy0_ln_err = dsi_ctrl_hw_dln0_phy_err;
ctrl->ops.phy_reset_config = dsi_ctrl_hw_cmn_phy_reset_config;
ctrl->ops.setup_misr = dsi_ctrl_hw_cmn_setup_misr;
ctrl->ops.collect_misr = dsi_ctrl_hw_cmn_collect_misr;
ctrl->ops.debug_bus = dsi_ctrl_hw_cmn_debug_bus;
ctrl->ops.get_cmd_read_data = dsi_ctrl_hw_cmn_get_cmd_read_data;
ctrl->ops.clear_rdbk_register = dsi_ctrl_hw_cmn_clear_rdbk_reg;
ctrl->ops.ctrl_reset = dsi_ctrl_hw_cmn_ctrl_reset;
ctrl->ops.mask_error_intr = dsi_ctrl_hw_cmn_mask_error_intr;
ctrl->ops.error_intr_ctrl = dsi_ctrl_hw_cmn_error_intr_ctrl;
ctrl->ops.get_error_mask = dsi_ctrl_hw_cmn_get_error_mask;
ctrl->ops.get_hw_version = dsi_ctrl_hw_cmn_get_hw_version;
ctrl->ops.wait_for_cmd_mode_mdp_idle =
dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle;
ctrl->ops.setup_avr = dsi_ctrl_hw_cmn_setup_avr;
ctrl->ops.set_continuous_clk = dsi_ctrl_hw_cmn_set_continuous_clk;
switch (version) {
case DSI_CTRL_VERSION_1_4:
ctrl->ops.setup_lane_map = dsi_ctrl_hw_14_setup_lane_map;
ctrl->ops.ulps_ops.ulps_request = dsi_ctrl_hw_cmn_ulps_request;
ctrl->ops.ulps_ops.ulps_exit = dsi_ctrl_hw_cmn_ulps_exit;
ctrl->ops.wait_for_lane_idle =
dsi_ctrl_hw_14_wait_for_lane_idle;
ctrl->ops.ulps_ops.get_lanes_in_ulps =
dsi_ctrl_hw_cmn_get_lanes_in_ulps;
ctrl->ops.clamp_enable = dsi_ctrl_hw_14_clamp_enable;
ctrl->ops.clamp_disable = dsi_ctrl_hw_14_clamp_disable;
ctrl->ops.reg_dump_to_buffer =
dsi_ctrl_hw_14_reg_dump_to_buffer;
ctrl->ops.schedule_dma_cmd = NULL;
ctrl->ops.get_cont_splash_status = NULL;
ctrl->ops.kickoff_command_non_embedded_mode = NULL;
ctrl->ops.config_clk_gating = NULL;
break;
case DSI_CTRL_VERSION_2_0:
ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map;
ctrl->ops.wait_for_lane_idle =
dsi_ctrl_hw_20_wait_for_lane_idle;
ctrl->ops.reg_dump_to_buffer =
dsi_ctrl_hw_20_reg_dump_to_buffer;
ctrl->ops.ulps_ops.ulps_request = NULL;
ctrl->ops.ulps_ops.ulps_exit = NULL;
ctrl->ops.ulps_ops.get_lanes_in_ulps = NULL;
ctrl->ops.clamp_enable = NULL;
ctrl->ops.clamp_disable = NULL;
ctrl->ops.schedule_dma_cmd = NULL;
ctrl->ops.get_cont_splash_status = NULL;
ctrl->ops.kickoff_command_non_embedded_mode = NULL;
ctrl->ops.config_clk_gating = NULL;
break;
case DSI_CTRL_VERSION_2_2:
case DSI_CTRL_VERSION_2_3:
case DSI_CTRL_VERSION_2_4:
ctrl->ops.phy_reset_config = dsi_ctrl_hw_22_phy_reset_config;
ctrl->ops.config_clk_gating = dsi_ctrl_hw_22_config_clk_gating;
ctrl->ops.get_cont_splash_status =
dsi_ctrl_hw_22_get_cont_splash_status;
ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map;
ctrl->ops.wait_for_lane_idle =
dsi_ctrl_hw_20_wait_for_lane_idle;
ctrl->ops.reg_dump_to_buffer =
dsi_ctrl_hw_20_reg_dump_to_buffer;
ctrl->ops.ulps_ops.ulps_request = dsi_ctrl_hw_cmn_ulps_request;
ctrl->ops.ulps_ops.ulps_exit = dsi_ctrl_hw_cmn_ulps_exit;
ctrl->ops.ulps_ops.get_lanes_in_ulps =
dsi_ctrl_hw_cmn_get_lanes_in_ulps;
ctrl->ops.clamp_enable = NULL;
ctrl->ops.clamp_disable = NULL;
ctrl->ops.schedule_dma_cmd = dsi_ctrl_hw_22_schedule_dma_cmd;
ctrl->ops.kickoff_command_non_embedded_mode =
dsi_ctrl_hw_kickoff_non_embedded_mode;
break;
default:
break;
}
}
/**
* dsi_catalog_ctrl_setup() - return catalog info for dsi controller
* @ctrl: Pointer to DSI controller hw object.
* @version: DSI controller version.
* @index: DSI controller instance ID.
* @phy_isolation_enabled: DSI controller works isolated from phy.
* @null_insertion_enabled: DSI controller inserts null packet.
*
* This function sets up the catalog information in the dsi_ctrl_hw object.
*
* return: error code for failure and 0 for success.
*/
int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
enum dsi_ctrl_version version, u32 index,
bool phy_isolation_enabled, bool null_insertion_enabled)
{
int rc = 0;
if (version == DSI_CTRL_VERSION_UNKNOWN ||
version >= DSI_CTRL_VERSION_MAX) {
pr_err("Unsupported version: %d\n", version);
return -ENOTSUPP;
}
ctrl->index = index;
ctrl->null_insertion_enabled = null_insertion_enabled;
set_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map);
set_bit(DSI_CTRL_CMD_TPG, ctrl->feature_map);
set_bit(DSI_CTRL_VARIABLE_REFRESH_RATE, ctrl->feature_map);
set_bit(DSI_CTRL_DYNAMIC_REFRESH, ctrl->feature_map);
set_bit(DSI_CTRL_DESKEW_CALIB, ctrl->feature_map);
set_bit(DSI_CTRL_DPHY, ctrl->feature_map);
switch (version) {
case DSI_CTRL_VERSION_1_4:
dsi_catalog_cmn_init(ctrl, version);
break;
case DSI_CTRL_VERSION_2_0:
case DSI_CTRL_VERSION_2_2:
case DSI_CTRL_VERSION_2_3:
case DSI_CTRL_VERSION_2_4:
ctrl->phy_isolation_enabled = phy_isolation_enabled;
dsi_catalog_cmn_init(ctrl, version);
break;
default:
return -ENOTSUPP;
}
return rc;
}
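/*
 * Example (illustrative only): the controller driver is expected to call
 * the setup above once the hardware version and instance id are known,
 * e.g. after parsing device tree. The caller context here is an assumption.
 *
 *	rc = dsi_catalog_ctrl_setup(&dsi_ctrl->hw, DSI_CTRL_VERSION_2_4,
 *			dsi_ctrl->cell_index, false, false);
 *	if (rc)
 *		pr_err("catalog setup failed, rc=%d\n", rc);
 */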
/**
* dsi_catalog_phy_2_0_init() - catalog init for DSI PHY 14nm
*/
static void dsi_catalog_phy_2_0_init(struct dsi_phy_hw *phy)
{
phy->ops.regulator_enable = dsi_phy_hw_v2_0_regulator_enable;
phy->ops.regulator_disable = dsi_phy_hw_v2_0_regulator_disable;
phy->ops.enable = dsi_phy_hw_v2_0_enable;
phy->ops.disable = dsi_phy_hw_v2_0_disable;
phy->ops.calculate_timing_params =
dsi_phy_hw_calculate_timing_params;
phy->ops.phy_idle_on = dsi_phy_hw_v2_0_idle_on;
phy->ops.phy_idle_off = dsi_phy_hw_v2_0_idle_off;
phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v2_0;
phy->ops.clamp_ctrl = dsi_phy_hw_v2_0_clamp_ctrl;
}
/**
* dsi_catalog_phy_3_0_init() - catalog init for DSI PHY 10nm
*/
static void dsi_catalog_phy_3_0_init(struct dsi_phy_hw *phy)
{
phy->ops.regulator_enable = dsi_phy_hw_v3_0_regulator_enable;
phy->ops.regulator_disable = dsi_phy_hw_v3_0_regulator_disable;
phy->ops.enable = dsi_phy_hw_v3_0_enable;
phy->ops.disable = dsi_phy_hw_v3_0_disable;
phy->ops.calculate_timing_params =
dsi_phy_hw_calculate_timing_params;
phy->ops.ulps_ops.wait_for_lane_idle =
dsi_phy_hw_v3_0_wait_for_lane_idle;
phy->ops.ulps_ops.ulps_request =
dsi_phy_hw_v3_0_ulps_request;
phy->ops.ulps_ops.ulps_exit =
dsi_phy_hw_v3_0_ulps_exit;
phy->ops.ulps_ops.get_lanes_in_ulps =
dsi_phy_hw_v3_0_get_lanes_in_ulps;
phy->ops.ulps_ops.is_lanes_in_ulps =
dsi_phy_hw_v3_0_is_lanes_in_ulps;
phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v3_0;
phy->ops.clamp_ctrl = dsi_phy_hw_v3_0_clamp_ctrl;
phy->ops.phy_lane_reset = dsi_phy_hw_v3_0_lane_reset;
phy->ops.toggle_resync_fifo = dsi_phy_hw_v3_0_toggle_resync_fifo;
}
/**
* dsi_catalog_phy_4_0_init() - catalog init for DSI PHY 7nm
*/
static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy)
{
phy->ops.regulator_enable = NULL;
phy->ops.regulator_disable = NULL;
phy->ops.enable = dsi_phy_hw_v4_0_enable;
phy->ops.disable = dsi_phy_hw_v4_0_disable;
phy->ops.calculate_timing_params =
dsi_phy_hw_calculate_timing_params;
phy->ops.ulps_ops.wait_for_lane_idle =
dsi_phy_hw_v4_0_wait_for_lane_idle;
phy->ops.ulps_ops.ulps_request =
dsi_phy_hw_v4_0_ulps_request;
phy->ops.ulps_ops.ulps_exit =
dsi_phy_hw_v4_0_ulps_exit;
phy->ops.ulps_ops.get_lanes_in_ulps =
dsi_phy_hw_v4_0_get_lanes_in_ulps;
phy->ops.ulps_ops.is_lanes_in_ulps =
dsi_phy_hw_v4_0_is_lanes_in_ulps;
phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v4_0;
phy->ops.phy_lane_reset = dsi_phy_hw_v4_0_lane_reset;
phy->ops.toggle_resync_fifo = dsi_phy_hw_v4_0_toggle_resync_fifo;
phy->ops.reset_clk_en_sel = dsi_phy_hw_v4_0_reset_clk_en_sel;
}
/**
* dsi_catalog_phy_setup() - return catalog info for dsi phy hardware
* @ctrl: Pointer to DSI PHY hw object.
* @version: DSI PHY version.
* @index: DSI PHY instance ID.
*
* This function sets up the catalog information in the dsi_phy_hw object.
*
* return: error code for failure and 0 for success.
*/
int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
enum dsi_phy_version version,
u32 index)
{
int rc = 0;
if (version == DSI_PHY_VERSION_UNKNOWN ||
version >= DSI_PHY_VERSION_MAX) {
pr_err("Unsupported version: %d\n", version);
return -ENOTSUPP;
}
phy->index = index;
phy->version = version;
set_bit(DSI_PHY_DPHY, phy->feature_map);
dsi_phy_timing_calc_init(phy, version);
switch (version) {
case DSI_PHY_VERSION_2_0:
dsi_catalog_phy_2_0_init(phy);
break;
case DSI_PHY_VERSION_3_0:
dsi_catalog_phy_3_0_init(phy);
break;
case DSI_PHY_VERSION_4_0:
case DSI_PHY_VERSION_4_1:
dsi_catalog_phy_4_0_init(phy);
break;
case DSI_PHY_VERSION_0_0_HPM:
case DSI_PHY_VERSION_0_0_LPM:
case DSI_PHY_VERSION_1_0:
default:
return -ENOTSUPP;
}
return rc;
}
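/*
 * Example (illustrative only): the PHY driver side mirrors the controller
 * catalog setup; the surrounding variables are assumptions.
 *
 *	rc = dsi_catalog_phy_setup(&dsi_phy->hw, DSI_PHY_VERSION_4_1,
 *			dsi_phy->index);
 *	if (rc)
 *		pr_err("phy catalog setup failed, rc=%d\n", rc);
 */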

242
msm/dsi/dsi_catalog.h Normal file
View File

@@ -0,0 +1,242 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_CATALOG_H_
#define _DSI_CATALOG_H_
#include "dsi_ctrl_hw.h"
#include "dsi_phy_hw.h"
/**
* dsi_catalog_ctrl_setup() - return catalog info for dsi controller
* @ctrl: Pointer to DSI controller hw object.
* @version: DSI controller version.
* @index: DSI controller instance ID.
* @phy_isolation_enabled: DSI controller works isolated from phy.
* @null_insertion_enabled: DSI controller inserts null packet.
*
* This function sets up the catalog information in the dsi_ctrl_hw object.
*
* return: error code for failure and 0 for success.
*/
int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
enum dsi_ctrl_version version, u32 index,
bool phy_isolation_enabled, bool null_insertion_enabled);
/**
* dsi_catalog_phy_setup() - return catalog info for dsi phy hardware
* @phy: Pointer to DSI PHY hw object.
* @version: DSI PHY version.
* @index: DSI PHY instance ID.
*
* This function sets up the catalog information in the dsi_phy_hw object.
*
* return: error code for failure and 0 for success.
*/
int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
enum dsi_phy_version version,
u32 index);
/**
* dsi_phy_timing_calc_init() - initialize info for DSI PHY timing calculations
* @phy: Pointer to DSI PHY hw object.
* @version: DSI PHY version.
*
* This function initializes the timing calculation ops in the dsi_phy_hw
* object.
*
* return: error code for failure and 0 for success.
*/
int dsi_phy_timing_calc_init(struct dsi_phy_hw *phy,
enum dsi_phy_version version);
/**
* dsi_phy_hw_calculate_timing_params() - DSI PHY timing parameter calculations
* @phy: Pointer to DSI PHY hw object.
* @mode: DSI mode information.
* @host: DSI host configuration.
* @timing: DSI phy lane configurations.
*
* This function calculates the DSI PHY timing parameters for the given
* mode and host configuration.
*
* return: error code for failure and 0 for success.
*/
int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
struct dsi_mode_info *mode,
struct dsi_host_common_cfg *host,
struct dsi_phy_per_lane_cfgs *timing);
/* Definitions for 14nm PHY hardware driver */
void dsi_phy_hw_v2_0_regulator_enable(struct dsi_phy_hw *phy,
struct dsi_phy_per_lane_cfgs *cfg);
void dsi_phy_hw_v2_0_regulator_disable(struct dsi_phy_hw *phy);
void dsi_phy_hw_v2_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
void dsi_phy_hw_v2_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
void dsi_phy_hw_v2_0_idle_on(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
void dsi_phy_hw_v2_0_idle_off(struct dsi_phy_hw *phy);
int dsi_phy_hw_timing_val_v2_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
u32 *timing_val, u32 size);
void dsi_phy_hw_v2_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable);
/* Definitions for 10nm PHY hardware driver */
void dsi_phy_hw_v3_0_regulator_enable(struct dsi_phy_hw *phy,
struct dsi_phy_per_lane_cfgs *cfg);
void dsi_phy_hw_v3_0_regulator_disable(struct dsi_phy_hw *phy);
void dsi_phy_hw_v3_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
void dsi_phy_hw_v3_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
int dsi_phy_hw_v3_0_wait_for_lane_idle(struct dsi_phy_hw *phy, u32 lanes);
void dsi_phy_hw_v3_0_ulps_request(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, u32 lanes);
void dsi_phy_hw_v3_0_ulps_exit(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, u32 lanes);
u32 dsi_phy_hw_v3_0_get_lanes_in_ulps(struct dsi_phy_hw *phy);
bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes);
int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
u32 *timing_val, u32 size);
void dsi_phy_hw_v3_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable);
int dsi_phy_hw_v3_0_lane_reset(struct dsi_phy_hw *phy);
void dsi_phy_hw_v3_0_toggle_resync_fifo(struct dsi_phy_hw *phy);
/* Definitions for 7nm PHY hardware driver */
void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
int dsi_phy_hw_v4_0_wait_for_lane_idle(struct dsi_phy_hw *phy, u32 lanes);
void dsi_phy_hw_v4_0_ulps_request(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, u32 lanes);
void dsi_phy_hw_v4_0_ulps_exit(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, u32 lanes);
u32 dsi_phy_hw_v4_0_get_lanes_in_ulps(struct dsi_phy_hw *phy);
bool dsi_phy_hw_v4_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes);
int dsi_phy_hw_timing_val_v4_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
u32 *timing_val, u32 size);
int dsi_phy_hw_v4_0_lane_reset(struct dsi_phy_hw *phy);
void dsi_phy_hw_v4_0_toggle_resync_fifo(struct dsi_phy_hw *phy);
void dsi_phy_hw_v4_0_reset_clk_en_sel(struct dsi_phy_hw *phy);
/* DSI controller common ops */
u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl, u32 *entries,
u32 size);
void dsi_ctrl_hw_cmn_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints);
void dsi_ctrl_hw_cmn_enable_status_interrupts(struct dsi_ctrl_hw *ctrl,
u32 ints);
u64 dsi_ctrl_hw_cmn_get_error_status(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_cmn_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors);
void dsi_ctrl_hw_cmn_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
u64 errors);
void dsi_ctrl_hw_cmn_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
enum dsi_test_pattern type,
u32 init_val);
void dsi_ctrl_hw_cmn_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
enum dsi_test_pattern type,
u32 init_val,
u32 stream_id);
void dsi_ctrl_hw_cmn_test_pattern_enable(struct dsi_ctrl_hw *ctrl, bool enable);
void dsi_ctrl_hw_cmn_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
u32 stream_id);
void dsi_ctrl_hw_cmn_host_setup(struct dsi_ctrl_hw *ctrl,
struct dsi_host_common_cfg *config);
void dsi_ctrl_hw_cmn_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
void dsi_ctrl_hw_cmn_video_engine_setup(struct dsi_ctrl_hw *ctrl,
struct dsi_host_common_cfg *common_cfg,
struct dsi_video_engine_cfg *cfg);
void dsi_ctrl_hw_cmn_setup_avr(struct dsi_ctrl_hw *ctrl, bool enable);
void dsi_ctrl_hw_cmn_set_video_timing(struct dsi_ctrl_hw *ctrl,
struct dsi_mode_info *mode);
void dsi_ctrl_hw_cmn_set_timing_db(struct dsi_ctrl_hw *ctrl,
bool enable);
void dsi_ctrl_hw_cmn_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
struct dsi_host_common_cfg *common_cfg,
struct dsi_cmd_engine_cfg *cfg);
void dsi_ctrl_hw_cmn_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on);
void dsi_ctrl_hw_cmn_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
struct dsi_mode_info *mode,
u32 h_stride,
u32 vc_id,
struct dsi_rect *roi);
void dsi_ctrl_hw_cmn_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_cmn_soft_reset(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_cmn_setup_misr(struct dsi_ctrl_hw *ctrl,
enum dsi_op_mode panel_mode,
bool enable, u32 frame_count);
u32 dsi_ctrl_hw_cmn_collect_misr(struct dsi_ctrl_hw *ctrl,
enum dsi_op_mode panel_mode);
void dsi_ctrl_hw_cmn_kickoff_command(struct dsi_ctrl_hw *ctrl,
struct dsi_ctrl_cmd_dma_info *cmd,
u32 flags);
void dsi_ctrl_hw_cmn_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
struct dsi_ctrl_cmd_dma_fifo_info *cmd,
u32 flags);
void dsi_ctrl_hw_cmn_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_cmn_trigger_command_dma(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_dln0_phy_err(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_cmn_phy_reset_config(struct dsi_ctrl_hw *ctrl,
bool enable);
void dsi_ctrl_hw_22_phy_reset_config(struct dsi_ctrl_hw *ctrl,
bool enable);
u32 dsi_ctrl_hw_cmn_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
u8 *rd_buf,
u32 read_offset,
u32 rx_byte,
u32 pkt_size, u32 *hw_read_cnt);
void dsi_ctrl_hw_cmn_clear_rdbk_reg(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_22_schedule_dma_cmd(struct dsi_ctrl_hw *ctrl, int line_on);
int dsi_ctrl_hw_cmn_ctrl_reset(struct dsi_ctrl_hw *ctrl,
int mask);
void dsi_ctrl_hw_cmn_mask_error_intr(struct dsi_ctrl_hw *ctrl, u32 idx,
bool en);
void dsi_ctrl_hw_cmn_error_intr_ctrl(struct dsi_ctrl_hw *ctrl, bool en);
u32 dsi_ctrl_hw_cmn_get_error_mask(struct dsi_ctrl_hw *ctrl);
u32 dsi_ctrl_hw_cmn_get_hw_version(struct dsi_ctrl_hw *ctrl);
int dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl_hw *ctrl);
/* Definitions specific to 1.4 DSI controller hardware */
int dsi_ctrl_hw_14_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes);
void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
struct dsi_lane_map *lane_map);
void dsi_ctrl_hw_cmn_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
void dsi_ctrl_hw_cmn_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes);
u32 dsi_ctrl_hw_cmn_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
u32 lanes,
bool enable_ulps);
void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
u32 lanes,
bool disable_ulps);
ssize_t dsi_ctrl_hw_14_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
char *buf,
u32 size);
/* Definitions specific to 2.0 DSI controller hardware */
void dsi_ctrl_hw_20_setup_lane_map(struct dsi_ctrl_hw *ctrl,
struct dsi_lane_map *lane_map);
int dsi_ctrl_hw_20_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes);
ssize_t dsi_ctrl_hw_20_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
char *buf,
u32 size);
void dsi_ctrl_hw_kickoff_non_embedded_mode(struct dsi_ctrl_hw *ctrl,
struct dsi_ctrl_cmd_dma_info *cmd,
u32 flags);
/* Definitions specific to 2.2 DSI controller hardware */
bool dsi_ctrl_hw_22_get_cont_splash_status(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_22_config_clk_gating(struct dsi_ctrl_hw *ctrl, bool enable,
enum dsi_clk_gate_type clk_selection);
void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable);
#endif /* _DSI_CATALOG_H_ */

312
msm/dsi/dsi_clk.h Normal file
View File

@@ -0,0 +1,312 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_CLK_H_
#define _DSI_CLK_H_
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include "sde_power_handle.h"
#define MAX_STRING_LEN 32
#define MAX_DSI_CTRL 2
enum dsi_clk_state {
DSI_CLK_OFF,
DSI_CLK_ON,
DSI_CLK_EARLY_GATE,
};
enum clk_req_client {
DSI_CLK_REQ_MDP_CLIENT = 0,
DSI_CLK_REQ_DSI_CLIENT,
};
enum dsi_link_clk_type {
DSI_LINK_ESC_CLK,
DSI_LINK_BYTE_CLK,
DSI_LINK_PIX_CLK,
DSI_LINK_BYTE_INTF_CLK,
DSI_LINK_CLK_MAX,
};
enum dsi_link_clk_op_type {
DSI_LINK_CLK_SET_RATE = BIT(0),
DSI_LINK_CLK_PREPARE = BIT(1),
DSI_LINK_CLK_ENABLE = BIT(2),
DSI_LINK_CLK_START = BIT(0) | BIT(1) | BIT(2),
};
enum dsi_clk_type {
DSI_CORE_CLK = BIT(0),
DSI_LINK_CLK = BIT(1),
DSI_ALL_CLKS = (BIT(0) | BIT(1)),
DSI_CLKS_MAX = BIT(2),
};
enum dsi_lclk_type {
DSI_LINK_NONE = 0,
DSI_LINK_LP_CLK = BIT(0),
DSI_LINK_HS_CLK = BIT(1),
};
struct dsi_clk_ctrl_info {
enum dsi_clk_type clk_type;
enum dsi_clk_state clk_state;
enum clk_req_client client;
};
struct clk_ctrl_cb {
void *priv;
int (*dsi_clk_cb)(void *priv, struct dsi_clk_ctrl_info clk_ctrl_info);
};
/**
* struct dsi_core_clk_info - Core clock information for DSI hardware
* @mdp_core_clk: Handle to MDP core clock.
* @iface_clk: Handle to MDP interface clock.
* @core_mmss_clk: Handle to MMSS core clock.
* @bus_clk: Handle to bus clock.
* @mnoc_clk: Handle to MMSS NOC clock.
* @dsi_core_client: Pointer to SDE power client
* @phandle: Pointer to SDE power handle
*/
struct dsi_core_clk_info {
struct clk *mdp_core_clk;
struct clk *iface_clk;
struct clk *core_mmss_clk;
struct clk *bus_clk;
struct clk *mnoc_clk;
struct sde_power_client *dsi_core_client;
struct sde_power_handle *phandle;
};
/**
* struct dsi_link_hs_clk_info - Set of high speed link clocks for DSI HW
* @byte_clk: Handle to DSI byte_clk.
* @pixel_clk: Handle to DSI pixel_clk.
* @byte_intf_clk: Handle to DSI byte intf. clock.
*/
struct dsi_link_hs_clk_info {
struct clk *byte_clk;
struct clk *pixel_clk;
struct clk *byte_intf_clk;
};
/**
* struct dsi_link_lp_clk_info - Set of low power link clocks for DSI HW.
* @esc_clk: Handle to DSI escape clock.
*/
struct dsi_link_lp_clk_info {
struct clk *esc_clk;
};
/**
* struct link_clk_freq - Clock frequency information for Link clocks
* @byte_clk_rate: Frequency of DSI byte_clk in KHz.
* @pix_clk_rate: Frequency of DSI pixel_clk in KHz.
* @esc_clk_rate: Frequency of DSI escape clock in KHz.
*/
struct link_clk_freq {
u32 byte_clk_rate;
u32 pix_clk_rate;
u32 esc_clk_rate;
};
/**
* typedef *pre_clockoff_cb() - Callback before clock is turned off
* @priv: private data pointer.
* @clk_type: clock which is being turned off.
* @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @new_state: next state for the clock.
*
* @return: error code.
*/
typedef int (*pre_clockoff_cb)(void *priv,
enum dsi_clk_type clk_type,
enum dsi_lclk_type l_type,
enum dsi_clk_state new_state);
/**
* typedef *post_clockoff_cb() - Callback after clock is turned off
* @priv: private data pointer.
* @clk_type: clock which was turned off.
* @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @curr_state: current state for the clock.
*
* @return: error code.
*/
typedef int (*post_clockoff_cb)(void *priv,
enum dsi_clk_type clk_type,
enum dsi_lclk_type l_type,
enum dsi_clk_state curr_state);
/**
* typedef *post_clockon_cb() - Callback after clock is turned on
* @priv: private data pointer.
* @clk_type: clock which was turned on.
* @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @curr_state: current state for the clock.
*
* @return: error code.
*/
typedef int (*post_clockon_cb)(void *priv,
enum dsi_clk_type clk_type,
enum dsi_lclk_type l_type,
enum dsi_clk_state curr_state);
/**
* typedef *pre_clockon_cb() - Callback before clock is turned on
* @priv: private data pointer.
* @clk_type: clock which is being turned on.
* @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @new_state: next state for the clock.
*
* @return: error code.
*/
typedef int (*pre_clockon_cb)(void *priv,
enum dsi_clk_type clk_type,
enum dsi_lclk_type l_type,
enum dsi_clk_state new_state);
/**
* struct dsi_clk_info - clock information for DSI hardware.
* @name: client name.
* @c_clks[MAX_DSI_CTRL]: array of core clock configurations
* @l_lp_clks[MAX_DSI_CTRL]: array of low power(esc) clock configurations
* @l_hs_clks[MAX_DSI_CTRL]: array of high speed clock configurations
* @bus_handle[MAX_DSI_CTRL]: array of bus handles
* @ctrl_index[MAX_DSI_CTRL]: array of DSI controller indexes mapped
* to core and link clock configurations
* @pre_clkoff_cb: callback before clock is turned off
* @post_clkoff_cb: callback after clock is turned off
* @post_clkon_cb: callback after clock is turned on
* @pre_clkon_cb: callback before clock is turned on
* @priv_data: pointer to private data
* @master_ndx: master DSI controller index
* @dsi_ctrl_count: number of DSI controllers
*/
struct dsi_clk_info {
char name[MAX_STRING_LEN];
struct dsi_core_clk_info c_clks[MAX_DSI_CTRL];
struct dsi_link_lp_clk_info l_lp_clks[MAX_DSI_CTRL];
struct dsi_link_hs_clk_info l_hs_clks[MAX_DSI_CTRL];
u32 bus_handle[MAX_DSI_CTRL];
u32 ctrl_index[MAX_DSI_CTRL];
pre_clockoff_cb pre_clkoff_cb;
post_clockoff_cb post_clkoff_cb;
post_clockon_cb post_clkon_cb;
pre_clockon_cb pre_clkon_cb;
void *priv_data;
u32 master_ndx;
u32 dsi_ctrl_count;
};
/**
* struct dsi_clk_link_set - Pair of clock handles to describe link clocks
* @byte_clk: Handle to DSI byte_clk.
* @pixel_clk: Handle to DSI pixel_clk.
*/
struct dsi_clk_link_set {
struct clk *byte_clk;
struct clk *pixel_clk;
};
/**
* dsi_display_clk_mngr_update_splash_status() - Update splash status
* @clk_mngr: DSI clock manager pointer
* @status: Splash status
*/
void dsi_display_clk_mngr_update_splash_status(void *clk_mngr, bool status);
/**
* dsi_display_clk_mgr_register() - Register DSI clock manager
* @info: Structure containing DSI clock information
*/
void *dsi_display_clk_mngr_register(struct dsi_clk_info *info);
/**
* dsi_display_clk_mngr_deregister() - Deregister DSI clock manager
* @clk_mngr: DSI clock manager pointer
*/
int dsi_display_clk_mngr_deregister(void *clk_mngr);
/**
* dsi_register_clk_handle() - Register clock handle with DSI clock manager
* @clk_mngr: DSI clock manager pointer
* @client: DSI clock client pointer.
*/
void *dsi_register_clk_handle(void *clk_mngr, char *client);
/**
* dsi_deregister_clk_handle() - Deregister clock handle from DSI clock manager
* @client: DSI clock client pointer.
*
* return: error code in case of failure or 0 for success.
*/
int dsi_deregister_clk_handle(void *client);
/**
* dsi_display_link_clk_force_update_ctrl() - force an update of the link clocks
* @handle: Handle of desired DSI clock client.
*
* return: error code in case of failure or 0 for success.
*/
int dsi_display_link_clk_force_update_ctrl(void *handle);
/**
* dsi_display_clk_ctrl() - set the state of the specified DSI clocks
* @handle: Handle of desired DSI clock client.
* @clk_type: Clock which is being controlled.
* @clk_state: Desired state of clock
*
* return: error code in case of failure or 0 for success.
*/
int dsi_display_clk_ctrl(void *handle,
enum dsi_clk_type clk_type, enum dsi_clk_state clk_state);
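/*
 * Example (hypothetical sketch): a display driver registers a clock manager
 * with a populated dsi_clk_info, obtains a per-client handle and votes
 * clocks through it. The client name "dsi_example" is illustrative.
 *
 *	void *mngr = dsi_display_clk_mngr_register(&info);
 *	void *handle = dsi_register_clk_handle(mngr, "dsi_example");
 *
 *	rc = dsi_display_clk_ctrl(handle, DSI_ALL_CLKS, DSI_CLK_ON);
 *	... perform DSI operations ...
 *	rc = dsi_display_clk_ctrl(handle, DSI_ALL_CLKS, DSI_CLK_OFF);
 *
 *	dsi_deregister_clk_handle(handle);
 *	dsi_display_clk_mngr_deregister(mngr);
 */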
/**
* dsi_clk_set_link_frequencies() - set frequencies for link clks
* @client: DSI clock client pointer.
* @freq: Structure containing link clock frequencies.
* @index: Index of the DSI controller.
*
* return: error code in case of failure or 0 for success.
*/
int dsi_clk_set_link_frequencies(void *client, struct link_clk_freq freq,
u32 index);
/**
* dsi_clk_set_pixel_clk_rate() - set frequency for pixel_clk
* @client: DSI clock client pointer.
* @pixel_clk: Pixel_clk rate in Hz.
* @index: Index of the DSI controller.
* return: error code in case of failure or 0 for success.
*/
int dsi_clk_set_pixel_clk_rate(void *client, u64 pixel_clk, u32 index);
/**
* dsi_clk_set_byte_clk_rate() - set frequency for byte clock
* @client: DSI clock client pointer.
* @byte_clk: Byte clock rate in Hz.
* @index: Index of the DSI controller.
* return: error code in case of failure or 0 for success.
*/
int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, u32 index);
/**
* dsi_clk_update_parent() - update parent clocks for specified clock
* @parent: link clock pair which are set as parent.
* @child: link clock pair whose parent has to be set.
*/
int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
struct dsi_clk_link_set *child);
#endif /* _DSI_CLK_H_ */

1470
msm/dsi/dsi_clk_manager.c Normal file

File diff suppressed because it is too large

3614
msm/dsi/dsi_ctrl.c Normal file

File diff suppressed because it is too large

797
msm/dsi/dsi_ctrl.h Normal file
View File

@@ -0,0 +1,797 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_CTRL_H_
#define _DSI_CTRL_H_
#include <linux/debugfs.h>
#include "dsi_defs.h"
#include "dsi_ctrl_hw.h"
#include "dsi_clk.h"
#include "dsi_pwr.h"
#include "drm_mipi_dsi.h"
/*
* DSI Command transfer modifiers
* @DSI_CTRL_CMD_READ: The current transfer involves reading data.
* @DSI_CTRL_CMD_BROADCAST: The current transfer needs to be done in
* broadcast mode to multiple slaves.
* @DSI_CTRL_CMD_BROADCAST_MASTER: This controller is the master and the slaves
* sync to this trigger.
* @DSI_CTRL_CMD_DEFER_TRIGGER: Defer the command trigger to later.
* @DSI_CTRL_CMD_FIFO_STORE: Use FIFO for command transfer in place of
* reading data from memory.
* @DSI_CTRL_CMD_FETCH_MEMORY: Fetch command from memory through AXI bus
* and transfer it.
* @DSI_CTRL_CMD_LAST_COMMAND: Trigger the DMA cmd transfer if this is last
* command in the batch.
* @DSI_CTRL_CMD_NON_EMBEDDED_MODE:Transfer cmd packets in non embedded mode.
* @DSI_CTRL_CMD_CUSTOM_DMA_SCHED: Use the dma scheduling line number defined in
* display panel dtsi file instead of default.
*/
#define DSI_CTRL_CMD_READ 0x1
#define DSI_CTRL_CMD_BROADCAST 0x2
#define DSI_CTRL_CMD_BROADCAST_MASTER 0x4
#define DSI_CTRL_CMD_DEFER_TRIGGER 0x8
#define DSI_CTRL_CMD_FIFO_STORE 0x10
#define DSI_CTRL_CMD_FETCH_MEMORY 0x20
#define DSI_CTRL_CMD_LAST_COMMAND 0x40
#define DSI_CTRL_CMD_NON_EMBEDDED_MODE 0x80
#define DSI_CTRL_CMD_CUSTOM_DMA_SCHED 0x100
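/*
 * Example (illustrative): a command fetched over AXI and broadcast from the
 * master controller, with the trigger deferred for later, would combine the
 * modifiers above as:
 *
 *	u32 flags = DSI_CTRL_CMD_FETCH_MEMORY | DSI_CTRL_CMD_BROADCAST |
 *		    DSI_CTRL_CMD_BROADCAST_MASTER |
 *		    DSI_CTRL_CMD_DEFER_TRIGGER;
 */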
/* DSI embedded mode fifo size
* If the command is greater than 256 bytes it is sent in non-embedded mode.
*/
#define DSI_EMBEDDED_MODE_DMA_MAX_SIZE_BYTES 256
/* max size supported for dsi cmd transfer using TPG */
#define DSI_CTRL_MAX_CMD_FIFO_STORE_SIZE 64
/**
* enum dsi_power_state - defines power states for dsi controller.
* @DSI_CTRL_POWER_VREG_OFF: Digital and analog supplies for DSI controller
*				turned off.
* @DSI_CTRL_POWER_VREG_ON: Digital and analog supplies for DSI controller
*				turned on.
* @DSI_CTRL_POWER_MAX: Maximum value.
*/
enum dsi_power_state {
DSI_CTRL_POWER_VREG_OFF = 0,
DSI_CTRL_POWER_VREG_ON,
DSI_CTRL_POWER_MAX,
};
/**
* enum dsi_engine_state - define engine status for dsi controller.
* @DSI_CTRL_ENGINE_OFF: Engine is turned off.
* @DSI_CTRL_ENGINE_ON: Engine is turned on.
* @DSI_CTRL_ENGINE_MAX: Maximum value.
*/
enum dsi_engine_state {
DSI_CTRL_ENGINE_OFF = 0,
DSI_CTRL_ENGINE_ON,
DSI_CTRL_ENGINE_MAX,
};
/**
* struct dsi_ctrl_power_info - digital and analog power supplies for dsi host
* @digital: Digital power supply required to turn on DSI controller hardware.
* @host_pwr: Analog power supplies required to turn on DSI controller hardware.
* Even though the DSI controller itself does not require an analog
* power supply, supplies required for PLL can be defined here to
* allow proper control over these supplies.
*/
struct dsi_ctrl_power_info {
struct dsi_regulator_info digital;
struct dsi_regulator_info host_pwr;
};
/**
* struct dsi_ctrl_clk_info - clock information for DSI controller
* @core_clks: Core clocks needed to access DSI controller registers.
* @hs_link_clks: Clocks required to transmit high speed data over DSI
* @lp_link_clks: Clocks required to perform low power ops over DSI
* @rcg_clks: Root clock generation clocks generated in MMSS_CC. The
* output of the PLL is set as parent for these root
* clocks. These clocks are specific to controller
* instance.
* @mux_clks: Mux clocks used for Dynamic refresh feature.
* @ext_clks: External byte/pixel clocks from the MMSS block. These
* clocks are set as parent to rcg clocks.
* @pll_op_clks: TODO:
* @shadow_clks: TODO:
*/
struct dsi_ctrl_clk_info {
/* Clocks parsed from DT */
struct dsi_core_clk_info core_clks;
struct dsi_link_hs_clk_info hs_link_clks;
struct dsi_link_lp_clk_info lp_link_clks;
struct dsi_clk_link_set rcg_clks;
/* Clocks set by DSI Manager */
struct dsi_clk_link_set mux_clks;
struct dsi_clk_link_set ext_clks;
struct dsi_clk_link_set pll_op_clks;
struct dsi_clk_link_set shadow_clks;
};
/**
* struct dsi_ctrl_bus_scale_info - Bus scale info for msm-bus bandwidth voting
* @bus_scale_table: Bus scale voting usecases.
* @bus_handle: Handle used for voting bandwidth.
*/
struct dsi_ctrl_bus_scale_info {
struct msm_bus_scale_pdata *bus_scale_table;
u32 bus_handle;
};
/**
* struct dsi_ctrl_state_info - current driver state information
* @power_state: Status of power states on DSI controller.
* @cmd_engine_state: Status of DSI command engine.
* @vid_engine_state: Status of DSI video engine.
* @controller_state: Status of DSI Controller engine.
* @host_initialized: Boolean to indicate status of DSi host Initialization
* @tpg_enabled: Boolean to indicate whether tpg is enabled.
*/
struct dsi_ctrl_state_info {
enum dsi_power_state power_state;
enum dsi_engine_state cmd_engine_state;
enum dsi_engine_state vid_engine_state;
enum dsi_engine_state controller_state;
bool host_initialized;
bool tpg_enabled;
};
/**
* struct dsi_ctrl_interrupts - define interrupt information
* @irq_lock: Spinlock for ISR handler.
* @irq_num: Linux interrupt number associated with device.
* @irq_stat_mask: Hardware mask of currently enabled interrupts.
* @irq_stat_refcount: Number of times each interrupt has been requested.
* @irq_stat_cb: Status IRQ callback definitions.
* @irq_err_cb: IRQ callback definition to handle DSI ERRORs.
* @cmd_dma_done: Completion signal for DSI_CMD_MODE_DMA_DONE interrupt
* @vid_frame_done: Completion signal for DSI_VIDEO_MODE_FRAME_DONE int.
* @cmd_frame_done: Completion signal for DSI_CMD_FRAME_DONE interrupt.
* @bta_done: Completion signal for DSI_BTA_DONE interrupt.
*/
struct dsi_ctrl_interrupts {
spinlock_t irq_lock;
int irq_num;
uint32_t irq_stat_mask;
int irq_stat_refcount[DSI_STATUS_INTERRUPT_COUNT];
struct dsi_event_cb_info irq_stat_cb[DSI_STATUS_INTERRUPT_COUNT];
struct dsi_event_cb_info irq_err_cb;
struct completion cmd_dma_done;
struct completion vid_frame_done;
struct completion cmd_frame_done;
struct completion bta_done;
};
/**
* struct dsi_ctrl - DSI controller object
* @pdev: Pointer to platform device.
* @cell_index: Instance cell id.
* @horiz_index: Index in physical horizontal CTRL layout, 0 = leftmost
* @name: Name of the controller instance.
* @refcount: ref counter.
* @ctrl_lock: Mutex for hardware and object access.
* @drm_dev: Pointer to DRM device.
* @version: DSI controller version.
* @hw: DSI controller hardware object.
* @current_state: Current driver and hardware state.
* @clk_cb: Callback for DSI clock control.
* @irq_info: Interrupt information.
* @recovery_cb: Recovery call back to SDE.
* @clk_info: Clock information.
* @clk_freq: DSi Link clock frequency information.
* @pwr_info: Power information.
* @axi_bus_info: AXI bus information.
* @host_config: Current host configuration.
* @mode_bounds: Boundaries of the default mode ROI.
* Origin is at top left of all CTRLs.
* @roi: Partial update region of interest.
* Origin is top left of this CTRL.
* @tx_cmd_buf: Tx command buffer.
* @cmd_buffer_iova: cmd buffer mapped address.
* @cmd_buffer_size: Size of command buffer.
* @cmd_len: Length of the current command in the buffer.
* @vaddr: CPU virtual address of cmd buffer.
* @secure_mode: Indicates if secure-session is in progress
* @esd_check_underway: Indicates if esd status check is in progress
* @debugfs_root: Root for debugfs entries.
* @misr_enable: Frame MISR enable/disable
* @misr_cache: Cached Frame MISR value
* @phy_isolation_enabled: A boolean property allows to isolate the phy from
* dsi controller and run only dsi controller.
* @null_insertion_enabled: A boolean property to allow dsi controller to
* insert null packet.
* @modeupdated: Boolean to send new roi if mode is updated.
*/
struct dsi_ctrl {
struct platform_device *pdev;
u32 cell_index;
u32 horiz_index;
const char *name;
u32 refcount;
struct mutex ctrl_lock;
struct drm_device *drm_dev;
enum dsi_ctrl_version version;
struct dsi_ctrl_hw hw;
/* Current state */
struct dsi_ctrl_state_info current_state;
struct clk_ctrl_cb clk_cb;
struct dsi_ctrl_interrupts irq_info;
struct dsi_event_cb_info recovery_cb;
/* Clock and power states */
struct dsi_ctrl_clk_info clk_info;
struct link_clk_freq clk_freq;
struct dsi_ctrl_power_info pwr_info;
struct dsi_ctrl_bus_scale_info axi_bus_info;
struct dsi_host_config host_config;
struct dsi_rect mode_bounds;
struct dsi_rect roi;
/* Command tx and rx */
struct drm_gem_object *tx_cmd_buf;
u32 cmd_buffer_size;
u32 cmd_buffer_iova;
u32 cmd_len;
void *vaddr;
bool secure_mode;
bool esd_check_underway;
/* Debug Information */
struct dentry *debugfs_root;
/* MISR */
bool misr_enable;
u32 misr_cache;
/* Check for spurious interrupts */
unsigned long jiffies_start;
unsigned int error_interrupt_count;
bool phy_isolation_enabled;
bool null_insertion_enabled;
bool modeupdated;
};
/**
* dsi_ctrl_get() - get a dsi_ctrl handle from an of_node
* @of_node: of_node of the DSI controller.
*
* Gets the DSI controller handle for the corresponding of_node. The ref count
* is incremented to one and all subsequent gets will fail until the original
* clients calls a put.
*
* Return: DSI Controller handle.
*/
struct dsi_ctrl *dsi_ctrl_get(struct device_node *of_node);
/**
* dsi_ctrl_put() - releases a dsi controller handle.
* @dsi_ctrl: DSI controller handle.
*
* Releases the DSI controller. The driver cleans up all resources and puts
* the controller back into reset state.
*/
void dsi_ctrl_put(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_drv_init() - initialize dsi controller driver.
* @dsi_ctrl: DSI controller handle.
* @parent: Parent directory for debug fs.
*
* Initializes DSI controller driver. Driver should be initialized after
* dsi_ctrl_get() succeeds.
*
* Return: error code.
*/
int dsi_ctrl_drv_init(struct dsi_ctrl *dsi_ctrl, struct dentry *parent);
/**
* dsi_ctrl_drv_deinit() - de-initializes dsi controller driver
* @dsi_ctrl: DSI controller handle.
*
* Releases all resources acquired by dsi_ctrl_drv_init().
*
* Return: error code.
*/
int dsi_ctrl_drv_deinit(struct dsi_ctrl *dsi_ctrl);
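/*
 * Example (hypothetical sketch): typical acquisition sequence implied by the
 * documentation above; of_node and debugfs_parent come from the caller and
 * are assumptions here.
 *
 *	struct dsi_ctrl *ctrl = dsi_ctrl_get(of_node);
 *
 *	if (IS_ERR_OR_NULL(ctrl))
 *		return -EPROBE_DEFER;
 *	rc = dsi_ctrl_drv_init(ctrl, debugfs_parent);
 *	if (rc) {
 *		dsi_ctrl_put(ctrl);
 *		return rc;
 *	}
 *	...
 *	dsi_ctrl_drv_deinit(ctrl);
 *	dsi_ctrl_put(ctrl);
 */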
/**
* dsi_ctrl_validate_timing() - validate a video timing configuration
* @dsi_ctrl: DSI controller handle.
* @timing: Pointer to timing data.
*
* Driver will validate if the timing configuration is supported on the
* controller hardware.
*
* Return: error code if timing is not supported.
*/
int dsi_ctrl_validate_timing(struct dsi_ctrl *dsi_ctrl,
struct dsi_mode_info *timing);
/**
* dsi_ctrl_update_host_config() - update dsi host configuration
* @dsi_ctrl: DSI controller handle.
* @config: DSI host configuration.
* @flags: dsi_mode_flags modifying the behavior
* @clk_handle: Clock handle for DSI clocks
*
* Updates driver with new Host configuration to use for host initialization.
* This function call will only update the software context. The stored
* configuration information will be used when the host is initialized.
*
* Return: error code.
*/
int dsi_ctrl_update_host_config(struct dsi_ctrl *dsi_ctrl,
struct dsi_host_config *config,
int flags, void *clk_handle);
/**
* dsi_ctrl_timing_db_update() - update only controller Timing DB
* @dsi_ctrl: DSI controller handle.
* @enable: Enable/disable Timing DB register
*
* Update timing db register value during dfps usecases
*
* Return: error code.
*/
int dsi_ctrl_timing_db_update(struct dsi_ctrl *dsi_ctrl,
bool enable);
/**
* dsi_ctrl_async_timing_update() - update only controller timing
* @dsi_ctrl: DSI controller handle.
* @timing: New DSI timing info
*
* Updates host timing values to asynchronously transition to new timing
* For example, to update the porch values in a seamless/dynamic fps switch.
*
* Return: error code.
*/
int dsi_ctrl_async_timing_update(struct dsi_ctrl *dsi_ctrl,
struct dsi_mode_info *timing);
/**
* dsi_ctrl_phy_sw_reset() - perform a PHY software reset
* @dsi_ctrl: DSI controller handle.
*
* Performs a PHY software reset on the DSI controller. Reset should be done
* when the controller power state is DSI_CTRL_POWER_CORE_CLK_ON and the PHY is
* not enabled.
*
* This function will fail if driver is in any other state.
*
* Return: error code.
*/
int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_phy_reset_config() - Mask/unmask propagation of ahb reset signal
* to DSI PHY hardware.
* @dsi_ctrl: DSI controller handle.
* @enable: Mask/unmask the PHY reset signal.
*
* Return: error code.
*/
int dsi_ctrl_phy_reset_config(struct dsi_ctrl *dsi_ctrl, bool enable);
/**
* dsi_ctrl_config_clk_gating() - Enable/Disable DSI PHY clk gating
* @dsi_ctrl: DSI controller handle.
* @enable: Enable/disable DSI PHY clk gating
* @clk_selection: clock selection for gating
*
* Return: error code.
*/
int dsi_ctrl_config_clk_gating(struct dsi_ctrl *dsi_ctrl, bool enable,
enum dsi_clk_gate_type clk_selection);
/**
* dsi_ctrl_soft_reset() - perform a soft reset on DSI controller
* @dsi_ctrl: DSI controller handle.
*
* The video, command and controller engines will be disabled before the
* reset is triggered. After, the engines will be re-enabled to the same state
* as before the reset.
*
* If the reset is done while MDP timing engine is turned on, the video
* engine should be re-enabled only during the vertical blanking time.
*
* Return: error code
*/
int dsi_ctrl_soft_reset(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_host_timing_update - reinitialize host with new timing values
* @dsi_ctrl: DSI controller handle.
*
* Reinitialize DSI controller hardware with new display timing values
* when resolution is switched dynamically.
*
* Return: error code
*/
int dsi_ctrl_host_timing_update(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_host_init() - Initialize DSI host hardware.
* @dsi_ctrl: DSI controller handle.
* @is_splash_enabled: boolean signifying splash status.
*
* Initializes DSI controller hardware with host configuration provided by
* dsi_ctrl_update_host_config(). Initialization can be performed only during
* DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
* performed.
*
* Return: error code.
*/
int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool is_splash_enabled);
/**
* dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
* @dsi_ctrl: DSI controller handle.
*
* De-initializes DSI controller hardware. It can be performed only during
* DSI_CTRL_POWER_CORE_CLK_ON state after LINK clocks have been turned off.
*
* Return: error code.
*/
int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
* @dsi_ctrl: DSI controller handle.
* @enable: enable/disable ULPS.
*
* ULPS can be enabled/disabled after DSI host engine is turned on.
*
* Return: error code.
*/
int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable);
/**
* dsi_ctrl_setup() - Setup DSI host hardware while coming out of idle screen.
* @dsi_ctrl: DSI controller handle.
*
* Initializes DSI controller hardware with host configuration provided by
* dsi_ctrl_update_host_config(). Initialization can be performed only during
* DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
* performed.
*
* Also used to program the video mode timing values.
*
* Return: error code.
*/
int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_set_roi() - Set DSI controller's region of interest
* @dsi_ctrl: DSI controller handle.
* @roi: Region of interest rectangle, must be less than mode bounds
* @changed: Output parameter, set to true if the controller's ROI was
* dirtied by setting the new ROI, and DCS cmd update needed
*
* Return: error code.
*/
int dsi_ctrl_set_roi(struct dsi_ctrl *dsi_ctrl, struct dsi_rect *roi,
bool *changed);
/**
* dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller
* @dsi_ctrl: DSI controller handle.
* @on: enable/disable test pattern.
*
* Test pattern can be enabled only after Video engine (for video mode panels)
* or command engine (for cmd mode panels) is enabled.
*
* Return: error code.
*/
int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on);
/**
* dsi_ctrl_cmd_transfer() - Transfer commands on DSI link
* @dsi_ctrl: DSI controller handle.
* @msg: Message to transfer on DSI link.
* @flags: Modifiers for message transfer.
*
* Command transfer can be done only when command engine is enabled. The
* transfer API will block until either the command transfer finishes or the
* timeout value is reached. If the trigger is deferred, it will return without
* triggering the transfer. Command parameters are programmed to hardware.
*
* Return: error code.
*/
int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl,
const struct mipi_dsi_msg *msg,
u32 flags);
/**
* dsi_ctrl_cmd_tx_trigger() - Trigger a deferred command.
* @dsi_ctrl: DSI controller handle.
* @flags: Modifiers.
*
* Return: error code.
*/
int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags);
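/*
 * Example (hypothetical sketch): sending a command with a deferred trigger.
 * The mipi_dsi_msg construction and error handling are illustrative only.
 *
 *	u32 flags = DSI_CTRL_CMD_FETCH_MEMORY | DSI_CTRL_CMD_DEFER_TRIGGER;
 *
 *	rc = dsi_ctrl_cmd_transfer(ctrl, &msg, flags);
 *	if (!rc)
 *		rc = dsi_ctrl_cmd_tx_trigger(ctrl, flags);
 */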
/**
* dsi_ctrl_update_host_engine_state_for_cont_splash() - update engine
* states for cont splash usecase
* @dsi_ctrl: DSI controller handle.
* @state: DSI engine state
*
* Return: error code.
*/
int dsi_ctrl_update_host_engine_state_for_cont_splash(struct dsi_ctrl *dsi_ctrl,
enum dsi_engine_state state);
/**
* dsi_ctrl_set_power_state() - set power state for dsi controller
* @dsi_ctrl: DSI controller handle.
* @state: Power state.
*
* Set power state for DSI controller. Power state can be changed only when
* Controller, Video and Command engines are turned off.
*
* Return: error code.
*/
int dsi_ctrl_set_power_state(struct dsi_ctrl *dsi_ctrl,
enum dsi_power_state state);
/**
* dsi_ctrl_set_cmd_engine_state() - set command engine state
* @dsi_ctrl: DSI Controller handle.
* @state: Engine state.
*
* Command engine state can be modified only when DSI controller power state is
* set to DSI_CTRL_POWER_LINK_CLK_ON.
*
* Return: error code.
*/
int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
enum dsi_engine_state state);
/**
* dsi_ctrl_validate_host_state() - validate DSI ctrl host state
* @dsi_ctrl: DSI Controller handle.
*
* Validate DSI controller host state
*
* Return: boolean indicating whether host is not initialized.
*/
bool dsi_ctrl_validate_host_state(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_set_vid_engine_state() - set video engine state
* @dsi_ctrl: DSI Controller handle.
* @state: Engine state.
*
* Video engine state can be modified only when DSI controller power state is
* set to DSI_CTRL_POWER_LINK_CLK_ON.
*
* Return: error code.
*/
int dsi_ctrl_set_vid_engine_state(struct dsi_ctrl *dsi_ctrl,
enum dsi_engine_state state);
/**
* dsi_ctrl_set_host_engine_state() - set host engine state
* @dsi_ctrl: DSI Controller handle.
* @state: Engine state.
*
* Host engine state can be modified only when DSI controller power state is
* set to DSI_CTRL_POWER_LINK_CLK_ON and cmd, video engines are disabled.
*
* Return: error code.
*/
int dsi_ctrl_set_host_engine_state(struct dsi_ctrl *dsi_ctrl,
enum dsi_engine_state state);
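/*
 * Example (illustrative ordering, following the constraints documented
 * above): power state changes happen with all engines off, and engine state
 * changes happen only once link clocks are running.
 *
 *	rc = dsi_ctrl_set_power_state(ctrl, DSI_CTRL_POWER_VREG_ON);
 *	... enable core and link clocks via the registered clk_cb ...
 *	rc = dsi_ctrl_set_host_engine_state(ctrl, DSI_CTRL_ENGINE_ON);
 *	rc = dsi_ctrl_set_cmd_engine_state(ctrl, DSI_CTRL_ENGINE_ON);
 */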
/**
* dsi_ctrl_clk_cb_register() - Register DSI controller clk control callback
* @dsi_ctrl: DSI controller handle.
* @clk_cb: Structure containing callback for clock control.
*
* Register call for DSI clock control
*
* Return: error code.
*/
int dsi_ctrl_clk_cb_register(struct dsi_ctrl *dsi_ctrl,
struct clk_ctrl_cb *clk_cb);
/**
* dsi_ctrl_set_clamp_state() - set clamp state for DSI phy
* @dsi_ctrl: DSI controller handle.
* @enable: enable/disable clamping.
* @ulps_enabled: ulps state.
*
* Clamps can be enabled/disabled while DSI controller is still turned on.
*
* Return: error code.
*/
int dsi_ctrl_set_clamp_state(struct dsi_ctrl *dsi_ctrl,
bool enable, bool ulps_enabled);
/**
* dsi_ctrl_set_clock_source() - set clock source for dsi link clocks
* @dsi_ctrl: DSI controller handle.
* @source_clks: Source clocks for DSI link clocks.
*
* Clock source should be changed while link clocks are disabled.
*
* Return: error code.
*/
int dsi_ctrl_set_clock_source(struct dsi_ctrl *dsi_ctrl,
struct dsi_clk_link_set *source_clks);
/**
* dsi_ctrl_enable_status_interrupt() - enable status interrupts
* @dsi_ctrl: DSI controller handle.
* @intr_idx: Index of interrupt to enable.
* @event_info: Pointer to event callback definition
*/
void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
uint32_t intr_idx, struct dsi_event_cb_info *event_info);
/**
* dsi_ctrl_disable_status_interrupt() - disable status interrupts
* @dsi_ctrl: DSI controller handle.
* @intr_idx: Index of interrupt to disable.
*/
void dsi_ctrl_disable_status_interrupt(
struct dsi_ctrl *dsi_ctrl, uint32_t intr_idx);
/**
* dsi_ctrl_setup_misr() - Setup frame MISR
* @dsi_ctrl: DSI controller handle.
* @enable: enable/disable MISR.
* @frame_count: Number of frames to accumulate MISR.
*
* Return: error code.
*/
int dsi_ctrl_setup_misr(struct dsi_ctrl *dsi_ctrl,
bool enable,
u32 frame_count);
/**
* dsi_ctrl_collect_misr() - Read frame MISR
* @dsi_ctrl: DSI controller handle.
*
* Return: MISR value.
*/
u32 dsi_ctrl_collect_misr(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_cache_misr - Cache frame MISR value
* @dsi_ctrl: DSI controller handle.
*/
void dsi_ctrl_cache_misr(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_drv_register() - register platform driver for dsi controller
*/
void dsi_ctrl_drv_register(void);
/**
* dsi_ctrl_drv_unregister() - unregister platform driver
*/
void dsi_ctrl_drv_unregister(void);
/**
* dsi_ctrl_reset() - Reset DSI PHY CLK/DATA lane
* @dsi_ctrl: DSI controller handle.
* @mask: Mask to indicate if CLK and/or DATA lane needs reset.
*/
int dsi_ctrl_reset(struct dsi_ctrl *dsi_ctrl, int mask);
/**
* dsi_ctrl_get_hw_version() - read dsi controller hw revision
* @dsi_ctrl: DSI controller handle.
*/
int dsi_ctrl_get_hw_version(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_vid_engine_en() - Control DSI video engine HW state
* @dsi_ctrl: DSI controller handle.
* @on: variable to control video engine ON/OFF.
*/
int dsi_ctrl_vid_engine_en(struct dsi_ctrl *dsi_ctrl, bool on);
/**
* dsi_ctrl_setup_avr() - Set/Clear the AVR_SUPPORT_ENABLE bit
* @dsi_ctrl: DSI controller handle.
* @enable: variable to control AVR support ON/OFF.
*/
int dsi_ctrl_setup_avr(struct dsi_ctrl *dsi_ctrl, bool enable);
/**
* dsi_message_setup_tx_mode() - setup transfer mode for command message
* @dsi_ctrl: DSI controller handle.
* @cmd_len: Length of command.
* @flags: Config mode flags.
*/
void dsi_message_setup_tx_mode(struct dsi_ctrl *dsi_ctrl, u32 cmd_len,
u32 *flags);
/**
* dsi_message_validate_tx_mode() - validate transfer mode for command message
* @dsi_ctrl: DSI controller handle.
* @cmd_len: Length of command.
* @flags: Config mode flags.
*/
int dsi_message_validate_tx_mode(struct dsi_ctrl *dsi_ctrl, u32 cmd_len,
u32 *flags);
/**
* dsi_ctrl_isr_configure() - API to register/deregister dsi isr
* @dsi_ctrl: DSI controller handle.
* @enable: variable to control register/deregister isr
*/
void dsi_ctrl_isr_configure(struct dsi_ctrl *dsi_ctrl, bool enable);
/**
* dsi_ctrl_mask_error_status_interrupts() - API to mask dsi ctrl error status
* interrupts
* @dsi_ctrl: DSI controller handle.
* @idx: id indicating which interrupts to enable/disable.
* @mask_enable: boolean to enable/disable masking.
*/
void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl, u32 idx,
bool mask_enable);
/**
* dsi_ctrl_irq_update() - Put an IRQ vote to process DSI error
*				interrupts at any time.
* @dsi_ctrl: DSI controller handle.
* @enable: variable to control enable/disable irq line
*/
void dsi_ctrl_irq_update(struct dsi_ctrl *dsi_ctrl, bool enable);
/**
* dsi_ctrl_get_host_engine_init_state() - Return host init state
*/
int dsi_ctrl_get_host_engine_init_state(struct dsi_ctrl *dsi_ctrl,
bool *state);
/**
* dsi_ctrl_wait_for_cmd_mode_mdp_idle() - Wait for command mode engine not to
* be busy sending data from display engine.
* @dsi_ctrl: DSI controller handle.
*/
int dsi_ctrl_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_update_host_init_state() - Set the host initialization state
*/
int dsi_ctrl_update_host_init_state(struct dsi_ctrl *dsi_ctrl, bool en);
/**
* dsi_ctrl_pixel_format_to_bpp() - returns number of bits per pixel
*/
int dsi_ctrl_pixel_format_to_bpp(enum dsi_pixel_format dst_format);
/**
* dsi_ctrl_set_continuous_clk() - API to set/unset force clock lane HS request.
* @dsi_ctrl: DSI controller handle.
* @enable: variable to control continuous clock.
*/
void dsi_ctrl_set_continuous_clk(struct dsi_ctrl *dsi_ctrl, bool enable);
#endif /* _DSI_CTRL_H_ */

868
msm/dsi/dsi_ctrl_hw.h Normal file
View File

@@ -0,0 +1,868 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_CTRL_HW_H_
#define _DSI_CTRL_HW_H_
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include "dsi_defs.h"
/**
* Modifier flag for command transmission. If this flag is set, command
* information is programmed to hardware and transmission is not triggered.
* Caller should call the trigger_command_dma() to start the transmission. This
* flag is valid for kickoff_command() and kickoff_fifo_command() operations.
*/
#define DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER 0x1
/**
* enum dsi_ctrl_version - version of the dsi host controller
* @DSI_CTRL_VERSION_UNKNOWN: Unknown controller version
* @DSI_CTRL_VERSION_1_4: DSI host v1.4 controller
* @DSI_CTRL_VERSION_2_0: DSI host v2.0 controller
* @DSI_CTRL_VERSION_2_2: DSI host v2.2 controller
* @DSI_CTRL_VERSION_2_3: DSI host v2.3 controller
* @DSI_CTRL_VERSION_2_4: DSI host v2.4 controller
* @DSI_CTRL_VERSION_MAX: max version
*/
enum dsi_ctrl_version {
DSI_CTRL_VERSION_UNKNOWN,
DSI_CTRL_VERSION_1_4,
DSI_CTRL_VERSION_2_0,
DSI_CTRL_VERSION_2_2,
DSI_CTRL_VERSION_2_3,
DSI_CTRL_VERSION_2_4,
DSI_CTRL_VERSION_MAX
};
/**
* enum dsi_ctrl_hw_features - features supported by dsi host controller
* @DSI_CTRL_VIDEO_TPG: Test pattern support for video mode.
* @DSI_CTRL_CMD_TPG: Test pattern support for command mode.
* @DSI_CTRL_VARIABLE_REFRESH_RATE: variable panel timing
* @DSI_CTRL_DYNAMIC_REFRESH: variable pixel clock rate
* @DSI_CTRL_NULL_PACKET_INSERTION: NULL packet insertion
* @DSI_CTRL_DESKEW_CALIB: Deskew calibration support
* @DSI_CTRL_DPHY: Controller support for DPHY
* @DSI_CTRL_CPHY: Controller support for CPHY
 * @DSI_CTRL_MAX_FEATURES: Sentinel value; number of supported features.
*/
enum dsi_ctrl_hw_features {
DSI_CTRL_VIDEO_TPG,
DSI_CTRL_CMD_TPG,
DSI_CTRL_VARIABLE_REFRESH_RATE,
DSI_CTRL_DYNAMIC_REFRESH,
DSI_CTRL_NULL_PACKET_INSERTION,
DSI_CTRL_DESKEW_CALIB,
DSI_CTRL_DPHY,
DSI_CTRL_CPHY,
DSI_CTRL_MAX_FEATURES
};
/**
* enum dsi_test_pattern - test pattern type
* @DSI_TEST_PATTERN_FIXED: Test pattern is fixed, based on init value.
 * @DSI_TEST_PATTERN_INC:   Incremental test pattern, based on init value.
* @DSI_TEST_PATTERN_POLY: Pattern generated from polynomial and init val.
 * @DSI_TEST_PATTERN_MAX:   Sentinel value; number of test pattern types.
*/
enum dsi_test_pattern {
DSI_TEST_PATTERN_FIXED = 0,
DSI_TEST_PATTERN_INC,
DSI_TEST_PATTERN_POLY,
DSI_TEST_PATTERN_MAX
};
/**
* enum dsi_status_int_index - index of interrupts generated by DSI controller
* @DSI_SINT_CMD_MODE_DMA_DONE: Command mode DMA packets are sent out.
* @DSI_SINT_CMD_STREAM0_FRAME_DONE: A frame of cmd mode stream0 is sent out.
* @DSI_SINT_CMD_STREAM1_FRAME_DONE: A frame of cmd mode stream1 is sent out.
* @DSI_SINT_CMD_STREAM2_FRAME_DONE: A frame of cmd mode stream2 is sent out.
* @DSI_SINT_VIDEO_MODE_FRAME_DONE: A frame of video mode stream is sent out.
* @DSI_SINT_BTA_DONE: A BTA is completed.
* @DSI_SINT_CMD_FRAME_DONE: A frame of selected cmd mode stream is
* sent out by MDP.
* @DSI_SINT_DYN_REFRESH_DONE: The dynamic refresh operation completed.
* @DSI_SINT_DESKEW_DONE: The deskew calibration operation done.
 * @DSI_SINT_DYN_BLANK_DMA_DONE: The dynamic blanking DMA operation has
* completed.
* @DSI_SINT_ERROR: DSI error has happened.
*/
enum dsi_status_int_index {
DSI_SINT_CMD_MODE_DMA_DONE = 0,
DSI_SINT_CMD_STREAM0_FRAME_DONE = 1,
DSI_SINT_CMD_STREAM1_FRAME_DONE = 2,
DSI_SINT_CMD_STREAM2_FRAME_DONE = 3,
DSI_SINT_VIDEO_MODE_FRAME_DONE = 4,
DSI_SINT_BTA_DONE = 5,
DSI_SINT_CMD_FRAME_DONE = 6,
DSI_SINT_DYN_REFRESH_DONE = 7,
DSI_SINT_DESKEW_DONE = 8,
DSI_SINT_DYN_BLANK_DMA_DONE = 9,
DSI_SINT_ERROR = 10,
DSI_STATUS_INTERRUPT_COUNT
};
/**
* enum dsi_status_int_type - status interrupts generated by DSI controller
* @DSI_CMD_MODE_DMA_DONE: Command mode DMA packets are sent out.
* @DSI_CMD_STREAM0_FRAME_DONE: A frame of command mode stream0 is sent out.
* @DSI_CMD_STREAM1_FRAME_DONE: A frame of command mode stream1 is sent out.
* @DSI_CMD_STREAM2_FRAME_DONE: A frame of command mode stream2 is sent out.
* @DSI_VIDEO_MODE_FRAME_DONE: A frame of video mode stream is sent out.
* @DSI_BTA_DONE: A BTA is completed.
* @DSI_CMD_FRAME_DONE: A frame of selected command mode stream is
* sent out by MDP.
* @DSI_DYN_REFRESH_DONE: The dynamic refresh operation has completed.
* @DSI_DESKEW_DONE: The deskew calibration operation has completed
 * @DSI_DYN_BLANK_DMA_DONE:	The dynamic blanking DMA operation has
* completed.
* @DSI_ERROR: DSI error has happened.
*/
enum dsi_status_int_type {
DSI_CMD_MODE_DMA_DONE = BIT(DSI_SINT_CMD_MODE_DMA_DONE),
DSI_CMD_STREAM0_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM0_FRAME_DONE),
DSI_CMD_STREAM1_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM1_FRAME_DONE),
DSI_CMD_STREAM2_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM2_FRAME_DONE),
DSI_VIDEO_MODE_FRAME_DONE = BIT(DSI_SINT_VIDEO_MODE_FRAME_DONE),
DSI_BTA_DONE = BIT(DSI_SINT_BTA_DONE),
DSI_CMD_FRAME_DONE = BIT(DSI_SINT_CMD_FRAME_DONE),
DSI_DYN_REFRESH_DONE = BIT(DSI_SINT_DYN_REFRESH_DONE),
DSI_DESKEW_DONE = BIT(DSI_SINT_DESKEW_DONE),
DSI_DYN_BLANK_DMA_DONE = BIT(DSI_SINT_DYN_BLANK_DMA_DONE),
DSI_ERROR = BIT(DSI_SINT_ERROR)
};
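/*
 * Illustrative sketch (editor's example): the two enums above pair up, so a
 * status word read from hardware can be tested with the BIT-typed values and
 * cleared with the same mask. dma_done is an assumed caller-owned completion.
 *
 *	u32 ints = hw->ops.get_interrupt_status(hw);
 *
 *	if (ints & DSI_CMD_MODE_DMA_DONE)
 *		complete(&dma_done);
 *	hw->ops.clear_interrupt_status(hw, ints);
 */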
/**
* enum dsi_error_int_index - index of error interrupts from DSI controller
* @DSI_EINT_RDBK_SINGLE_ECC_ERR: Single bit ECC error in read packet.
* @DSI_EINT_RDBK_MULTI_ECC_ERR: Multi bit ECC error in read packet.
* @DSI_EINT_RDBK_CRC_ERR: CRC error in read packet.
* @DSI_EINT_RDBK_INCOMPLETE_PKT: Incomplete read packet.
 * @DSI_EINT_PERIPH_ERROR_PKT: Error packet returned from peripheral.
* @DSI_EINT_LP_RX_TIMEOUT: Low power reverse transmission timeout.
* @DSI_EINT_HS_TX_TIMEOUT: High speed fwd transmission timeout.
* @DSI_EINT_BTA_TIMEOUT: BTA timeout.
* @DSI_EINT_PLL_UNLOCK: PLL has unlocked.
* @DSI_EINT_DLN0_ESC_ENTRY_ERR: Incorrect LP Rx escape entry.
* @DSI_EINT_DLN0_ESC_SYNC_ERR: LP Rx data is not byte aligned.
* @DSI_EINT_DLN0_LP_CONTROL_ERR: Incorrect LP Rx state sequence.
* @DSI_EINT_PANEL_SPECIFIC_ERR: DSI Protocol violation error.
* @DSI_EINT_INTERLEAVE_OP_CONTENTION: Interleave operation contention.
* @DSI_EINT_CMD_DMA_FIFO_UNDERFLOW: Command mode DMA FIFO underflow.
* @DSI_EINT_CMD_MDP_FIFO_UNDERFLOW: Command MDP FIFO underflow (failed to
* receive one complete line from MDP).
* @DSI_EINT_DLN0_HS_FIFO_OVERFLOW: High speed FIFO data lane 0 overflows.
* @DSI_EINT_DLN1_HS_FIFO_OVERFLOW: High speed FIFO data lane 1 overflows.
* @DSI_EINT_DLN2_HS_FIFO_OVERFLOW: High speed FIFO data lane 2 overflows.
* @DSI_EINT_DLN3_HS_FIFO_OVERFLOW: High speed FIFO data lane 3 overflows.
* @DSI_EINT_DLN0_HS_FIFO_UNDERFLOW: High speed FIFO data lane 0 underflows.
* @DSI_EINT_DLN1_HS_FIFO_UNDERFLOW: High speed FIFO data lane 1 underflows.
* @DSI_EINT_DLN2_HS_FIFO_UNDERFLOW: High speed FIFO data lane 2 underflows.
 * @DSI_EINT_DLN3_HS_FIFO_UNDERFLOW: High speed FIFO data lane 3 underflows.
* @DSI_EINT_DLN0_LP0_CONTENTION: PHY level contention while lane 0 low.
* @DSI_EINT_DLN1_LP0_CONTENTION: PHY level contention while lane 1 low.
* @DSI_EINT_DLN2_LP0_CONTENTION: PHY level contention while lane 2 low.
* @DSI_EINT_DLN3_LP0_CONTENTION: PHY level contention while lane 3 low.
* @DSI_EINT_DLN0_LP1_CONTENTION: PHY level contention while lane 0 high.
* @DSI_EINT_DLN1_LP1_CONTENTION: PHY level contention while lane 1 high.
* @DSI_EINT_DLN2_LP1_CONTENTION: PHY level contention while lane 2 high.
* @DSI_EINT_DLN3_LP1_CONTENTION: PHY level contention while lane 3 high.
*/
enum dsi_error_int_index {
DSI_EINT_RDBK_SINGLE_ECC_ERR = 0,
DSI_EINT_RDBK_MULTI_ECC_ERR = 1,
DSI_EINT_RDBK_CRC_ERR = 2,
DSI_EINT_RDBK_INCOMPLETE_PKT = 3,
DSI_EINT_PERIPH_ERROR_PKT = 4,
DSI_EINT_LP_RX_TIMEOUT = 5,
DSI_EINT_HS_TX_TIMEOUT = 6,
DSI_EINT_BTA_TIMEOUT = 7,
DSI_EINT_PLL_UNLOCK = 8,
DSI_EINT_DLN0_ESC_ENTRY_ERR = 9,
DSI_EINT_DLN0_ESC_SYNC_ERR = 10,
DSI_EINT_DLN0_LP_CONTROL_ERR = 11,
DSI_EINT_PANEL_SPECIFIC_ERR = 12,
DSI_EINT_INTERLEAVE_OP_CONTENTION = 13,
DSI_EINT_CMD_DMA_FIFO_UNDERFLOW = 14,
DSI_EINT_CMD_MDP_FIFO_UNDERFLOW = 15,
DSI_EINT_DLN0_HS_FIFO_OVERFLOW = 16,
DSI_EINT_DLN1_HS_FIFO_OVERFLOW = 17,
DSI_EINT_DLN2_HS_FIFO_OVERFLOW = 18,
DSI_EINT_DLN3_HS_FIFO_OVERFLOW = 19,
DSI_EINT_DLN0_HS_FIFO_UNDERFLOW = 20,
DSI_EINT_DLN1_HS_FIFO_UNDERFLOW = 21,
DSI_EINT_DLN2_HS_FIFO_UNDERFLOW = 22,
DSI_EINT_DLN3_HS_FIFO_UNDERFLOW = 23,
DSI_EINT_DLN0_LP0_CONTENTION = 24,
DSI_EINT_DLN1_LP0_CONTENTION = 25,
DSI_EINT_DLN2_LP0_CONTENTION = 26,
DSI_EINT_DLN3_LP0_CONTENTION = 27,
DSI_EINT_DLN0_LP1_CONTENTION = 28,
DSI_EINT_DLN1_LP1_CONTENTION = 29,
DSI_EINT_DLN2_LP1_CONTENTION = 30,
DSI_EINT_DLN3_LP1_CONTENTION = 31,
DSI_ERROR_INTERRUPT_COUNT
};
/**
* enum dsi_error_int_type - error interrupts generated by DSI controller
* @DSI_RDBK_SINGLE_ECC_ERR: Single bit ECC error in read packet.
* @DSI_RDBK_MULTI_ECC_ERR: Multi bit ECC error in read packet.
* @DSI_RDBK_CRC_ERR: CRC error in read packet.
* @DSI_RDBK_INCOMPLETE_PKT: Incomplete read packet.
 * @DSI_PERIPH_ERROR_PKT: Error packet returned from peripheral.
* @DSI_LP_RX_TIMEOUT: Low power reverse transmission timeout.
* @DSI_HS_TX_TIMEOUT: High speed forward transmission timeout.
* @DSI_BTA_TIMEOUT: BTA timeout.
* @DSI_PLL_UNLOCK: PLL has unlocked.
* @DSI_DLN0_ESC_ENTRY_ERR: Incorrect LP Rx escape entry.
* @DSI_DLN0_ESC_SYNC_ERR: LP Rx data is not byte aligned.
* @DSI_DLN0_LP_CONTROL_ERR: Incorrect LP Rx state sequence.
* @DSI_PANEL_SPECIFIC_ERR: DSI Protocol violation.
* @DSI_INTERLEAVE_OP_CONTENTION: Interleave operation contention.
* @DSI_CMD_DMA_FIFO_UNDERFLOW: Command mode DMA FIFO underflow.
* @DSI_CMD_MDP_FIFO_UNDERFLOW: Command MDP FIFO underflow (failed to
* receive one complete line from MDP).
* @DSI_DLN0_HS_FIFO_OVERFLOW: High speed FIFO for data lane 0 overflows.
* @DSI_DLN1_HS_FIFO_OVERFLOW: High speed FIFO for data lane 1 overflows.
* @DSI_DLN2_HS_FIFO_OVERFLOW: High speed FIFO for data lane 2 overflows.
* @DSI_DLN3_HS_FIFO_OVERFLOW: High speed FIFO for data lane 3 overflows.
* @DSI_DLN0_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 0 underflows.
* @DSI_DLN1_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 1 underflows.
* @DSI_DLN2_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 2 underflows.
 * @DSI_DLN3_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 3 underflows.
* @DSI_DLN0_LP0_CONTENTION: PHY level contention while lane 0 is low.
* @DSI_DLN1_LP0_CONTENTION: PHY level contention while lane 1 is low.
* @DSI_DLN2_LP0_CONTENTION: PHY level contention while lane 2 is low.
* @DSI_DLN3_LP0_CONTENTION: PHY level contention while lane 3 is low.
* @DSI_DLN0_LP1_CONTENTION: PHY level contention while lane 0 is high.
* @DSI_DLN1_LP1_CONTENTION: PHY level contention while lane 1 is high.
* @DSI_DLN2_LP1_CONTENTION: PHY level contention while lane 2 is high.
* @DSI_DLN3_LP1_CONTENTION: PHY level contention while lane 3 is high.
*/
enum dsi_error_int_type {
DSI_RDBK_SINGLE_ECC_ERR = BIT(DSI_EINT_RDBK_SINGLE_ECC_ERR),
DSI_RDBK_MULTI_ECC_ERR = BIT(DSI_EINT_RDBK_MULTI_ECC_ERR),
DSI_RDBK_CRC_ERR = BIT(DSI_EINT_RDBK_CRC_ERR),
DSI_RDBK_INCOMPLETE_PKT = BIT(DSI_EINT_RDBK_INCOMPLETE_PKT),
DSI_PERIPH_ERROR_PKT = BIT(DSI_EINT_PERIPH_ERROR_PKT),
DSI_LP_RX_TIMEOUT = BIT(DSI_EINT_LP_RX_TIMEOUT),
DSI_HS_TX_TIMEOUT = BIT(DSI_EINT_HS_TX_TIMEOUT),
DSI_BTA_TIMEOUT = BIT(DSI_EINT_BTA_TIMEOUT),
DSI_PLL_UNLOCK = BIT(DSI_EINT_PLL_UNLOCK),
DSI_DLN0_ESC_ENTRY_ERR = BIT(DSI_EINT_DLN0_ESC_ENTRY_ERR),
DSI_DLN0_ESC_SYNC_ERR = BIT(DSI_EINT_DLN0_ESC_SYNC_ERR),
DSI_DLN0_LP_CONTROL_ERR = BIT(DSI_EINT_DLN0_LP_CONTROL_ERR),
DSI_PANEL_SPECIFIC_ERR = BIT(DSI_EINT_PANEL_SPECIFIC_ERR),
DSI_INTERLEAVE_OP_CONTENTION = BIT(DSI_EINT_INTERLEAVE_OP_CONTENTION),
DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(DSI_EINT_CMD_DMA_FIFO_UNDERFLOW),
DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(DSI_EINT_CMD_MDP_FIFO_UNDERFLOW),
DSI_DLN0_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN0_HS_FIFO_OVERFLOW),
DSI_DLN1_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN1_HS_FIFO_OVERFLOW),
DSI_DLN2_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN2_HS_FIFO_OVERFLOW),
DSI_DLN3_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN3_HS_FIFO_OVERFLOW),
DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN0_HS_FIFO_UNDERFLOW),
DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN1_HS_FIFO_UNDERFLOW),
DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN2_HS_FIFO_UNDERFLOW),
DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN3_HS_FIFO_UNDERFLOW),
DSI_DLN0_LP0_CONTENTION = BIT(DSI_EINT_DLN0_LP0_CONTENTION),
DSI_DLN1_LP0_CONTENTION = BIT(DSI_EINT_DLN1_LP0_CONTENTION),
DSI_DLN2_LP0_CONTENTION = BIT(DSI_EINT_DLN2_LP0_CONTENTION),
DSI_DLN3_LP0_CONTENTION = BIT(DSI_EINT_DLN3_LP0_CONTENTION),
DSI_DLN0_LP1_CONTENTION = BIT(DSI_EINT_DLN0_LP1_CONTENTION),
DSI_DLN1_LP1_CONTENTION = BIT(DSI_EINT_DLN1_LP1_CONTENTION),
DSI_DLN2_LP1_CONTENTION = BIT(DSI_EINT_DLN2_LP1_CONTENTION),
DSI_DLN3_LP1_CONTENTION = BIT(DSI_EINT_DLN3_LP1_CONTENTION),
};
/**
* struct dsi_ctrl_cmd_dma_info - command buffer information
* @offset: IOMMU VA for command buffer address.
* @length: Length of the command buffer.
* @datatype: Datatype of cmd.
* @en_broadcast: Enable broadcast mode if set to true.
* @is_master: Is master in broadcast mode.
* @use_lpm: Use low power mode for command transmission.
*/
struct dsi_ctrl_cmd_dma_info {
u32 offset;
u32 length;
u8 datatype;
bool en_broadcast;
bool is_master;
bool use_lpm;
};
/**
 * struct dsi_ctrl_cmd_dma_fifo_info - command payload to be sent using FIFO
* @command: VA for command buffer.
* @size: Size of the command buffer.
* @en_broadcast: Enable broadcast mode if set to true.
* @is_master: Is master in broadcast mode.
* @use_lpm: Use low power mode for command transmission.
*/
struct dsi_ctrl_cmd_dma_fifo_info {
u32 *command;
u32 size;
bool en_broadcast;
bool is_master;
bool use_lpm;
};
struct dsi_ctrl_hw;
struct ctrl_ulps_config_ops {
/**
* ulps_request() - request ulps entry for specified lanes
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to enter ULPS.
*
* Caller should check if lanes are in ULPS mode by calling
* get_lanes_in_ulps() operation.
*/
void (*ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
/**
* ulps_exit() - exit ULPS on specified lanes
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to exit ULPS.
*
* Caller should check if lanes are in active mode by calling
* get_lanes_in_ulps() operation.
*/
void (*ulps_exit)(struct dsi_ctrl_hw *ctrl, u32 lanes);
/**
* get_lanes_in_ulps() - returns the list of lanes in ULPS mode
* @ctrl: Pointer to the controller host hardware.
*
* Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
* state. If 0 is returned, all the lanes are active.
*
* Return: List of lanes in ULPS state.
*/
u32 (*get_lanes_in_ulps)(struct dsi_ctrl_hw *ctrl);
};
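/*
 * Illustrative ULPS round trip (editor's example): request entry, verify
 * through get_lanes_in_ulps(), and exit later. "lanes" is an assumed ORed
 * mask of enum dsi_data_lanes values; ulps_ops lives inside the ops table
 * defined below.
 *
 *	hw->ops.ulps_ops.ulps_request(hw, lanes);
 *	if ((hw->ops.ulps_ops.get_lanes_in_ulps(hw) & lanes) != lanes)
 *		pr_err("lanes failed to enter ULPS\n");
 *	...
 *	hw->ops.ulps_ops.ulps_exit(hw, lanes);
 */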
/**
* struct dsi_ctrl_hw_ops - operations supported by dsi host hardware
*/
struct dsi_ctrl_hw_ops {
/**
* host_setup() - Setup DSI host configuration
* @ctrl: Pointer to controller host hardware.
* @config: Configuration for DSI host controller
*/
void (*host_setup)(struct dsi_ctrl_hw *ctrl,
struct dsi_host_common_cfg *config);
/**
* video_engine_en() - enable DSI video engine
* @ctrl: Pointer to controller host hardware.
 * @on: Enable/disable video engine.
*/
void (*video_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
/**
* setup_avr() - set the AVR_SUPPORT_ENABLE bit in DSI_VIDEO_MODE_CTRL
* @ctrl: Pointer to controller host hardware.
* @enable: Controls whether this bit is set or cleared
*/
void (*setup_avr)(struct dsi_ctrl_hw *ctrl, bool enable);
/**
* video_engine_setup() - Setup dsi host controller for video mode
* @ctrl: Pointer to controller host hardware.
* @common_cfg: Common configuration parameters.
* @cfg: Video mode configuration.
*
* Set up DSI video engine with a specific configuration. Controller and
* video engine are not enabled as part of this function.
*/
void (*video_engine_setup)(struct dsi_ctrl_hw *ctrl,
struct dsi_host_common_cfg *common_cfg,
struct dsi_video_engine_cfg *cfg);
/**
* set_video_timing() - set up the timing for video frame
* @ctrl: Pointer to controller host hardware.
* @mode: Video mode information.
*
* Set up the video timing parameters for the DSI video mode operation.
*/
void (*set_video_timing)(struct dsi_ctrl_hw *ctrl,
struct dsi_mode_info *mode);
/**
* cmd_engine_setup() - setup dsi host controller for command mode
* @ctrl: Pointer to the controller host hardware.
* @common_cfg: Common configuration parameters.
* @cfg: Command mode configuration.
*
* Setup DSI CMD engine with a specific configuration. Controller and
* command engine are not enabled as part of this function.
*/
void (*cmd_engine_setup)(struct dsi_ctrl_hw *ctrl,
struct dsi_host_common_cfg *common_cfg,
struct dsi_cmd_engine_cfg *cfg);
/**
* setup_cmd_stream() - set up parameters for command pixel streams
* @ctrl: Pointer to controller host hardware.
* @mode: Pointer to mode information.
 * @h_stride: Horizontal stride in bytes.
 * @vc_id: Virtual channel ID of the stream.
 * @roi: Region of interest rectangle for the stream.
 *
 * Set up parameters for command mode pixel stream size.
*/
void (*setup_cmd_stream)(struct dsi_ctrl_hw *ctrl,
struct dsi_mode_info *mode,
u32 h_stride,
u32 vc_id,
struct dsi_rect *roi);
/**
* ctrl_en() - enable DSI controller engine
* @ctrl: Pointer to the controller host hardware.
* @on: turn on/off the DSI controller engine.
*/
void (*ctrl_en)(struct dsi_ctrl_hw *ctrl, bool on);
/**
* cmd_engine_en() - enable DSI controller command engine
* @ctrl: Pointer to the controller host hardware.
* @on: Turn on/off the DSI command engine.
*/
void (*cmd_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
/**
* phy_sw_reset() - perform a soft reset on the PHY.
* @ctrl: Pointer to the controller host hardware.
*/
void (*phy_sw_reset)(struct dsi_ctrl_hw *ctrl);
/**
* config_clk_gating() - enable/disable DSI PHY clk gating
* @ctrl: Pointer to the controller host hardware.
* @enable: enable/disable DSI PHY clock gating.
* @clk_selection: clock to enable/disable clock gating.
*/
void (*config_clk_gating)(struct dsi_ctrl_hw *ctrl, bool enable,
enum dsi_clk_gate_type clk_selection);
/**
* debug_bus() - get dsi debug bus status.
* @ctrl: Pointer to the controller host hardware.
* @entries: Array of dsi debug bus control values.
* @size: Size of dsi debug bus control array.
*/
void (*debug_bus)(struct dsi_ctrl_hw *ctrl, u32 *entries, u32 size);
/**
* soft_reset() - perform a soft reset on DSI controller
* @ctrl: Pointer to the controller host hardware.
*
* The video, command and controller engines will be disabled before the
 * reset is triggered. Afterwards, the engines will be re-enabled to the same
* state as before the reset.
*
* If the reset is done while MDP timing engine is turned on, the video
* engine should be re-enabled only during the vertical blanking time.
*/
void (*soft_reset)(struct dsi_ctrl_hw *ctrl);
/**
* setup_lane_map() - setup mapping between logical and physical lanes
* @ctrl: Pointer to the controller host hardware.
* @lane_map: Structure defining the mapping between DSI logical
* lanes and physical lanes.
*/
void (*setup_lane_map)(struct dsi_ctrl_hw *ctrl,
struct dsi_lane_map *lane_map);
/**
* kickoff_command() - transmits commands stored in memory
* @ctrl: Pointer to the controller host hardware.
* @cmd: Command information.
* @flags: Modifiers for command transmission.
*
* The controller hardware is programmed with address and size of the
* command buffer. The transmission is kicked off if
* DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
* set, caller should make a separate call to trigger_command_dma() to
* transmit the command.
*/
void (*kickoff_command)(struct dsi_ctrl_hw *ctrl,
struct dsi_ctrl_cmd_dma_info *cmd,
u32 flags);
/**
* kickoff_command_non_embedded_mode() - cmd in non embedded mode
* @ctrl: Pointer to the controller host hardware.
* @cmd: Command information.
* @flags: Modifiers for command transmission.
*
 * If the command length is greater than the 256 byte DMA FIFO size, this
 * non-embedded mode is used.
* The controller hardware is programmed with address and size of the
* command buffer. The transmission is kicked off if
* DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
* set, caller should make a separate call to trigger_command_dma() to
* transmit the command.
*/
void (*kickoff_command_non_embedded_mode)(struct dsi_ctrl_hw *ctrl,
struct dsi_ctrl_cmd_dma_info *cmd,
u32 flags);
/**
* kickoff_fifo_command() - transmits a command using FIFO in dsi
* hardware.
* @ctrl: Pointer to the controller host hardware.
* @cmd: Command information.
* @flags: Modifiers for command transmission.
*
* The controller hardware FIFO is programmed with command header and
* payload. The transmission is kicked off if
* DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
* set, caller should make a separate call to trigger_command_dma() to
* transmit the command.
*/
void (*kickoff_fifo_command)(struct dsi_ctrl_hw *ctrl,
struct dsi_ctrl_cmd_dma_fifo_info *cmd,
u32 flags);
/**
 * reset_cmd_fifo() - reset the command FIFO in DSI hardware
 * @ctrl: Pointer to the controller host hardware.
 */
void (*reset_cmd_fifo)(struct dsi_ctrl_hw *ctrl);
/**
* trigger_command_dma() - trigger transmission of command buffer.
* @ctrl: Pointer to the controller host hardware.
*
 * This trigger can only be used if there was a prior call to
 * kickoff_command() or kickoff_fifo_command() with the
 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
*/
void (*trigger_command_dma)(struct dsi_ctrl_hw *ctrl);
/**
* get_cmd_read_data() - get data read from the peripheral
* @ctrl: Pointer to the controller host hardware.
* @rd_buf: Buffer where data will be read into.
* @read_offset: Offset from where to read.
* @rx_byte: Number of bytes to be read.
* @pkt_size: Size of response expected.
* @hw_read_cnt: Actual number of bytes read by HW.
*/
u32 (*get_cmd_read_data)(struct dsi_ctrl_hw *ctrl,
u8 *rd_buf,
u32 read_offset,
u32 rx_byte,
u32 pkt_size,
u32 *hw_read_cnt);
/**
* get_cont_splash_status() - get continuous splash status
* @ctrl: Pointer to the controller host hardware.
*/
bool (*get_cont_splash_status)(struct dsi_ctrl_hw *ctrl);
/**
* wait_for_lane_idle() - wait for DSI lanes to go to idle state
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to be checked to be in idle state.
*/
int (*wait_for_lane_idle)(struct dsi_ctrl_hw *ctrl, u32 lanes);
struct ctrl_ulps_config_ops ulps_ops;
/**
 * clamp_enable() - enable DSI clamps to keep PHY driving a stable link
 * @ctrl: Pointer to the controller host hardware.
 * @lanes: ORed list of lanes which need to be clamped.
 * @enable_ulps: Boolean to specify if ULPS is enabled in DSI controller.
 */
void (*clamp_enable)(struct dsi_ctrl_hw *ctrl,
u32 lanes,
bool enable_ulps);
/**
* clamp_disable() - disable DSI clamps
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes which need to have clamps released.
 * @disable_ulps: Boolean to specify if ULPS is enabled in DSI controller.
*/
void (*clamp_disable)(struct dsi_ctrl_hw *ctrl,
u32 lanes,
bool disable_ulps);
/**
* phy_reset_config() - Disable/enable propagation of reset signal
* from ahb domain to DSI PHY
* @ctrl: Pointer to the controller host hardware.
* @enable: True to mask the reset signal, false to unmask
*/
void (*phy_reset_config)(struct dsi_ctrl_hw *ctrl,
bool enable);
/**
* get_interrupt_status() - returns the interrupt status
* @ctrl: Pointer to the controller host hardware.
*
* Returns the ORed list of interrupts(enum dsi_status_int_type) that
* are active. This list does not include any error interrupts. Caller
* should call get_error_status for error interrupts.
*
* Return: List of active interrupts.
*/
u32 (*get_interrupt_status)(struct dsi_ctrl_hw *ctrl);
/**
* clear_interrupt_status() - clears the specified interrupts
* @ctrl: Pointer to the controller host hardware.
* @ints: List of interrupts to be cleared.
*/
void (*clear_interrupt_status)(struct dsi_ctrl_hw *ctrl, u32 ints);
/**
* enable_status_interrupts() - enable the specified interrupts
* @ctrl: Pointer to the controller host hardware.
* @ints: List of interrupts to be enabled.
*
* Enables the specified interrupts. This list will override the
* previous interrupts enabled through this function. Caller has to
* maintain the state of the interrupts enabled. To disable all
* interrupts, set ints to 0.
*/
void (*enable_status_interrupts)(struct dsi_ctrl_hw *ctrl, u32 ints);
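/*
 * Illustrative note (editor's example): because each call overrides the
 * previously enabled set, callers typically keep a shadow mask and rewrite
 * it in full, e.g.:
 *
 *	enabled_ints |= DSI_CMD_MODE_DMA_DONE;
 *	hw->ops.enable_status_interrupts(hw, enabled_ints);
 */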
/**
* get_error_status() - returns the error status
* @ctrl: Pointer to the controller host hardware.
*
* Returns the ORed list of errors(enum dsi_error_int_type) that are
* active. This list does not include any status interrupts. Caller
* should call get_interrupt_status for status interrupts.
*
* Return: List of active error interrupts.
*/
u64 (*get_error_status)(struct dsi_ctrl_hw *ctrl);
/**
* clear_error_status() - clears the specified errors
* @ctrl: Pointer to the controller host hardware.
* @errors: List of errors to be cleared.
*/
void (*clear_error_status)(struct dsi_ctrl_hw *ctrl, u64 errors);
/**
* enable_error_interrupts() - enable the specified interrupts
* @ctrl: Pointer to the controller host hardware.
* @errors: List of errors to be enabled.
*
* Enables the specified interrupts. This list will override the
* previous interrupts enabled through this function. Caller has to
* maintain the state of the interrupts enabled. To disable all
* interrupts, set errors to 0.
*/
void (*enable_error_interrupts)(struct dsi_ctrl_hw *ctrl, u64 errors);
/**
* video_test_pattern_setup() - setup test pattern engine for video mode
* @ctrl: Pointer to the controller host hardware.
* @type: Type of test pattern.
* @init_val: Initial value to use for generating test pattern.
*/
void (*video_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
enum dsi_test_pattern type,
u32 init_val);
/**
 * cmd_test_pattern_setup() - setup test pattern engine for cmd mode
* @ctrl: Pointer to the controller host hardware.
* @type: Type of test pattern.
* @init_val: Initial value to use for generating test pattern.
* @stream_id: Stream Id on which packets are generated.
*/
void (*cmd_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
enum dsi_test_pattern type,
u32 init_val,
u32 stream_id);
/**
* test_pattern_enable() - enable test pattern engine
* @ctrl: Pointer to the controller host hardware.
* @enable: Enable/Disable test pattern engine.
*/
void (*test_pattern_enable)(struct dsi_ctrl_hw *ctrl, bool enable);
/**
* clear_phy0_ln_err() - clear DSI PHY lane-0 errors
* @ctrl: Pointer to the controller host hardware.
*/
void (*clear_phy0_ln_err)(struct dsi_ctrl_hw *ctrl);
/**
* trigger_cmd_test_pattern() - trigger a command mode frame update with
* test pattern
* @ctrl: Pointer to the controller host hardware.
* @stream_id: Stream on which frame update is sent.
*/
void (*trigger_cmd_test_pattern)(struct dsi_ctrl_hw *ctrl,
u32 stream_id);
/**
 * reg_dump_to_buffer() - dump controller registers into a text buffer
 * @ctrl: Pointer to the controller host hardware.
 * @buf: Destination buffer for the register dump.
 * @size: Size of the destination buffer.
 */
ssize_t (*reg_dump_to_buffer)(struct dsi_ctrl_hw *ctrl,
char *buf,
u32 size);
/**
* setup_misr() - Setup frame MISR
* @ctrl: Pointer to the controller host hardware.
* @panel_mode: CMD or VIDEO mode indicator
* @enable: Enable/disable MISR.
* @frame_count: Number of frames to accumulate MISR.
*/
void (*setup_misr)(struct dsi_ctrl_hw *ctrl,
enum dsi_op_mode panel_mode,
bool enable, u32 frame_count);
/**
* collect_misr() - Read frame MISR
* @ctrl: Pointer to the controller host hardware.
* @panel_mode: CMD or VIDEO mode indicator
*/
u32 (*collect_misr)(struct dsi_ctrl_hw *ctrl,
enum dsi_op_mode panel_mode);
/**
* set_timing_db() - enable/disable Timing DB register
* @ctrl: Pointer to controller host hardware.
* @enable: Enable/Disable flag.
*
 * Enable or disable the Timing DB register.
*/
void (*set_timing_db)(struct dsi_ctrl_hw *ctrl,
bool enable);
/**
* clear_rdbk_register() - Clear and reset read back register
* @ctrl: Pointer to the controller host hardware.
*/
void (*clear_rdbk_register)(struct dsi_ctrl_hw *ctrl);
/**
 * schedule_dma_cmd() - Schedule DMA command transfer on a
 *			particular blanking line.
 * @ctrl: Pointer to the controller host hardware.
 * @line_no: Blanking line number on which the DMA command
 *	     needs to be sent.
 */
void (*schedule_dma_cmd)(struct dsi_ctrl_hw *ctrl, int line_no);
/**
* ctrl_reset() - Reset DSI lanes to recover from DSI errors
* @ctrl: Pointer to the controller host hardware.
* @mask: Indicates the error type.
*/
int (*ctrl_reset)(struct dsi_ctrl_hw *ctrl, int mask);
/**
 * mask_error_intr() - Mask/Unmask particular DSI error interrupts
* @ctrl: Pointer to the controller host hardware.
* @idx: Indicates the errors to be masked.
* @en: Bool for mask or unmask of the error
*/
void (*mask_error_intr)(struct dsi_ctrl_hw *ctrl, u32 idx, bool en);
/**
* error_intr_ctrl() - Mask/Unmask master DSI error interrupt
* @ctrl: Pointer to the controller host hardware.
* @en: Bool for mask or unmask of DSI error
*/
void (*error_intr_ctrl)(struct dsi_ctrl_hw *ctrl, bool en);
/**
* get_error_mask() - get DSI error interrupt mask status
* @ctrl: Pointer to the controller host hardware.
*/
u32 (*get_error_mask)(struct dsi_ctrl_hw *ctrl);
/**
* get_hw_version() - get DSI controller hw version
* @ctrl: Pointer to the controller host hardware.
*/
u32 (*get_hw_version)(struct dsi_ctrl_hw *ctrl);
/**
 * wait_for_cmd_mode_mdp_idle() - wait for the command mode engine to finish
 *				sending data from the display engine
* @ctrl: Pointer to the controller host hardware.
*/
int (*wait_for_cmd_mode_mdp_idle)(struct dsi_ctrl_hw *ctrl);
/**
 * set_continuous_clk() - Set continuous clock
* @ctrl: Pointer to the controller host hardware.
* @enable: Bool to control continuous clock request.
*/
void (*set_continuous_clk)(struct dsi_ctrl_hw *ctrl, bool enable);
};
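/*
 * Illustrative sketch (editor's example): callers reach the hardware only
 * through this function-pointer table, which lets one driver core serve
 * every controller revision. A soft recovery path might look like:
 *
 *	void recover(struct dsi_ctrl_hw *hw)
 *	{
 *		hw->ops.soft_reset(hw);
 *		hw->ops.clear_error_status(hw,
 *				hw->ops.get_error_status(hw));
 *	}
 */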
/**
* struct dsi_ctrl_hw - DSI controller hardware object specific to an instance
* @base: VA for the DSI controller base address.
* @length: Length of the DSI controller register map.
* @mmss_misc_base: Base address of mmss_misc register map.
* @mmss_misc_length: Length of mmss_misc register map.
* @disp_cc_base: Base address of disp_cc register map.
* @disp_cc_length: Length of disp_cc register map.
* @index: Instance ID of the controller.
* @feature_map: Features supported by the DSI controller.
* @ops: Function pointers to the operations supported by the
* controller.
* @supported_interrupts: Number of supported interrupts.
* @supported_errors: Number of supported errors.
 * @phy_isolation_enabled: A boolean property that allows the PHY to be
 *			isolated from the DSI controller so that only the
 *			DSI controller runs.
 * @null_insertion_enabled: A boolean property that allows the DSI controller
 *			to insert null packets.
*/
struct dsi_ctrl_hw {
void __iomem *base;
u32 length;
void __iomem *mmss_misc_base;
u32 mmss_misc_length;
void __iomem *disp_cc_base;
u32 disp_cc_length;
u32 index;
/* features */
DECLARE_BITMAP(feature_map, DSI_CTRL_MAX_FEATURES);
struct dsi_ctrl_hw_ops ops;
/* capabilities */
u32 supported_interrupts;
u64 supported_errors;
bool phy_isolation_enabled;
bool null_insertion_enabled;
};
#endif /* _DSI_CTRL_HW_H_ */

479
msm/dsi/dsi_ctrl_hw_1_4.c Normal file
View File

@@ -0,0 +1,479 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "dsi-hw:" fmt
#include <linux/delay.h>
#include <linux/iopoll.h>
#include "dsi_ctrl_hw.h"
#include "dsi_ctrl_reg.h"
#include "dsi_hw.h"
#define MMSS_MISC_CLAMP_REG_OFF 0x0014
/**
* dsi_ctrl_hw_14_setup_lane_map() - setup mapping between
* logical and physical lanes
* @ctrl: Pointer to the controller host hardware.
* @lane_map: Structure defining the mapping between DSI logical
* lanes and physical lanes.
*/
void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
struct dsi_lane_map *lane_map)
{
DSI_W32(ctrl, DSI_LANE_SWAP_CTRL, lane_map->lane_map_v1);
pr_debug("[DSI_%d] Lane swap setup complete\n", ctrl->index);
}
/**
 * dsi_ctrl_hw_14_wait_for_lane_idle() - wait for DSI lanes to go idle
 * This function waits for all the active DSI lanes to be idle by polling all
 * the FIFO_EMPTY bits and polling the lane status to ensure that all the lanes
* are in stop state. This function assumes that the bus clocks required to
* access the registers are already turned on.
*
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to be stopped.
*
 * Return: Error code.
*/
int dsi_ctrl_hw_14_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes)
{
int rc = 0, val = 0;
u32 stop_state_mask = 0, fifo_empty_mask = 0;
u32 const sleep_us = 10;
u32 const timeout_us = 100;
if (lanes & DSI_DATA_LANE_0) {
stop_state_mask |= BIT(0);
fifo_empty_mask |= (BIT(12) | BIT(16));
}
if (lanes & DSI_DATA_LANE_1) {
stop_state_mask |= BIT(1);
fifo_empty_mask |= BIT(20);
}
if (lanes & DSI_DATA_LANE_2) {
stop_state_mask |= BIT(2);
fifo_empty_mask |= BIT(24);
}
if (lanes & DSI_DATA_LANE_3) {
stop_state_mask |= BIT(3);
fifo_empty_mask |= BIT(28);
}
pr_debug("%s: polling for fifo empty, mask=0x%08x\n", __func__,
fifo_empty_mask);
rc = readl_poll_timeout(ctrl->base + DSI_FIFO_STATUS, val,
(val & fifo_empty_mask), sleep_us, timeout_us);
if (rc) {
pr_err("%s: fifo not empty, FIFO_STATUS=0x%08x\n",
__func__, val);
goto error;
}
pr_debug("%s: polling for lanes to be in stop state, mask=0x%08x\n",
__func__, stop_state_mask);
rc = readl_poll_timeout(ctrl->base + DSI_LANE_STATUS, val,
(val & stop_state_mask), sleep_us, timeout_us);
if (rc) {
pr_err("%s: lanes not in stop state, LANE_STATUS=0x%08x\n",
__func__, val);
goto error;
}
error:
return rc;
}
/**
* ulps_request() - request ulps entry for specified lanes
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to enter ULPS.
*
* Caller should check if lanes are in ULPS mode by calling
* get_lanes_in_ulps() operation.
*/
void dsi_ctrl_hw_cmn_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
{
u32 reg = 0;
reg = DSI_R32(ctrl, DSI_LANE_CTRL);
if (lanes & DSI_CLOCK_LANE)
reg |= BIT(4);
if (lanes & DSI_DATA_LANE_0)
reg |= BIT(0);
if (lanes & DSI_DATA_LANE_1)
reg |= BIT(1);
if (lanes & DSI_DATA_LANE_2)
reg |= BIT(2);
if (lanes & DSI_DATA_LANE_3)
reg |= BIT(3);
/*
 * ULPS entry request. Wait for a short time to make sure
* that the lanes enter ULPS. Recommended as per HPG.
*/
DSI_W32(ctrl, DSI_LANE_CTRL, reg);
usleep_range(100, 110);
pr_debug("[DSI_%d] ULPS requested for lanes 0x%x\n", ctrl->index,
lanes);
}
/**
* ulps_exit() - exit ULPS on specified lanes
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to exit ULPS.
*
* Caller should check if lanes are in active mode by calling
* get_lanes_in_ulps() operation.
*/
void dsi_ctrl_hw_cmn_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes)
{
u32 reg = 0;
u32 prev_reg = 0;
prev_reg = DSI_R32(ctrl, DSI_LANE_CTRL);
prev_reg &= BIT(24);
if (lanes & DSI_CLOCK_LANE)
reg |= BIT(12);
if (lanes & DSI_DATA_LANE_0)
reg |= BIT(8);
if (lanes & DSI_DATA_LANE_1)
reg |= BIT(9);
if (lanes & DSI_DATA_LANE_2)
reg |= BIT(10);
if (lanes & DSI_DATA_LANE_3)
reg |= BIT(11);
/*
* ULPS Exit Request
* Hardware requirement is to wait for at least 1ms
*/
DSI_W32(ctrl, DSI_LANE_CTRL, reg | prev_reg);
usleep_range(1000, 1010);
/*
* Sometimes when exiting ULPS, it is possible that some DSI
* lanes are not in the stop state which could lead to DSI
* commands not going through. To avoid this, force the lanes
* to be in stop state.
*/
DSI_W32(ctrl, DSI_LANE_CTRL, (reg << 8) | prev_reg);
wmb(); /* ensure lanes are put to stop state */
DSI_W32(ctrl, DSI_LANE_CTRL, 0x0 | prev_reg);
wmb(); /* ensure lanes are put to stop state */
pr_debug("[DSI_%d] ULPS exit request for lanes=0x%x\n",
ctrl->index, lanes);
}
/**
* get_lanes_in_ulps() - returns the list of lanes in ULPS mode
* @ctrl: Pointer to the controller host hardware.
*
* Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
* state. If 0 is returned, all the lanes are active.
*
* Return: List of lanes in ULPS state.
*/
u32 dsi_ctrl_hw_cmn_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl)
{
u32 reg = 0;
u32 lanes = 0;
reg = DSI_R32(ctrl, DSI_LANE_STATUS);
if (!(reg & BIT(8)))
lanes |= DSI_DATA_LANE_0;
if (!(reg & BIT(9)))
lanes |= DSI_DATA_LANE_1;
if (!(reg & BIT(10)))
lanes |= DSI_DATA_LANE_2;
if (!(reg & BIT(11)))
lanes |= DSI_DATA_LANE_3;
if (!(reg & BIT(12)))
lanes |= DSI_CLOCK_LANE;
pr_debug("[DSI_%d] lanes in ulps = 0x%x\n", ctrl->index, lanes);
return lanes;
}
/**
* clamp_enable() - enable DSI clamps to keep PHY driving a stable link
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes which need to be clamped.
* @enable_ulps: Boolean to specify if ULPS is enabled in DSI controller
*/
void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
u32 lanes,
bool enable_ulps)
{
u32 clamp_reg = 0;
u32 bit_shift = 0;
u32 reg = 0;
if (ctrl->index == 1)
bit_shift = 16;
if (lanes & DSI_CLOCK_LANE) {
clamp_reg |= BIT(9);
if (enable_ulps)
clamp_reg |= BIT(8);
}
if (lanes & DSI_DATA_LANE_0) {
clamp_reg |= BIT(7);
if (enable_ulps)
clamp_reg |= BIT(6);
}
if (lanes & DSI_DATA_LANE_1) {
clamp_reg |= BIT(5);
if (enable_ulps)
clamp_reg |= BIT(4);
}
if (lanes & DSI_DATA_LANE_2) {
clamp_reg |= BIT(3);
if (enable_ulps)
clamp_reg |= BIT(2);
}
if (lanes & DSI_DATA_LANE_3) {
clamp_reg |= BIT(1);
if (enable_ulps)
clamp_reg |= BIT(0);
}
reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
reg |= (clamp_reg << bit_shift);
DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
reg |= (BIT(15) << bit_shift); /* Enable clamp */
DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
pr_debug("[DSI_%d] Clamps enabled for lanes=0x%x\n", ctrl->index,
lanes);
}
/**
* clamp_disable() - disable DSI clamps
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes which need to have clamps released.
* @disable_ulps: Boolean to specify if ULPS is enabled in DSI controller
*/
void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
u32 lanes,
bool disable_ulps)
{
u32 clamp_reg = 0;
u32 bit_shift = 0;
u32 reg = 0;
if (ctrl->index == 1)
bit_shift = 16;
if (lanes & DSI_CLOCK_LANE) {
clamp_reg |= BIT(9);
if (disable_ulps)
clamp_reg |= BIT(8);
}
if (lanes & DSI_DATA_LANE_0) {
clamp_reg |= BIT(7);
if (disable_ulps)
clamp_reg |= BIT(6);
}
if (lanes & DSI_DATA_LANE_1) {
clamp_reg |= BIT(5);
if (disable_ulps)
clamp_reg |= BIT(4);
}
if (lanes & DSI_DATA_LANE_2) {
clamp_reg |= BIT(3);
if (disable_ulps)
clamp_reg |= BIT(2);
}
if (lanes & DSI_DATA_LANE_3) {
clamp_reg |= BIT(1);
if (disable_ulps)
clamp_reg |= BIT(0);
}
clamp_reg |= BIT(15); /* Enable clamp */
clamp_reg <<= bit_shift;
reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
reg &= ~(clamp_reg);
DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
pr_debug("[DSI_%d] Disable clamps for lanes=%d\n", ctrl->index, lanes);
}
#define DUMP_REG_VALUE(off) "\t%-30s: 0x%08x\n", #off, DSI_R32(ctrl, off)
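/*
 * Editor's note: the macro expands into the snprintf() argument list, so
 * DUMP_REG_VALUE(DSI_CTRL) becomes
 *	"\t%-30s: 0x%08x\n", "DSI_CTRL", DSI_R32(ctrl, DSI_CTRL)
 * supplying the format string, the stringified register name, and its
 * current value in one go.
 */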
ssize_t dsi_ctrl_hw_14_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
char *buf,
u32 size)
{
u32 len = 0;
len += snprintf((buf + len), (size - len), "CONFIGURATION REGS:\n");
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_HW_VERSION));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_FIFO_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_SYNC_DATATYPE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_PIXEL_DATATYPE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_BLANKING_DATATYPE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_DATA_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_H));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_V));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_HSYNC));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC_VPOS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_DMA_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_CMD_OFFSET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_CMD_LENGTH));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_FIFO_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_NULL_PACKET_DATA));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_ACK_ERR_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA0));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA1));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA3));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATATYPE0));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATATYPE1));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TRIG_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EXT_MUX));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EXT_MUX_TE_PULSE_DETECT_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CMD_MODE_DMA_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CMD_MODE_MDP_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CMD_MODE_BTA_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RESET_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_SWAP_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DLN0_PHY_ERR));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LP_TIMER_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_HS_TIMER_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TIMEOUT_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CLKOUT_TIMING_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EOT_PACKET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EOT_PACKET_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_GENERIC_ESC_TX_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_ERR_INT_MASK0));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_INT_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_SOFT_RESET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CLK_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CLK_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_PHY_SW_RESET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_AXI2AHB_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VBIF_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_AES_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_WRITE_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DSI_TIMING_FLUSH));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DSI_TIMING_DB_MODE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_RESET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VERSION));
pr_err("LLENGTH = %d\n", len);
return len;
}

225
msm/dsi/dsi_ctrl_hw_2_0.c Normal file
View File

@@ -0,0 +1,225 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "dsi-hw:" fmt
#include <linux/delay.h>
#include <linux/iopoll.h>
#include "dsi_ctrl_hw.h"
#include "dsi_ctrl_reg.h"
#include "dsi_hw.h"
void dsi_ctrl_hw_20_setup_lane_map(struct dsi_ctrl_hw *ctrl,
struct dsi_lane_map *lane_map)
{
u32 reg_value = lane_map->lane_map_v2[DSI_LOGICAL_LANE_0] |
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_1] << 4) |
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_2] << 8) |
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_3] << 12);
DSI_W32(ctrl, DSI_LANE_SWAP_CTRL, reg_value);
pr_debug("[DSI_%d] Lane swap setup complete\n", ctrl->index);
}
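/*
 * Editor's example: each logical lane occupies a 4-bit field above, assuming
 * lane_map_v2[] holds physical lane indices. A full swap that maps logical
 * lanes 0..3 to physical lanes 3..0 yields:
 *
 *	reg_value = 3 | (2 << 4) | (1 << 8) | (0 << 12);   i.e. 0x0123
 */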
int dsi_ctrl_hw_20_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl,
u32 lanes)
{
int rc = 0, val = 0;
u32 fifo_empty_mask = 0;
u32 const sleep_us = 10;
u32 const timeout_us = 100;
if (lanes & DSI_DATA_LANE_0)
fifo_empty_mask |= (BIT(12) | BIT(16));
if (lanes & DSI_DATA_LANE_1)
fifo_empty_mask |= BIT(20);
if (lanes & DSI_DATA_LANE_2)
fifo_empty_mask |= BIT(24);
if (lanes & DSI_DATA_LANE_3)
fifo_empty_mask |= BIT(28);
pr_debug("%s: polling for fifo empty, mask=0x%08x\n", __func__,
fifo_empty_mask);
rc = readl_poll_timeout(ctrl->base + DSI_FIFO_STATUS, val,
(val & fifo_empty_mask), sleep_us, timeout_us);
if (rc) {
pr_err("%s: fifo not empty, FIFO_STATUS=0x%08x\n",
__func__, val);
goto error;
}
error:
return rc;
}
#define DUMP_REG_VALUE(off) "\t%-30s: 0x%08x\n", #off, DSI_R32(ctrl, off)
ssize_t dsi_ctrl_hw_20_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
char *buf,
u32 size)
{
u32 len = 0;
len += snprintf((buf + len), (size - len), "CONFIGURATION REGS:\n");
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_HW_VERSION));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_FIFO_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_SYNC_DATATYPE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_PIXEL_DATATYPE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_BLANKING_DATATYPE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_DATA_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_H));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_V));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_HSYNC));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC_VPOS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_DMA_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_CMD_OFFSET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_CMD_LENGTH));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_FIFO_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DMA_NULL_PACKET_DATA));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_ACK_ERR_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA0));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA1));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA3));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATATYPE0));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATATYPE1));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TRIG_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EXT_MUX));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EXT_MUX_TE_PULSE_DETECT_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CMD_MODE_DMA_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CMD_MODE_MDP_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CMD_MODE_BTA_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RESET_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_MISR_CMD_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_MISR_VIDEO_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_SWAP_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DLN0_PHY_ERR));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LP_TIMER_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_HS_TIMER_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TIMEOUT_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CLKOUT_TIMING_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EOT_PACKET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_EOT_PACKET_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_GENERIC_ESC_TX_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_ERR_INT_MASK0));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_INT_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_SOFT_RESET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CLK_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_CLK_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_PHY_SW_RESET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_AXI2AHB_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_MISR_CMD_MDP0_32BIT));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_MISR_CMD_MDP1_32BIT));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_MISR_VIDEO_32BIT));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_TOTAL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VBIF_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_AES_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RDBK_DATA_CTRL));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_WRITE_TRIGGER));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DSI_TIMING_FLUSH));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_DSI_TIMING_DB_MODE));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_RESET));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_VERSION));
pr_err("LLENGTH = %d\n", len);
return len;
}

149
msm/dsi/dsi_ctrl_hw_2_2.c Normal file
View File

@@ -0,0 +1,149 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "dsi-hw:" fmt
#include "dsi_ctrl_hw.h"
#include "dsi_ctrl_reg.h"
#include "dsi_hw.h"
#include "dsi_catalog.h"
#define DISP_CC_MISC_CMD_REG_OFF 0x00
/* register to configure DMA scheduling */
#define DSI_DMA_SCHEDULE_CTRL 0x100
/**
 * dsi_ctrl_hw_22_phy_reset_config() - mask/unmask PHY reset signal during ULPS
* @ctrl: Pointer to the controller host hardware.
* @enable: boolean to specify enable/disable.
*/
void dsi_ctrl_hw_22_phy_reset_config(struct dsi_ctrl_hw *ctrl,
bool enable)
{
u32 reg = 0;
reg = DSI_DISP_CC_R32(ctrl, DISP_CC_MISC_CMD_REG_OFF);
/* Mask/unmask disable PHY reset bit */
if (enable)
reg &= ~BIT(ctrl->index);
else
reg |= BIT(ctrl->index);
DSI_DISP_CC_W32(ctrl, DISP_CC_MISC_CMD_REG_OFF, reg);
}
/**
* dsi_ctrl_hw_22_schedule_dma_cmd() - to schedule DMA command transfer
* @ctrl: Pointer to the controller host hardware.
* @line_no: Line number at which command needs to be sent.
*/
void dsi_ctrl_hw_22_schedule_dma_cmd(struct dsi_ctrl_hw *ctrl, int line_no)
{
u32 reg = 0;
reg = DSI_R32(ctrl, DSI_DMA_SCHEDULE_CTRL);
reg |= BIT(28);
reg |= (line_no & 0xffff);
DSI_W32(ctrl, DSI_DMA_SCHEDULE_CTRL, reg);
}
/**
 * dsi_ctrl_hw_22_get_cont_splash_status() - check whether continuous
 * splash is enabled
 * @ctrl: Pointer to the controller host hardware.
 *
 * Return: Continuous splash status.
*/
bool dsi_ctrl_hw_22_get_cont_splash_status(struct dsi_ctrl_hw *ctrl)
{
u32 reg = 0;
/*
 * DSI scratch register 1 is used by the bootloader to notify
 * whether continuous splash is enabled or not.
 */
reg = DSI_R32(ctrl, DSI_SCRATCH_REGISTER_1);
return reg == 0x1;
}
/**
 * dsi_ctrl_hw_kickoff_non_embedded_mode() - kick off cmd in non-embedded mode
 * @ctrl: Pointer to the controller host hardware.
 * @cmd: Command buffer information.
 * @flags: DSI CTRL flags.
 */
void dsi_ctrl_hw_kickoff_non_embedded_mode(struct dsi_ctrl_hw *ctrl,
struct dsi_ctrl_cmd_dma_info *cmd,
u32 flags)
{
u32 reg = 0;
reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
reg &= ~BIT(31);/* disable broadcast */
reg &= ~BIT(30);
if (cmd->use_lpm)
reg |= BIT(26);
else
reg &= ~BIT(26);
/* Select non EMBEDDED_MODE, pick the packet header from register */
reg &= ~BIT(28);
reg |= BIT(24);/* long packet */
reg |= BIT(29);/* wc_sel = 1 */
reg |= (((cmd->datatype) & 0x03f) << 16);/* data type */
DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
/* Enable WRITE_WATERMARK_DISABLE and READ_WATERMARK_DISABLE bits */
reg = DSI_R32(ctrl, DSI_DMA_FIFO_CTRL);
reg |= BIT(20);
reg |= BIT(16);
reg |= 0x33;/* Set READ and WRITE watermark levels to maximum */
DSI_W32(ctrl, DSI_DMA_FIFO_CTRL, reg);
DSI_W32(ctrl, DSI_DMA_CMD_OFFSET, cmd->offset);
DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, ((cmd->length) & 0xFFFFFF));
/* wait for writes to complete before kick off */
wmb();
if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
}
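/*
 * Illustrative selection sketch (editor's example): the header notes that
 * non-embedded mode applies when a command exceeds the 256 byte DMA FIFO,
 * so a caller might pick the kickoff path roughly as follows
 * (DSI_CTRL_DMA_FIFO_SIZE is an assumed local constant):
 *
 *	if (cmd->length > DSI_CTRL_DMA_FIFO_SIZE)
 *		hw->ops.kickoff_command_non_embedded_mode(hw, cmd, flags);
 *	else
 *		hw->ops.kickoff_command(hw, cmd, flags);
 */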
/**
 * dsi_ctrl_hw_22_config_clk_gating() - enable/disable clk gating on DSI PHY
 * @ctrl: Pointer to the controller host hardware.
 * @enable: Bool to notify enable/disable.
 * @clk_selection: Clocks for which gating is to be enabled/disabled.
*
*/
void dsi_ctrl_hw_22_config_clk_gating(struct dsi_ctrl_hw *ctrl, bool enable,
enum dsi_clk_gate_type clk_selection)
{
u32 reg = 0;
u32 enable_select = 0;
reg = DSI_DISP_CC_R32(ctrl, DISP_CC_MISC_CMD_REG_OFF);
if (clk_selection & PIXEL_CLK)
enable_select |= ctrl->index ? BIT(6) : BIT(5);
if (clk_selection & BYTE_CLK)
enable_select |= ctrl->index ? BIT(8) : BIT(7);
if (clk_selection & DSI_PHY)
enable_select |= ctrl->index ? BIT(10) : BIT(9);
if (enable)
reg |= enable_select;
else
reg &= ~enable_select;
DSI_DISP_CC_W32(ctrl, DISP_CC_MISC_CMD_REG_OFF, reg);
}

1540
msm/dsi/dsi_ctrl_hw_cmn.c Normal file

File diff suppressed because it is too large

187
msm/dsi/dsi_ctrl_reg.h Normal file
View File

@@ -0,0 +1,187 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_CTRL_REG_H_
#define _DSI_CTRL_REG_H_
#define DSI_HW_VERSION (0x0000)
#define DSI_CTRL (0x0004)
#define DSI_STATUS (0x0008)
#define DSI_FIFO_STATUS (0x000C)
#define DSI_VIDEO_MODE_CTRL (0x0010)
#define DSI_VIDEO_MODE_SYNC_DATATYPE (0x0014)
#define DSI_VIDEO_MODE_PIXEL_DATATYPE (0x0018)
#define DSI_VIDEO_MODE_BLANKING_DATATYPE (0x001C)
#define DSI_VIDEO_MODE_DATA_CTRL (0x0020)
#define DSI_VIDEO_MODE_ACTIVE_H (0x0024)
#define DSI_VIDEO_MODE_ACTIVE_V (0x0028)
#define DSI_VIDEO_MODE_TOTAL (0x002C)
#define DSI_VIDEO_MODE_HSYNC (0x0030)
#define DSI_VIDEO_MODE_VSYNC (0x0034)
#define DSI_VIDEO_MODE_VSYNC_VPOS (0x0038)
#define DSI_COMMAND_MODE_DMA_CTRL (0x003C)
#define DSI_COMMAND_MODE_MDP_CTRL (0x0040)
#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL (0x0044)
#define DSI_DMA_CMD_OFFSET (0x0048)
#define DSI_DMA_CMD_LENGTH (0x004C)
#define DSI_DMA_FIFO_CTRL (0x0050)
#define DSI_DMA_NULL_PACKET_DATA (0x0054)
#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL (0x0058)
#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL (0x005C)
#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL (0x0060)
#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL (0x0064)
#define DSI_ACK_ERR_STATUS (0x0068)
#define DSI_RDBK_DATA0 (0x006C)
#define DSI_RDBK_DATA1 (0x0070)
#define DSI_RDBK_DATA2 (0x0074)
#define DSI_RDBK_DATA3 (0x0078)
#define DSI_RDBK_DATATYPE0 (0x007C)
#define DSI_RDBK_DATATYPE1 (0x0080)
#define DSI_TRIG_CTRL (0x0084)
#define DSI_EXT_MUX (0x0088)
#define DSI_EXT_MUX_TE_PULSE_DETECT_CTRL (0x008C)
#define DSI_CMD_MODE_DMA_SW_TRIGGER (0x0090)
#define DSI_CMD_MODE_MDP_SW_TRIGGER (0x0094)
#define DSI_CMD_MODE_BTA_SW_TRIGGER (0x0098)
#define DSI_RESET_SW_TRIGGER (0x009C)
#define DSI_MISR_CMD_CTRL (0x00A0)
#define DSI_MISR_VIDEO_CTRL (0x00A4)
#define DSI_LANE_STATUS (0x00A8)
#define DSI_LANE_CTRL (0x00AC)
#define DSI_LANE_SWAP_CTRL (0x00B0)
#define DSI_DLN0_PHY_ERR (0x00B4)
#define DSI_LP_TIMER_CTRL (0x00B8)
#define DSI_HS_TIMER_CTRL (0x00BC)
#define DSI_TIMEOUT_STATUS (0x00C0)
#define DSI_CLKOUT_TIMING_CTRL (0x00C4)
#define DSI_EOT_PACKET (0x00C8)
#define DSI_EOT_PACKET_CTRL (0x00CC)
#define DSI_GENERIC_ESC_TX_TRIGGER (0x00D0)
#define DSI_CAM_BIST_CTRL (0x00D4)
#define DSI_CAM_BIST_FRAME_SIZE (0x00D8)
#define DSI_CAM_BIST_BLOCK_SIZE (0x00DC)
#define DSI_CAM_BIST_FRAME_CONFIG (0x00E0)
#define DSI_CAM_BIST_LSFR_CTRL (0x00E4)
#define DSI_CAM_BIST_LSFR_INIT (0x00E8)
#define DSI_CAM_BIST_START (0x00EC)
#define DSI_CAM_BIST_STATUS (0x00F0)
#define DSI_ERR_INT_MASK0 (0x010C)
#define DSI_INT_CTRL (0x0110)
#define DSI_IOBIST_CTRL (0x0114)
#define DSI_SOFT_RESET (0x0118)
#define DSI_CLK_CTRL (0x011C)
#define DSI_CLK_STATUS (0x0120)
#define DSI_DEBUG_BUS_CTL (0x0124)
#define DSI_DEBUG_BUS_STATUS (0x0128)
#define DSI_PHY_SW_RESET (0x012C)
#define DSI_AXI2AHB_CTRL (0x0130)
#define DSI_MISR_CMD_MDP0_32BIT (0x0134)
#define DSI_MISR_CMD_MDP1_32BIT (0x0138)
#define DSI_MISR_CMD_DMA_32BIT (0x013C)
#define DSI_MISR_VIDEO_32BIT (0x0140)
#define DSI_LANE_MISR_CTRL (0x0144)
#define DSI_LANE0_MISR (0x0148)
#define DSI_LANE1_MISR (0x014C)
#define DSI_LANE2_MISR (0x0150)
#define DSI_LANE3_MISR (0x0154)
#define DSI_TEST_PATTERN_GEN_CTRL (0x015C)
#define DSI_TEST_PATTERN_GEN_VIDEO_POLY (0x0160)
#define DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL (0x0164)
#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY (0x0168)
#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 (0x016C)
#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY (0x0170)
#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1 (0x0174)
#define DSI_TEST_PATTERN_GEN_CMD_DMA_POLY (0x0178)
#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL (0x017C)
#define DSI_TEST_PATTERN_GEN_VIDEO_ENABLE (0x0180)
#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER (0x0184)
#define DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER (0x0188)
#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2 (0x018C)
#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY (0x0190)
#define DSI_COMMAND_MODE_MDP_IDLE_CTRL (0x0194)
#define DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER (0x0198)
#define DSI_TPG_MAIN_CONTROL (0x019C)
#define DSI_TPG_MAIN_CONTROL2 (0x01A0)
#define DSI_TPG_VIDEO_CONFIG (0x01A4)
#define DSI_TPG_COMPONENT_LIMITS (0x01A8)
#define DSI_TPG_RECTANGLE (0x01AC)
#define DSI_TPG_BLACK_WHITE_PATTERN_FRAMES (0x01B0)
#define DSI_TPG_RGB_MAPPING (0x01B4)
#define DSI_COMMAND_MODE_MDP_CTRL2 (0x01B8)
#define DSI_COMMAND_MODE_MDP_STREAM2_CTRL (0x01BC)
#define DSI_COMMAND_MODE_MDP_STREAM2_TOTAL (0x01C0)
#define DSI_MISR_CMD_MDP2_8BIT (0x01C4)
#define DSI_MISR_CMD_MDP2_32BIT (0x01C8)
#define DSI_VBIF_CTRL (0x01CC)
#define DSI_AES_CTRL (0x01D0)
#define DSI_RDBK_DATA_CTRL (0x01D4)
#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2 (0x01D8)
#define DSI_TPG_DMA_FIFO_STATUS (0x01DC)
#define DSI_TPG_DMA_FIFO_WRITE_TRIGGER (0x01E0)
#define DSI_DSI_TIMING_FLUSH (0x01E4)
#define DSI_DSI_TIMING_DB_MODE (0x01E8)
#define DSI_TPG_DMA_FIFO_RESET (0x01EC)
#define DSI_SCRATCH_REGISTER_0 (0x01F0)
#define DSI_VERSION (0x01F4)
#define DSI_SCRATCH_REGISTER_1 (0x01F8)
#define DSI_SCRATCH_REGISTER_2 (0x01FC)
#define DSI_DYNAMIC_REFRESH_CTRL (0x0200)
#define DSI_DYNAMIC_REFRESH_PIPE_DELAY (0x0204)
#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2 (0x0208)
#define DSI_DYNAMIC_REFRESH_PLL_DELAY (0x020C)
#define DSI_DYNAMIC_REFRESH_STATUS (0x0210)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL0 (0x0214)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL1 (0x0218)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL2 (0x021C)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL3 (0x0220)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL4 (0x0224)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL5 (0x0228)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL6 (0x022C)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL7 (0x0230)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL8 (0x0234)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL9 (0x0238)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL10 (0x023C)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL11 (0x0240)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL12 (0x0244)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL13 (0x0248)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL14 (0x024C)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL15 (0x0250)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL16 (0x0254)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL17 (0x0258)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL18 (0x025C)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL19 (0x0260)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL20 (0x0264)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL21 (0x0268)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL22 (0x026C)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL23 (0x0270)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL24 (0x0274)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL25 (0x0278)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL26 (0x027C)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL27 (0x0280)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL28 (0x0284)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL29 (0x0288)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL30 (0x028C)
#define DSI_DYNAMIC_REFRESH_PLL_CTRL31 (0x0290)
#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR (0x0294)
#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2 (0x0298)
#define DSI_VIDEO_COMPRESSION_MODE_CTRL (0x02A0)
#define DSI_VIDEO_COMPRESSION_MODE_CTRL2 (0x02A4)
#define DSI_COMMAND_COMPRESSION_MODE_CTRL (0x02A8)
#define DSI_COMMAND_COMPRESSION_MODE_CTRL2 (0x02AC)
#define DSI_COMMAND_COMPRESSION_MODE_CTRL3 (0x02B0)
#define DSI_COMMAND_MODE_NULL_INSERTION_CTRL (0x02B4)
#define DSI_READ_BACK_DISABLE_STATUS (0x02B8)
#define DSI_DESKEW_CTRL (0x02BC)
#define DSI_DESKEW_DELAY_CTRL (0x02C0)
#define DSI_DESKEW_SW_TRIGGER (0x02C4)
#define DSI_DEBUG_CTRL (0x02C8)
#define DSI_SECURE_DISPLAY_STATUS (0x02CC)
#define DSI_SECURE_DISPLAY_BLOCK_COMMAND_COLOR (0x02D0)
#define DSI_SECURE_DISPLAY_BLOCK_VIDEO_COLOR (0x02D4)
#define DSI_LOGICAL_LANE_SWAP_CTRL (0x0310)
#endif /* _DSI_CTRL_REG_H_ */

625
msm/dsi/dsi_defs.h Normal file
View File

@@ -0,0 +1,625 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_DEFS_H_
#define _DSI_DEFS_H_
#include <linux/types.h>
#include <drm/drm_mipi_dsi.h>
#include "msm_drv.h"
#define DSI_H_TOTAL(t) (((t)->h_active) + ((t)->h_back_porch) + \
((t)->h_sync_width) + ((t)->h_front_porch))
#define DSI_V_TOTAL(t) (((t)->v_active) + ((t)->v_back_porch) + \
((t)->v_sync_width) + ((t)->v_front_porch))
#define DSI_H_TOTAL_DSC(t) \
({\
u64 value;\
if ((t)->dsc_enabled && (t)->dsc)\
value = (t)->dsc->pclk_per_line;\
else\
value = (t)->h_active;\
value = value + (t)->h_back_porch + (t)->h_sync_width +\
(t)->h_front_porch;\
value;\
})
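/*
 * Worked example (illustrative numbers): with h_active = 1080,
 * h_back_porch = 32, h_sync_width = 16 and h_front_porch = 32,
 * DSI_H_TOTAL() yields 1080 + 32 + 16 + 32 = 1160. DSI_H_TOTAL_DSC()
 * computes the same sum but substitutes dsc->pclk_per_line for
 * h_active when DSC is enabled, so only the active width changes.
 */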
#define DSI_DEBUG_NAME_LEN 32
#define display_for_each_ctrl(index, display) \
for (index = 0; (index < (display)->ctrl_count) &&\
(index < MAX_DSI_CTRLS_PER_DISPLAY); index++)
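/*
 * Illustrative usage sketch: the iterator bounds the index by both the
 * display's ctrl_count and MAX_DSI_CTRLS_PER_DISPLAY:
 *
 *	int i;
 *
 *	display_for_each_ctrl(i, display) {
 *		struct dsi_display_ctrl *ctrl = &display->ctrl[i];
 *		...
 *	}
 */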
/**
* enum dsi_pixel_format - DSI pixel formats
* @DSI_PIXEL_FORMAT_RGB565:
* @DSI_PIXEL_FORMAT_RGB666:
* @DSI_PIXEL_FORMAT_RGB666_LOOSE:
* @DSI_PIXEL_FORMAT_RGB888:
* @DSI_PIXEL_FORMAT_RGB111:
* @DSI_PIXEL_FORMAT_RGB332:
* @DSI_PIXEL_FORMAT_RGB444:
* @DSI_PIXEL_FORMAT_MAX:
*/
enum dsi_pixel_format {
DSI_PIXEL_FORMAT_RGB565 = 0,
DSI_PIXEL_FORMAT_RGB666,
DSI_PIXEL_FORMAT_RGB666_LOOSE,
DSI_PIXEL_FORMAT_RGB888,
DSI_PIXEL_FORMAT_RGB111,
DSI_PIXEL_FORMAT_RGB332,
DSI_PIXEL_FORMAT_RGB444,
DSI_PIXEL_FORMAT_MAX
};
/**
* enum dsi_op_mode - dsi operation mode
* @DSI_OP_VIDEO_MODE: DSI video mode operation
* @DSI_OP_CMD_MODE: DSI Command mode operation
* @DSI_OP_MODE_MAX:
*/
enum dsi_op_mode {
DSI_OP_VIDEO_MODE = 0,
DSI_OP_CMD_MODE,
DSI_OP_MODE_MAX
};
/**
* enum dsi_mode_flags - flags to signal other drm components via private flags
* @DSI_MODE_FLAG_SEAMLESS: Seamless transition requested by user
* @DSI_MODE_FLAG_DFPS: Seamless transition is DynamicFPS
* @DSI_MODE_FLAG_VBLANK_PRE_MODESET: Transition needs VBLANK before Modeset
* @DSI_MODE_FLAG_DMS: Seamless transition is dynamic mode switch
* @DSI_MODE_FLAG_VRR: Seamless transition is DynamicFPS.
* New timing values are sent from DAL.
*/
enum dsi_mode_flags {
DSI_MODE_FLAG_SEAMLESS = BIT(0),
DSI_MODE_FLAG_DFPS = BIT(1),
DSI_MODE_FLAG_VBLANK_PRE_MODESET = BIT(2),
DSI_MODE_FLAG_DMS = BIT(3),
DSI_MODE_FLAG_VRR = BIT(4),
};
/**
* enum dsi_logical_lane - dsi logical lanes
* @DSI_LOGICAL_LANE_0: Logical lane 0
* @DSI_LOGICAL_LANE_1: Logical lane 1
* @DSI_LOGICAL_LANE_2: Logical lane 2
* @DSI_LOGICAL_LANE_3: Logical lane 3
* @DSI_LOGICAL_CLOCK_LANE: Clock lane
* @DSI_LANE_MAX: Maximum lanes supported
*/
enum dsi_logical_lane {
DSI_LOGICAL_LANE_0 = 0,
DSI_LOGICAL_LANE_1,
DSI_LOGICAL_LANE_2,
DSI_LOGICAL_LANE_3,
DSI_LOGICAL_CLOCK_LANE,
DSI_LANE_MAX
};
/**
* enum dsi_data_lanes - BIT map for DSI data lanes
* This is used to identify the active DSI data lanes for
* various operations like DSI data lane enable/ULPS/clamp
* configurations.
* @DSI_DATA_LANE_0: BIT(DSI_LOGICAL_LANE_0)
* @DSI_DATA_LANE_1: BIT(DSI_LOGICAL_LANE_1)
* @DSI_DATA_LANE_2: BIT(DSI_LOGICAL_LANE_2)
* @DSI_DATA_LANE_3: BIT(DSI_LOGICAL_LANE_3)
* @DSI_CLOCK_LANE: BIT(DSI_LOGICAL_CLOCK_LANE)
*/
enum dsi_data_lanes {
DSI_DATA_LANE_0 = BIT(DSI_LOGICAL_LANE_0),
DSI_DATA_LANE_1 = BIT(DSI_LOGICAL_LANE_1),
DSI_DATA_LANE_2 = BIT(DSI_LOGICAL_LANE_2),
DSI_DATA_LANE_3 = BIT(DSI_LOGICAL_LANE_3),
DSI_CLOCK_LANE = BIT(DSI_LOGICAL_CLOCK_LANE)
};
/**
* enum dsi_phy_data_lanes - dsi physical lanes
* used for DSI logical to physical lane mapping
* @DSI_PHYSICAL_LANE_INVALID: Physical lane valid/invalid
* @DSI_PHYSICAL_LANE_0: Physical lane 0
* @DSI_PHYSICAL_LANE_1: Physical lane 1
* @DSI_PHYSICAL_LANE_2: Physical lane 2
* @DSI_PHYSICAL_LANE_3: Physical lane 3
*/
enum dsi_phy_data_lanes {
DSI_PHYSICAL_LANE_INVALID = 0,
DSI_PHYSICAL_LANE_0 = BIT(0),
DSI_PHYSICAL_LANE_1 = BIT(1),
DSI_PHYSICAL_LANE_2 = BIT(2),
DSI_PHYSICAL_LANE_3 = BIT(3)
};
enum dsi_lane_map_type_v1 {
DSI_LANE_MAP_0123,
DSI_LANE_MAP_3012,
DSI_LANE_MAP_2301,
DSI_LANE_MAP_1230,
DSI_LANE_MAP_0321,
DSI_LANE_MAP_1032,
DSI_LANE_MAP_2103,
DSI_LANE_MAP_3210,
};
/**
* lane_map: DSI logical <-> physical lane mapping
* lane_map_v1: Lane mapping for DSI controllers < v2.0
* lane_map_v2: Lane mapping for DSI controllers >= 2.0
*/
struct dsi_lane_map {
enum dsi_lane_map_type_v1 lane_map_v1;
u8 lane_map_v2[DSI_LANE_MAX - 1];
};
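/*
 * Illustrative example: an identity mapping for a >= v2.0 controller
 * assigns each logical lane its matching physical lane:
 *
 *	struct dsi_lane_map map = {
 *		.lane_map_v2 = { DSI_PHYSICAL_LANE_0, DSI_PHYSICAL_LANE_1,
 *				 DSI_PHYSICAL_LANE_2, DSI_PHYSICAL_LANE_3 },
 *	};
 */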
/**
* enum dsi_trigger_type - dsi trigger type
* @DSI_TRIGGER_NONE: No trigger.
* @DSI_TRIGGER_TE: TE trigger.
* @DSI_TRIGGER_SEOF: Start or End of frame.
* @DSI_TRIGGER_SW: Software trigger.
* @DSI_TRIGGER_SW_SEOF: Software trigger and start/end of frame.
* @DSI_TRIGGER_SW_TE: Software and TE triggers.
* @DSI_TRIGGER_MAX: Max trigger values.
*/
enum dsi_trigger_type {
DSI_TRIGGER_NONE = 0,
DSI_TRIGGER_TE,
DSI_TRIGGER_SEOF,
DSI_TRIGGER_SW,
DSI_TRIGGER_SW_SEOF,
DSI_TRIGGER_SW_TE,
DSI_TRIGGER_MAX
};
/**
* enum dsi_color_swap_mode - color swap mode
* @DSI_COLOR_SWAP_RGB:
* @DSI_COLOR_SWAP_RBG:
* @DSI_COLOR_SWAP_BGR:
* @DSI_COLOR_SWAP_BRG:
* @DSI_COLOR_SWAP_GRB:
* @DSI_COLOR_SWAP_GBR:
*/
enum dsi_color_swap_mode {
DSI_COLOR_SWAP_RGB = 0,
DSI_COLOR_SWAP_RBG,
DSI_COLOR_SWAP_BGR,
DSI_COLOR_SWAP_BRG,
DSI_COLOR_SWAP_GRB,
DSI_COLOR_SWAP_GBR
};
/**
* enum dsi_dfps_type - Dynamic FPS support type
* @DSI_DFPS_NONE: Dynamic FPS is not supported.
* @DSI_DFPS_SUSPEND_RESUME:
* @DSI_DFPS_IMMEDIATE_CLK:
* @DSI_DFPS_IMMEDIATE_HFP:
* @DSI_DFPS_IMMEDIATE_VFP:
 * @DSI_DFPS_MAX:
*/
enum dsi_dfps_type {
DSI_DFPS_NONE = 0,
DSI_DFPS_SUSPEND_RESUME,
DSI_DFPS_IMMEDIATE_CLK,
DSI_DFPS_IMMEDIATE_HFP,
DSI_DFPS_IMMEDIATE_VFP,
DSI_DFPS_MAX
};
/**
* enum dsi_cmd_set_type - DSI command set type
* @DSI_CMD_SET_PRE_ON: Panel pre on
* @DSI_CMD_SET_ON: Panel on
* @DSI_CMD_SET_POST_ON: Panel post on
* @DSI_CMD_SET_PRE_OFF: Panel pre off
* @DSI_CMD_SET_OFF: Panel off
* @DSI_CMD_SET_POST_OFF: Panel post off
* @DSI_CMD_SET_PRE_RES_SWITCH: Pre resolution switch
* @DSI_CMD_SET_RES_SWITCH: Resolution switch
* @DSI_CMD_SET_POST_RES_SWITCH: Post resolution switch
* @DSI_CMD_SET_CMD_TO_VID_SWITCH: Cmd to video mode switch
* @DSI_CMD_SET_POST_CMD_TO_VID_SWITCH: Post cmd to vid switch
* @DSI_CMD_SET_VID_TO_CMD_SWITCH: Video to cmd mode switch
* @DSI_CMD_SET_POST_VID_TO_CMD_SWITCH: Post vid to cmd switch
* @DSI_CMD_SET_PANEL_STATUS: Panel status
* @DSI_CMD_SET_LP1: Low power mode 1
* @DSI_CMD_SET_LP2: Low power mode 2
* @DSI_CMD_SET_NOLP: Low power mode disable
* @DSI_CMD_SET_PPS: DSC PPS command
* @DSI_CMD_SET_ROI: Panel ROI update
* @DSI_CMD_SET_TIMING_SWITCH: Timing switch
* @DSI_CMD_SET_POST_TIMING_SWITCH: Post timing switch
 * @DSI_CMD_SET_QSYNC_ON: Enable qsync mode
 * @DSI_CMD_SET_QSYNC_OFF: Disable qsync mode
 * @DSI_CMD_SET_MAX:
*/
enum dsi_cmd_set_type {
DSI_CMD_SET_PRE_ON = 0,
DSI_CMD_SET_ON,
DSI_CMD_SET_POST_ON,
DSI_CMD_SET_PRE_OFF,
DSI_CMD_SET_OFF,
DSI_CMD_SET_POST_OFF,
DSI_CMD_SET_PRE_RES_SWITCH,
DSI_CMD_SET_RES_SWITCH,
DSI_CMD_SET_POST_RES_SWITCH,
DSI_CMD_SET_CMD_TO_VID_SWITCH,
DSI_CMD_SET_POST_CMD_TO_VID_SWITCH,
DSI_CMD_SET_VID_TO_CMD_SWITCH,
DSI_CMD_SET_POST_VID_TO_CMD_SWITCH,
DSI_CMD_SET_PANEL_STATUS,
DSI_CMD_SET_LP1,
DSI_CMD_SET_LP2,
DSI_CMD_SET_NOLP,
DSI_CMD_SET_PPS,
DSI_CMD_SET_ROI,
DSI_CMD_SET_TIMING_SWITCH,
DSI_CMD_SET_POST_TIMING_SWITCH,
DSI_CMD_SET_QSYNC_ON,
DSI_CMD_SET_QSYNC_OFF,
DSI_CMD_SET_MAX
};
/**
* enum dsi_cmd_set_state - command set state
* @DSI_CMD_SET_STATE_LP: dsi low power mode
* @DSI_CMD_SET_STATE_HS: dsi high speed mode
* @DSI_CMD_SET_STATE_MAX
*/
enum dsi_cmd_set_state {
DSI_CMD_SET_STATE_LP = 0,
DSI_CMD_SET_STATE_HS,
DSI_CMD_SET_STATE_MAX
};
/**
* enum dsi_clk_gate_type - Type of clock to be gated.
* @PIXEL_CLK: DSI pixel clock.
* @BYTE_CLK: DSI byte clock.
* @DSI_PHY: DSI PHY.
*/
enum dsi_clk_gate_type {
PIXEL_CLK = 1,
BYTE_CLK = 2,
DSI_PHY = 4,
};
/**
* enum dsi_phy_type - DSI phy types
* @DSI_PHY_TYPE_DPHY:
* @DSI_PHY_TYPE_CPHY:
*/
enum dsi_phy_type {
DSI_PHY_TYPE_DPHY,
DSI_PHY_TYPE_CPHY
};
/**
* enum dsi_te_mode - dsi te source
* @DSI_TE_ON_DATA_LINK: TE read from DSI link
* @DSI_TE_ON_EXT_PIN: TE signal on an external GPIO
*/
enum dsi_te_mode {
DSI_TE_ON_DATA_LINK = 0,
DSI_TE_ON_EXT_PIN,
};
/**
* enum dsi_video_traffic_mode - video mode pixel transmission type
* @DSI_VIDEO_TRAFFIC_SYNC_PULSES: Non-burst mode with sync pulses.
* @DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS: Non-burst mode with sync start events.
* @DSI_VIDEO_TRAFFIC_BURST_MODE: Burst mode using sync start events.
*/
enum dsi_video_traffic_mode {
DSI_VIDEO_TRAFFIC_SYNC_PULSES = 0,
DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS,
DSI_VIDEO_TRAFFIC_BURST_MODE,
};
/**
* struct dsi_cmd_desc - description of a dsi command
* @msg: dsi mipi msg packet
* @last_command: indicates whether the cmd is the last one to send
* @post_wait_ms: post wait duration
*/
struct dsi_cmd_desc {
struct mipi_dsi_msg msg;
bool last_command;
u32 post_wait_ms;
};
/**
* struct dsi_panel_cmd_set - command set of the panel
* @type: type of the command
* @state: state of the command
* @count: number of cmds
* @ctrl_idx: index of the dsi control
 * @cmds: array of cmds
*/
struct dsi_panel_cmd_set {
enum dsi_cmd_set_type type;
enum dsi_cmd_set_state state;
u32 count;
u32 ctrl_idx;
struct dsi_cmd_desc *cmds;
};
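/*
 * Illustrative example: a one-entry command set carrying a DCS
 * set_display_on (0x29) short write; struct mipi_dsi_msg and
 * MIPI_DSI_DCS_SHORT_WRITE come from drm_mipi_dsi.h:
 *
 *	static const u8 payload[] = { 0x29 };
 *	struct dsi_cmd_desc desc = {
 *		.msg = {
 *			.channel = 0,
 *			.type = MIPI_DSI_DCS_SHORT_WRITE,
 *			.tx_len = sizeof(payload),
 *			.tx_buf = payload,
 *		},
 *		.last_command = true,
 *		.post_wait_ms = 0,
 *	};
 *	struct dsi_panel_cmd_set set = {
 *		.type = DSI_CMD_SET_ON,
 *		.state = DSI_CMD_SET_STATE_LP,
 *		.count = 1,
 *		.cmds = &desc,
 *	};
 */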
/**
* struct dsi_mode_info - video mode information dsi frame
* @h_active: Active width of one frame in pixels.
* @h_back_porch: Horizontal back porch in pixels.
* @h_sync_width: HSYNC width in pixels.
 * @h_front_porch: Horizontal front porch in pixels.
* @h_skew:
* @h_sync_polarity: Polarity of HSYNC (false is active low).
* @v_active: Active height of one frame in lines.
* @v_back_porch: Vertical back porch in lines.
* @v_sync_width: VSYNC width in lines.
* @v_front_porch: Vertical front porch in lines.
* @v_sync_polarity: Polarity of VSYNC (false is active low).
* @refresh_rate: Refresh rate in Hz.
* @clk_rate_hz: DSI bit clock rate per lane in Hz.
* @mdp_transfer_time_us: Specifies the mdp transfer time for command mode
* panels in microseconds.
* @dsc_enabled: DSC compression enabled.
* @dsc: DSC compression configuration.
* @roi_caps: Panel ROI capabilities.
*/
struct dsi_mode_info {
u32 h_active;
u32 h_back_porch;
u32 h_sync_width;
u32 h_front_porch;
u32 h_skew;
bool h_sync_polarity;
u32 v_active;
u32 v_back_porch;
u32 v_sync_width;
u32 v_front_porch;
bool v_sync_polarity;
u32 refresh_rate;
u64 clk_rate_hz;
u32 mdp_transfer_time_us;
bool dsc_enabled;
struct msm_display_dsc_info *dsc;
struct msm_roi_caps roi_caps;
};
/**
* struct dsi_host_common_cfg - Host configuration common to video and cmd mode
* @dst_format: Destination pixel format.
* @data_lanes: Physical data lanes to be enabled.
* @en_crc_check: Enable CRC checks.
* @en_ecc_check: Enable ECC checks.
* @te_mode: Source for TE signalling.
* @mdp_cmd_trigger: MDP frame update trigger for command mode.
* @dma_cmd_trigger: Command DMA trigger.
* @cmd_trigger_stream: Command mode stream to trigger.
* @swap_mode: DSI color swap mode.
* @bit_swap_read: Is red color bit swapped.
* @bit_swap_green: Is green color bit swapped.
* @bit_swap_blue: Is blue color bit swapped.
* @t_clk_post: Number of byte clock cycles that the transmitter shall
* continue sending after last data lane has transitioned
* to LP mode.
 * @t_clk_pre: Number of byte clock cycles that the high speed clock
* shall be driven prior to data lane transitions from LP
* to HS mode.
* @ignore_rx_eot: Ignore Rx EOT packets if set to true.
* @append_tx_eot: Append EOT packets for forward transmissions if set to
* true.
* @ext_bridge_mode: External bridge is connected.
* @force_hs_clk_lane: Send continuous clock to the panel.
*/
struct dsi_host_common_cfg {
enum dsi_pixel_format dst_format;
enum dsi_data_lanes data_lanes;
bool en_crc_check;
bool en_ecc_check;
enum dsi_te_mode te_mode;
enum dsi_trigger_type mdp_cmd_trigger;
enum dsi_trigger_type dma_cmd_trigger;
u32 cmd_trigger_stream;
enum dsi_color_swap_mode swap_mode;
bool bit_swap_red;
bool bit_swap_green;
bool bit_swap_blue;
u32 t_clk_post;
u32 t_clk_pre;
bool ignore_rx_eot;
bool append_tx_eot;
bool ext_bridge_mode;
bool force_hs_clk_lane;
};
/**
* struct dsi_video_engine_cfg - DSI video engine configuration
* @last_line_interleave_en: Allow command mode op interleaved on last line of
* video stream.
* @pulse_mode_hsa_he: Send HSA and HE following VS/VE packet if set to
* true.
* @hfp_lp11_en: Enter low power stop mode (LP-11) during HFP.
* @hbp_lp11_en: Enter low power stop mode (LP-11) during HBP.
* @hsa_lp11_en: Enter low power stop mode (LP-11) during HSA.
* @eof_bllp_lp11_en: Enter low power stop mode (LP-11) during BLLP of
* last line of a frame.
* @bllp_lp11_en: Enter low power stop mode (LP-11) during BLLP.
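 * @force_clk_lane_hs: Keep the clock lane in high speed (continuous clock).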
* @traffic_mode: Traffic mode for video stream.
* @vc_id: Virtual channel identifier.
* @dma_sched_line: Line number, after vactive end, at which command dma
* needs to be triggered.
*/
struct dsi_video_engine_cfg {
bool last_line_interleave_en;
bool pulse_mode_hsa_he;
bool hfp_lp11_en;
bool hbp_lp11_en;
bool hsa_lp11_en;
bool eof_bllp_lp11_en;
bool bllp_lp11_en;
bool force_clk_lane_hs;
enum dsi_video_traffic_mode traffic_mode;
u32 vc_id;
u32 dma_sched_line;
};
/**
* struct dsi_cmd_engine_cfg - DSI command engine configuration
 * @max_cmd_packets_interleave: Maximum number of command mode RGB packets to
* send with in one horizontal blanking period
* of the video mode frame.
* @wr_mem_start: DCS command for write_memory_start.
* @wr_mem_continue: DCS command for write_memory_continue.
* @insert_dcs_command: Insert DCS command as first byte of payload
* of the pixel data.
*/
struct dsi_cmd_engine_cfg {
u32 max_cmd_packets_interleave;
u32 wr_mem_start;
u32 wr_mem_continue;
bool insert_dcs_command;
};
/**
* struct dsi_host_config - DSI host configuration parameters.
* @panel_mode: Operation mode for panel (video or cmd mode).
* @common_config: Host configuration common to both Video and Cmd mode.
* @video_engine: Video engine configuration if panel is in video mode.
* @cmd_engine: Cmd engine configuration if panel is in cmd mode.
 * @esc_clk_rate_hz: Escape clock frequency in Hz.
* @bit_clk_rate_hz: Bit clock frequency in Hz.
* @bit_clk_rate_hz_override: DSI bit clk rate override from dt/sysfs.
* @video_timing: Video timing information of a frame.
* @lane_map: Mapping between logical and physical lanes.
*/
struct dsi_host_config {
enum dsi_op_mode panel_mode;
struct dsi_host_common_cfg common_config;
union {
struct dsi_video_engine_cfg video_engine;
struct dsi_cmd_engine_cfg cmd_engine;
} u;
u64 esc_clk_rate_hz;
u64 bit_clk_rate_hz;
u64 bit_clk_rate_hz_override;
struct dsi_mode_info video_timing;
struct dsi_lane_map lane_map;
};
/**
* struct dsi_display_mode_priv_info - private mode info that will be attached
* with each drm mode
* @cmd_sets: Command sets of the mode
* @phy_timing_val: Phy timing values
* @phy_timing_len: Phy timing array length
* @panel_jitter: Panel jitter for RSC backoff
* @panel_prefill_lines: Panel prefill lines for RSC
* @mdp_transfer_time_us: Specifies the mdp transfer time for command mode
* panels in microseconds.
* @clk_rate_hz: DSI bit clock per lane in hz.
* @topology: Topology selected for the panel
* @dsc: DSC compression info
* @dsc_enabled: DSC compression enabled
* @roi_caps: Panel ROI capabilities
*/
struct dsi_display_mode_priv_info {
struct dsi_panel_cmd_set cmd_sets[DSI_CMD_SET_MAX];
u32 *phy_timing_val;
u32 phy_timing_len;
u32 panel_jitter_numer;
u32 panel_jitter_denom;
u32 panel_prefill_lines;
u32 mdp_transfer_time_us;
u64 clk_rate_hz;
struct msm_display_topology topology;
struct msm_display_dsc_info dsc;
bool dsc_enabled;
struct msm_roi_caps roi_caps;
};
/**
* struct dsi_display_mode - specifies mode for dsi display
* @timing: Timing parameters for the panel.
* @pixel_clk_khz: Pixel clock in Khz.
* @dsi_mode_flags: Flags to signal other drm components via private flags
* @priv_info: Mode private info
*/
struct dsi_display_mode {
struct dsi_mode_info timing;
u32 pixel_clk_khz;
u32 dsi_mode_flags;
struct dsi_display_mode_priv_info *priv_info;
};
/**
* struct dsi_rect - dsi rectangle representation
 * Note: sde_rect also uses u16; the layouts must stay in sync for memcpy.
*/
struct dsi_rect {
u16 x;
u16 y;
u16 w;
u16 h;
};
/**
* dsi_rect_intersect - intersect two rectangles
* @r1: first rectangle
* @r2: scissor rectangle
 * @result: result rectangle; all fields are 0 when no intersection is found
*/
void dsi_rect_intersect(const struct dsi_rect *r1,
const struct dsi_rect *r2,
struct dsi_rect *result);
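/*
 * A minimal sketch of the intersection semantics documented above; the
 * out-of-line implementation is authoritative, this is only
 * illustrative:
 *
 *	u16 l = max(r1->x, r2->x);
 *	u16 t = max(r1->y, r2->y);
 *	u16 r = min(r1->x + r1->w, r2->x + r2->w);
 *	u16 b = min(r1->y + r1->h, r2->y + r2->h);
 *
 *	if (r <= l || b <= t) {
 *		memset(result, 0, sizeof(*result));
 *	} else {
 *		result->x = l;
 *		result->y = t;
 *		result->w = r - l;
 *		result->h = b - t;
 *	}
 */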
/**
* dsi_rect_is_equal - compares two rects
* @r1: rect value to compare
* @r2: rect value to compare
*
* Returns true if the rects are same
*/
static inline bool dsi_rect_is_equal(struct dsi_rect *r1,
struct dsi_rect *r2)
{
return r1->x == r2->x && r1->y == r2->y && r1->w == r2->w &&
r1->h == r2->h;
}
struct dsi_event_cb_info {
uint32_t event_idx;
void *event_usr_ptr;
int (*event_cb)(void *event_usr_ptr,
uint32_t event_idx, uint32_t instance_idx,
uint32_t data0, uint32_t data1,
uint32_t data2, uint32_t data3);
};
/**
* enum dsi_error_status - various dsi errors
* @DSI_FIFO_OVERFLOW: DSI FIFO Overflow error
* @DSI_FIFO_UNDERFLOW: DSI FIFO Underflow error
* @DSI_LP_Rx_TIMEOUT: DSI LP/RX Timeout error
*/
enum dsi_error_status {
DSI_FIFO_OVERFLOW = 1,
DSI_FIFO_UNDERFLOW,
DSI_LP_Rx_TIMEOUT,
DSI_ERR_INTR_ALL,
};
#endif /* _DSI_DEFS_H_ */

7014
msm/dsi/dsi_display.c Normal file

File diff suppressed because it is too large

699
msm/dsi/dsi_display.h Normal file
View File

@@ -0,0 +1,699 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_DISPLAY_H_
#define _DSI_DISPLAY_H_
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/of_device.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include "msm_drv.h"
#include "dsi_defs.h"
#include "dsi_ctrl.h"
#include "dsi_phy.h"
#include "dsi_panel.h"
#define MAX_DSI_CTRLS_PER_DISPLAY 2
#define DSI_CLIENT_NAME_SIZE 20
#define MAX_CMDLINE_PARAM_LEN 512
#define MAX_CMD_PAYLOAD_SIZE 256
/*
* DSI Validate Mode modifiers
* @DSI_VALIDATE_FLAG_ALLOW_ADJUST: Allow mode validation to also do fixup
*/
#define DSI_VALIDATE_FLAG_ALLOW_ADJUST 0x1
/**
* enum dsi_display_selection_type - enumerates DSI display selection types
* @DSI_PRIMARY: primary DSI display selected from module parameter
* @DSI_SECONDARY: Secondary DSI display selected from module parameter
 * @MAX_DSI_ACTIVE_DISPLAY: Maximum active displays that can be selected
*/
enum dsi_display_selection_type {
DSI_PRIMARY = 0,
DSI_SECONDARY,
MAX_DSI_ACTIVE_DISPLAY,
};
/**
* enum dsi_display_type - enumerates DSI display types
* @DSI_DISPLAY_SINGLE: A panel connected on a single DSI interface.
* @DSI_DISPLAY_EXT_BRIDGE: A bridge is connected between panel and DSI host.
* It utilizes a single DSI interface.
* @DSI_DISPLAY_SPLIT: A panel that utilizes more than one DSI
* interfaces.
* @DSI_DISPLAY_SPLIT_EXT_BRIDGE: A bridge is present between panel and DSI
* host. It utilizes more than one DSI interface.
*/
enum dsi_display_type {
DSI_DISPLAY_SINGLE = 0,
DSI_DISPLAY_EXT_BRIDGE,
DSI_DISPLAY_SPLIT,
DSI_DISPLAY_SPLIT_EXT_BRIDGE,
DSI_DISPLAY_MAX,
};
/**
* struct dsi_display_ctrl - dsi ctrl/phy information for the display
* @ctrl: Handle to the DSI controller device.
 * @ctrl_of_node: phandle to the DSI controller device.
* @dsi_ctrl_idx: DSI controller instance id.
* @power_state: Current power state of the DSI controller.
* @phy: Handle to the DSI PHY device.
 * @phy_of_node: phandle to the DSI PHY device.
* @phy_enabled: PHY power status.
*/
struct dsi_display_ctrl {
/* controller info */
struct dsi_ctrl *ctrl;
struct device_node *ctrl_of_node;
u32 dsi_ctrl_idx;
enum dsi_power_state power_state;
/* phy info */
struct msm_dsi_phy *phy;
struct device_node *phy_of_node;
bool phy_enabled;
};
/**
* struct dsi_display_boot_param - defines DSI boot display selection
 * @name: Name of DSI display selected as a boot param.
 * @boot_disp_en: bool to indicate dtsi availability of display node
 * @length: length of the DSI display name.
 * @cmdline_topology: Display topology shared from kernel command line.
*/
struct dsi_display_boot_param {
char name[MAX_CMDLINE_PARAM_LEN];
char *boot_param;
bool boot_disp_en;
int length;
struct device_node *node;
int cmdline_topology;
void *disp;
};
/**
* struct dsi_display_clk_info - dsi display clock source information
* @src_clks: Source clocks for DSI display.
* @mux_clks: Mux clocks used for DFPS.
* @shadow_clks: Used for DFPS.
*/
struct dsi_display_clk_info {
struct dsi_clk_link_set src_clks;
struct dsi_clk_link_set mux_clks;
struct dsi_clk_link_set shadow_clks;
};
/**
* struct dsi_display_ext_bridge - dsi display external bridge information
* @display: Pointer of DSI display.
* @node_of: Bridge node created from bridge driver.
* @bridge: Bridge created from bridge driver
* @orig_funcs: Bridge function from bridge driver (split mode only)
* @bridge_funcs: Overridden function from bridge driver (split mode only)
*/
struct dsi_display_ext_bridge {
void *display;
struct device_node *node_of;
struct drm_bridge *bridge;
const struct drm_bridge_funcs *orig_funcs;
struct drm_bridge_funcs bridge_funcs;
};
/**
* struct dsi_display - dsi display information
* @pdev: Pointer to platform device.
* @drm_dev: DRM device associated with the display.
* @drm_conn: Pointer to DRM connector associated with the display
* @ext_conn: Pointer to external connector attached to DSI connector
* @name: Name of the display.
* @display_type: Display type as defined in device tree.
* @list: List pointer.
* @is_cont_splash_enabled: Is continuous splash enabled
* @sw_te_using_wd: Is software te enabled
* @display_lock: Mutex for dsi_display interface.
* @disp_te_gpio: GPIO for panel TE interrupt.
 * @is_te_irq_enabled: bool to specify whether TE interrupt is enabled.
* @esd_te_gate: completion gate to signal TE interrupt.
* @ctrl_count: Number of DSI interfaces required by panel.
* @ctrl: Controller information for DSI display.
* @panel: Handle to DSI panel.
 * @panel_node: phandle to DSI panel actually in use.
* @ext_bridge: External bridge information for DSI display.
* @ext_bridge_cnt: Number of external bridges
* @modes: Array of probed DSI modes
* @type: DSI display type.
* @clk_master_idx: The master controller for controlling clocks. This is an
* index into the ctrl[MAX_DSI_CTRLS_PER_DISPLAY] array.
* @cmd_master_idx: The master controller for sending DSI commands to panel.
* @video_master_idx: The master controller for enabling video engine.
* @cached_clk_rate: The cached DSI clock rate set dynamically by sysfs.
* @clkrate_change_pending: Flag indicating the pending DSI clock re-enabling.
* @clock_info: Clock sourcing for DSI display.
* @config: DSI host configuration information.
* @lane_map: Lane mapping between DSI host and Panel.
* @cmdline_topology: Display topology shared from kernel command line.
* @cmdline_timing: Display timing shared from kernel command line.
* @is_tpg_enabled: TPG state.
* @ulps_enabled: ulps state.
* @clamp_enabled: clamp state.
* @phy_idle_power_off: PHY power state.
* @host: DRM MIPI DSI Host.
* @bridge: Pointer to DRM bridge object.
* @cmd_engine_refcount: Reference count enforcing single instance of cmd eng
* @clk_mngr: DSI clock manager.
* @dsi_clk_handle: DSI clock handle.
* @mdp_clk_handle: MDP clock handle.
* @root: Debugfs root directory
 * @misr_enable: Frame MISR enable/disable
 * @misr_frame_count: Number of frames to accumulate the MISR value
 * @esd_trigger: field indicating ESD trigger through debugfs
 * @te_source: vsync source pin information
*/
struct dsi_display {
struct platform_device *pdev;
struct drm_device *drm_dev;
struct drm_connector *drm_conn;
struct drm_connector *ext_conn;
const char *name;
const char *display_type;
struct list_head list;
bool is_cont_splash_enabled;
bool sw_te_using_wd;
struct mutex display_lock;
int disp_te_gpio;
bool is_te_irq_enabled;
struct completion esd_te_gate;
u32 ctrl_count;
struct dsi_display_ctrl ctrl[MAX_DSI_CTRLS_PER_DISPLAY];
/* panel info */
struct dsi_panel *panel;
struct device_node *panel_node;
struct device_node *parser_node;
/* external bridge */
struct dsi_display_ext_bridge ext_bridge[MAX_DSI_CTRLS_PER_DISPLAY];
u32 ext_bridge_cnt;
struct dsi_display_mode *modes;
enum dsi_display_type type;
u32 clk_master_idx;
u32 cmd_master_idx;
u32 video_master_idx;
/* dynamic DSI clock info*/
u32 cached_clk_rate;
atomic_t clkrate_change_pending;
struct dsi_display_clk_info clock_info;
struct dsi_host_config config;
struct dsi_lane_map lane_map;
int cmdline_topology;
int cmdline_timing;
bool is_tpg_enabled;
bool ulps_enabled;
bool clamp_enabled;
bool phy_idle_power_off;
struct drm_gem_object *tx_cmd_buf;
u32 cmd_buffer_size;
u64 cmd_buffer_iova;
void *vaddr;
struct msm_gem_address_space *aspace;
struct mipi_dsi_host host;
struct dsi_bridge *bridge;
u32 cmd_engine_refcount;
struct sde_power_handle *phandle;
struct sde_power_client *cont_splash_client;
void *clk_mngr;
void *dsi_clk_handle;
void *mdp_clk_handle;
/* DEBUG FS */
struct dentry *root;
bool misr_enable;
u32 misr_frame_count;
u32 esd_trigger;
/* multiple dsi error handlers */
struct workqueue_struct *err_workq;
struct work_struct fifo_underflow_work;
struct work_struct fifo_overflow_work;
struct work_struct lp_rx_timeout_work;
/* firmware panel data */
const struct firmware *fw;
void *parser;
struct dsi_display_boot_param *boot_disp;
u32 te_source;
};
int dsi_display_dev_probe(struct platform_device *pdev);
int dsi_display_dev_remove(struct platform_device *pdev);
/**
* dsi_display_get_num_of_displays() - returns number of display devices
* supported.
*
* Return: number of displays.
*/
int dsi_display_get_num_of_displays(void);
/**
* dsi_display_get_active_displays - returns pointers for active display devices
* @display_array: Pointer to display array to be filled
* @max_display_count: Size of display_array
 * Returns: Number of display entries filled
*/
int dsi_display_get_active_displays(void **display_array,
u32 max_display_count);
/**
* dsi_display_get_display_by_name()- finds display by name
* @name: name of the display.
*
* Return: handle to the display or error code.
*/
struct dsi_display *dsi_display_get_display_by_name(const char *name);
/**
* dsi_display_set_active_state() - sets the state of the display
* @display: Handle to display.
* @is_active: state
*/
void dsi_display_set_active_state(struct dsi_display *display, bool is_active);
/**
* dsi_display_drm_bridge_init() - initializes DRM bridge object for DSI
* @display: Handle to the display.
* @encoder: Pointer to the encoder object which is connected to the
* display.
*
* Return: error code.
*/
int dsi_display_drm_bridge_init(struct dsi_display *display,
struct drm_encoder *enc);
/**
* dsi_display_drm_bridge_deinit() - destroys DRM bridge for the display
* @display: Handle to the display.
*
* Return: error code.
*/
int dsi_display_drm_bridge_deinit(struct dsi_display *display);
/**
* dsi_display_drm_ext_bridge_init() - initializes DRM bridge for ext bridge
* @display: Handle to the display.
* @enc: Pointer to the encoder object which is connected to the
* display.
* @connector: Pointer to the connector object which is connected to
* the display.
*
* Return: error code.
*/
int dsi_display_drm_ext_bridge_init(struct dsi_display *display,
struct drm_encoder *enc, struct drm_connector *connector);
/**
* dsi_display_get_info() - returns the display properties
* @connector: Pointer to drm connector structure
* @info: Pointer to the structure where info is stored.
* @disp: Handle to the display.
*
* Return: error code.
*/
int dsi_display_get_info(struct drm_connector *connector,
struct msm_display_info *info, void *disp);
/**
* dsi_display_get_mode_count() - get number of modes supported by the display
* @display: Handle to display.
* @count: Number of modes supported
*
* Return: error code.
*/
int dsi_display_get_mode_count(struct dsi_display *display, u32 *count);
/**
* dsi_display_get_modes() - get modes supported by display
* @display: Handle to display.
 * @modes: Output param, list of DSI modes. Number of modes matches
* count returned by dsi_display_get_mode_count
*
* Return: error code.
*/
int dsi_display_get_modes(struct dsi_display *display,
struct dsi_display_mode **modes);
/**
* dsi_display_put_mode() - free up mode created for the display
* @display: Handle to display.
* @mode: Display mode to be freed up
*
* Return: error code.
*/
void dsi_display_put_mode(struct dsi_display *display,
struct dsi_display_mode *mode);
/**
* dsi_display_get_default_lms() - retrieve max number of lms used
* for dsi display by traversing through all topologies
* @display: Handle to display.
* @num_lm: Number of LMs used
*
* Return: error code.
*/
int dsi_display_get_default_lms(void *dsi_display, u32 *num_lm);
/**
* dsi_display_find_mode() - retrieve cached DSI mode given relevant params
* @display: Handle to display.
* @cmp: Mode to use as comparison to find original
* @out_mode: Output parameter, pointer to retrieved mode
*
* Return: error code.
*/
int dsi_display_find_mode(struct dsi_display *display,
const struct dsi_display_mode *cmp,
struct dsi_display_mode **out_mode);
/**
* dsi_display_validate_mode() - validates if mode is supported by display
* @display: Handle to display.
* @mode: Mode to be validated.
* @flags: Modifier flags.
*
* Return: 0 if supported or error code.
*/
int dsi_display_validate_mode(struct dsi_display *display,
struct dsi_display_mode *mode,
u32 flags);
/**
 * dsi_display_validate_mode_vrr() - validates mode in the variable refresh case
 * @display: Handle to display.
 * @cur_dsi_mode: Current DSI mode of the display.
 * @mode: Mode to be validated.
 *
 * Return: 0 on success, error code on failure.
*/
int dsi_display_validate_mode_vrr(struct dsi_display *display,
struct dsi_display_mode *cur_dsi_mode,
struct dsi_display_mode *mode);
/**
* dsi_display_set_mode() - Set mode on the display.
* @display: Handle to display.
* @mode: mode to be set.
* @flags: Modifier flags.
*
* Return: error code.
*/
int dsi_display_set_mode(struct dsi_display *display,
struct dsi_display_mode *mode,
u32 flags);
/**
* dsi_display_prepare() - prepare display
* @display: Handle to display.
*
* Prepare will perform power up sequences for the host and panel hardware.
* Power and clock resources might be turned on (depending on the panel mode).
* The video engine is not enabled.
*
* Return: error code.
*/
int dsi_display_prepare(struct dsi_display *display);
/**
* dsi_display_splash_res_cleanup() - cleanup for continuous splash
* @display: Pointer to dsi display
* Returns: Zero on success
*/
int dsi_display_splash_res_cleanup(struct dsi_display *display);
/**
* dsi_display_config_ctrl_for_cont_splash()- Enable engine modes for DSI
* controller during continuous splash
* @display: Handle to DSI display
*
* Return: returns error code
*/
int dsi_display_config_ctrl_for_cont_splash(struct dsi_display *display);
/**
* dsi_display_enable() - enable display
* @display: Handle to display.
*
* Enable will turn on the host engine and the panel. At the end of the enable
* function, Host and panel hardware are ready to accept pixel data from
* upstream.
*
* Return: error code.
*/
int dsi_display_enable(struct dsi_display *display);
/**
* dsi_display_post_enable() - perform post enable operations.
* @display: Handle to display.
*
* Some panels might require some commands to be sent after pixel data
* transmission has started. Such commands are sent as part of the post_enable
* function.
*
* Return: error code.
*/
int dsi_display_post_enable(struct dsi_display *display);
/**
* dsi_display_pre_disable() - perform pre disable operations.
* @display: Handle to display.
*
* If a panel requires commands to be sent before pixel data transmission is
* stopped, those can be sent as part of pre_disable.
*
* Return: error code.
*/
int dsi_display_pre_disable(struct dsi_display *display);
/**
* dsi_display_disable() - disable panel and host hardware.
* @display: Handle to display.
*
 * Disable host and panel hardware; pixel data transmission cannot continue.
*
* Return: error code.
*/
int dsi_display_disable(struct dsi_display *display);
/**
* dsi_pre_clkoff_cb() - Callback before clock is turned off
* @priv: private data pointer.
* @clk_type: clock which is being turned on.
* @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @new_state: next state for the clock.
*
* @return: error code.
*/
int dsi_pre_clkoff_cb(void *priv, enum dsi_clk_type clk_type,
enum dsi_lclk_type l_type,
enum dsi_clk_state new_state);
/**
* dsi_display_update_pps() - update PPS buffer.
* @pps_cmd: PPS buffer.
* @display: Handle to display.
*
* Copies new PPS buffer into display structure.
*
* Return: error code.
*/
int dsi_display_update_pps(char *pps_cmd, void *display);
/**
* dsi_post_clkoff_cb() - Callback after clock is turned off
* @priv: private data pointer.
* @clk_type: clock which is being turned on.
* @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @curr_state: current state for the clock.
*
* @return: error code.
*/
int dsi_post_clkoff_cb(void *priv, enum dsi_clk_type clk_type,
enum dsi_lclk_type l_type,
enum dsi_clk_state curr_state);
/**
* dsi_post_clkon_cb() - Callback after clock is turned on
* @priv: private data pointer.
* @clk_type: clock which is being turned on.
* @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @curr_state: current state for the clock.
*
* @return: error code.
*/
int dsi_post_clkon_cb(void *priv, enum dsi_clk_type clk_type,
enum dsi_lclk_type l_type,
enum dsi_clk_state curr_state);
/**
* dsi_pre_clkon_cb() - Callback before clock is turned on
* @priv: private data pointer.
* @clk_type: clock which is being turned on.
* @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @new_state: next state for the clock.
*
* @return: error code.
*/
int dsi_pre_clkon_cb(void *priv, enum dsi_clk_type clk_type,
enum dsi_lclk_type l_type,
enum dsi_clk_state new_state);
/**
* dsi_display_unprepare() - power off display hardware.
* @display: Handle to display.
*
* Host and panel hardware is turned off. Panel will be in reset state at the
* end of the function.
*
* Return: error code.
*/
int dsi_display_unprepare(struct dsi_display *display);
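/*
 * Taken together, the entry points above bracket the display power
 * sequence. A minimal sketch of the expected call order (illustrative,
 * error handling elided):
 *
 *	dsi_display_prepare(display);
 *	dsi_display_enable(display);
 *	dsi_display_post_enable(display);
 *	... pixel data flowing ...
 *	dsi_display_pre_disable(display);
 *	dsi_display_disable(display);
 *	dsi_display_unprepare(display);
 */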
int dsi_display_set_tpg_state(struct dsi_display *display, bool enable);
int dsi_display_clock_gate(struct dsi_display *display, bool enable);
int dsi_dispaly_static_frame(struct dsi_display *display, bool enable);
/**
* dsi_display_enable_event() - enable interrupt based connector event
* @connector: Pointer to drm connector structure
* @display: Handle to display.
* @event_idx: Event index.
* @event_info: Event callback definition.
* @enable: Whether to enable/disable the event interrupt.
*/
void dsi_display_enable_event(struct drm_connector *connector,
struct dsi_display *display,
uint32_t event_idx, struct dsi_event_cb_info *event_info,
bool enable);
/**
* dsi_display_set_backlight() - set backlight
* @connector: Pointer to drm connector structure
* @display: Handle to display.
* @bl_lvl: Backlight level.
*/
int dsi_display_set_backlight(struct drm_connector *connector,
void *display, u32 bl_lvl);
/**
* dsi_display_check_status() - check if panel is dead or alive
* @connector: Pointer to drm connector structure
* @display: Handle to display.
* @te_check_override: Whether check for TE from panel or default check
*/
int dsi_display_check_status(struct drm_connector *connector, void *display,
bool te_check_override);
/**
* dsi_display_cmd_transfer() - transfer command to the panel
* @connector: Pointer to drm connector structure
* @display: Handle to display.
 * @cmd_buffer: Command buffer
* @cmd_buf_len: Command buffer length in bytes
*/
int dsi_display_cmd_transfer(struct drm_connector *connector,
void *display, const char *cmd_buffer,
u32 cmd_buf_len);
/**
* dsi_display_soft_reset() - perform a soft reset on DSI controller
* @display: Handle to display
*
* The video, command and controller engines will be disabled before the
* reset is triggered. After, the engines will be re-enabled to the same state
* as before the reset.
*
* If the reset is done while MDP timing engine is turned on, the video
* engine should be re-enabled only during the vertical blanking time.
*
* Return: error code
*/
int dsi_display_soft_reset(void *display);
/**
* dsi_display_set_power - update power/dpms setting
* @connector: Pointer to drm connector structure
* @power_mode: One of the following,
* SDE_MODE_DPMS_ON
* SDE_MODE_DPMS_LP1
* SDE_MODE_DPMS_LP2
* SDE_MODE_DPMS_STANDBY
* SDE_MODE_DPMS_SUSPEND
* SDE_MODE_DPMS_OFF
* @display: Pointer to private display structure
* Returns: Zero on success
*/
int dsi_display_set_power(struct drm_connector *connector,
int power_mode, void *display);
/*
* dsi_display_pre_kickoff - program kickoff-time features
* @connector: Pointer to drm connector structure
* @display: Pointer to private display structure
* @params: Parameters for kickoff-time programming
* Returns: Zero on success
*/
int dsi_display_pre_kickoff(struct drm_connector *connector,
struct dsi_display *display,
struct msm_display_kickoff_params *params);
/**
* dsi_display_get_dst_format() - get dst_format from DSI display
* @connector: Pointer to drm connector structure
* @display: Handle to display
*
* Return: enum dsi_pixel_format type
*/
enum dsi_pixel_format dsi_display_get_dst_format(
struct drm_connector *connector,
void *display);
/**
* dsi_display_cont_splash_config() - initialize splash resources
* @display: Handle to display
*
* Return: Zero on Success
*/
int dsi_display_cont_splash_config(void *display);
/*
 * dsi_display_get_panel_vfp - get panel vertical front porch
* @display: Pointer to private display structure
* @h_active: width
* @v_active: height
 * Returns: v_front_porch on success, error code on failure
*/
int dsi_display_get_panel_vfp(void *display,
int h_active, int v_active);
#endif /* _DSI_DISPLAY_H_ */

96
msm/dsi/dsi_display_test.c Normal file
View File

@@ -0,0 +1,96 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include "dsi_display_test.h"
static void dsi_display_test_dump_modes(struct dsi_display_mode *mode, u32
count)
{
/* stub: mode dumping not implemented */
}
static void dsi_display_test_work(struct work_struct *work)
{
struct dsi_display_test *test;
struct dsi_display *display;
struct dsi_display_mode *modes = NULL;
u32 count = 0;
int rc = 0;
test = container_of(work, struct dsi_display_test, test_work);
display = test->display;
rc = dsi_display_get_mode_count(display, &count);
if (rc) {
pr_err("failed to get modes count, rc=%d\n", rc);
goto test_fail;
}
rc = dsi_display_get_modes(display, &modes);
if (rc) {
pr_err("failed to get modes, rc=%d\n", rc);
goto test_fail_free_modes;
}
dsi_display_test_dump_modes(modes, count);
rc = dsi_display_set_mode(display, &modes[0], 0x0);
if (rc) {
pr_err("failed to set mode, rc=%d\n", rc);
goto test_fail_free_modes;
}
rc = dsi_display_prepare(display);
if (rc) {
pr_err("failed to prepare display, rc=%d\n", rc);
goto test_fail_free_modes;
}
rc = dsi_display_enable(display);
if (rc) {
pr_err("failed to enable display, rc=%d\n", rc);
goto test_fail_unprep_disp;
}
return;
test_fail_unprep_disp:
rc = dsi_display_unprepare(display);
if (rc)
pr_err("failed to unprepare display, rc=%d\n", rc);
test_fail_free_modes:
kfree(modes);
test_fail:
return;
}
int dsi_display_test_init(struct dsi_display *display)
{
static int done;
int rc = 0;
struct dsi_display_test *test;
if (done)
return rc;
done = 1;
if (!display) {
pr_err("Invalid params\n");
return -EINVAL;
}
test = kzalloc(sizeof(*test), GFP_KERNEL);
if (!test)
return -ENOMEM;
test->display = display;
INIT_WORK(&test->test_work, dsi_display_test_work);
dsi_display_test_work(&test->test_work);
return rc;
}

22
msm/dsi/dsi_display_test.h Normal file
View File

@@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_DISPLAY_TEST_H_
#define _DSI_DISPLAY_TEST_H_
#include "dsi_display.h"
#include "dsi_ctrl_hw.h"
#include "dsi_ctrl.h"
struct dsi_display_test {
struct dsi_display *display;
struct work_struct test_work;
};
int dsi_display_test_init(struct dsi_display *display);
#endif /* _DSI_DISPLAY_TEST_H_ */

927
msm/dsi/dsi_drm.c Normal file
View File

@@ -0,0 +1,927 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "dsi-drm:[%s] " fmt, __func__
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
#include "msm_kms.h"
#include "sde_connector.h"
#include "dsi_drm.h"
#include "sde_trace.h"
#define to_dsi_bridge(x) container_of((x), struct dsi_bridge, base)
#define to_dsi_state(x) container_of((x), struct dsi_connector_state, base)
#define DEFAULT_PANEL_JITTER_NUMERATOR 2
#define DEFAULT_PANEL_JITTER_DENOMINATOR 1
#define DEFAULT_PANEL_JITTER_ARRAY_SIZE 2
#define DEFAULT_PANEL_PREFILL_LINES 25
static struct dsi_display_mode_priv_info default_priv_info = {
.panel_jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR,
.panel_jitter_denom = DEFAULT_PANEL_JITTER_DENOMINATOR,
.panel_prefill_lines = DEFAULT_PANEL_PREFILL_LINES,
.dsc_enabled = false,
};
static void convert_to_dsi_mode(const struct drm_display_mode *drm_mode,
struct dsi_display_mode *dsi_mode)
{
memset(dsi_mode, 0, sizeof(*dsi_mode));
dsi_mode->timing.h_active = drm_mode->hdisplay;
dsi_mode->timing.h_back_porch = drm_mode->htotal - drm_mode->hsync_end;
dsi_mode->timing.h_sync_width = drm_mode->htotal -
(drm_mode->hsync_start + dsi_mode->timing.h_back_porch);
dsi_mode->timing.h_front_porch = drm_mode->hsync_start -
drm_mode->hdisplay;
dsi_mode->timing.h_skew = drm_mode->hskew;
dsi_mode->timing.v_active = drm_mode->vdisplay;
dsi_mode->timing.v_back_porch = drm_mode->vtotal - drm_mode->vsync_end;
dsi_mode->timing.v_sync_width = drm_mode->vtotal -
(drm_mode->vsync_start + dsi_mode->timing.v_back_porch);
dsi_mode->timing.v_front_porch = drm_mode->vsync_start -
drm_mode->vdisplay;
dsi_mode->timing.refresh_rate = drm_mode->vrefresh;
dsi_mode->pixel_clk_khz = drm_mode->clock;
dsi_mode->priv_info =
(struct dsi_display_mode_priv_info *)drm_mode->private;
if (dsi_mode->priv_info) {
dsi_mode->timing.dsc_enabled = dsi_mode->priv_info->dsc_enabled;
dsi_mode->timing.dsc = &dsi_mode->priv_info->dsc;
}
if (msm_is_mode_seamless(drm_mode))
dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_SEAMLESS;
if (msm_is_mode_dynamic_fps(drm_mode))
dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DFPS;
if (msm_needs_vblank_pre_modeset(drm_mode))
dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VBLANK_PRE_MODESET;
if (msm_is_mode_seamless_dms(drm_mode))
dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DMS;
if (msm_is_mode_seamless_vrr(drm_mode))
dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
dsi_mode->timing.h_sync_polarity =
!!(drm_mode->flags & DRM_MODE_FLAG_PHSYNC);
dsi_mode->timing.v_sync_polarity =
!!(drm_mode->flags & DRM_MODE_FLAG_PVSYNC);
}
void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
struct drm_display_mode *drm_mode)
{
memset(drm_mode, 0, sizeof(*drm_mode));
drm_mode->hdisplay = dsi_mode->timing.h_active;
drm_mode->hsync_start = drm_mode->hdisplay +
dsi_mode->timing.h_front_porch;
drm_mode->hsync_end = drm_mode->hsync_start +
dsi_mode->timing.h_sync_width;
drm_mode->htotal = drm_mode->hsync_end + dsi_mode->timing.h_back_porch;
drm_mode->hskew = dsi_mode->timing.h_skew;
drm_mode->vdisplay = dsi_mode->timing.v_active;
drm_mode->vsync_start = drm_mode->vdisplay +
dsi_mode->timing.v_front_porch;
drm_mode->vsync_end = drm_mode->vsync_start +
dsi_mode->timing.v_sync_width;
drm_mode->vtotal = drm_mode->vsync_end + dsi_mode->timing.v_back_porch;
drm_mode->vrefresh = dsi_mode->timing.refresh_rate;
drm_mode->clock = dsi_mode->pixel_clk_khz;
drm_mode->private = (int *)dsi_mode->priv_info;
if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_SEAMLESS)
drm_mode->flags |= DRM_MODE_FLAG_SEAMLESS;
if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_DFPS)
drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS;
if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_VBLANK_PRE_MODESET)
drm_mode->private_flags |= MSM_MODE_FLAG_VBLANK_PRE_MODESET;
if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_DMS)
drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DMS;
if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_VRR)
drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_VRR;
if (dsi_mode->timing.h_sync_polarity)
drm_mode->flags |= DRM_MODE_FLAG_PHSYNC;
if (dsi_mode->timing.v_sync_polarity)
drm_mode->flags |= DRM_MODE_FLAG_PVSYNC;
drm_mode_set_name(drm_mode);
}
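/*
 * Worked example (illustrative numbers): for h_active = 1080,
 * h_front_porch = 32, h_sync_width = 16 and h_back_porch = 32, the
 * conversion above produces hdisplay = 1080, hsync_start = 1112,
 * hsync_end = 1128 and htotal = 1160; convert_to_dsi_mode() inverts
 * exactly this arithmetic.
 */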
static int dsi_bridge_attach(struct drm_bridge *bridge)
{
struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
if (!bridge) {
pr_err("Invalid params\n");
return -EINVAL;
}
pr_debug("[%d] attached\n", c_bridge->id);
return 0;
}
static void dsi_bridge_pre_enable(struct drm_bridge *bridge)
{
int rc = 0;
struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
if (!bridge) {
pr_err("Invalid params\n");
return;
}
if (!c_bridge || !c_bridge->display || !c_bridge->display->panel) {
pr_err("Incorrect bridge details\n");
return;
}
atomic_set(&c_bridge->display->panel->esd_recovery_pending, 0);
/* By this point mode should have been validated through mode_fixup */
rc = dsi_display_set_mode(c_bridge->display,
&(c_bridge->dsi_mode), 0x0);
if (rc) {
pr_err("[%d] failed to perform a mode set, rc=%d\n",
c_bridge->id, rc);
return;
}
if (c_bridge->dsi_mode.dsi_mode_flags &
(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR)) {
pr_debug("[%d] seamless pre-enable\n", c_bridge->id);
return;
}
SDE_ATRACE_BEGIN("dsi_bridge_pre_enable");
rc = dsi_display_prepare(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display prepare failed, rc=%d\n",
c_bridge->id, rc);
SDE_ATRACE_END("dsi_bridge_pre_enable");
return;
}
SDE_ATRACE_BEGIN("dsi_display_enable");
rc = dsi_display_enable(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display enable failed, rc=%d\n",
c_bridge->id, rc);
(void)dsi_display_unprepare(c_bridge->display);
}
SDE_ATRACE_END("dsi_display_enable");
SDE_ATRACE_END("dsi_bridge_pre_enable");
rc = dsi_display_splash_res_cleanup(c_bridge->display);
if (rc)
pr_err("Continuous splash pipeline cleanup failed, rc=%d\n",
rc);
}
static void dsi_bridge_enable(struct drm_bridge *bridge)
{
int rc = 0;
struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
struct dsi_display *display;
if (!bridge) {
pr_err("Invalid params\n");
return;
}
if (c_bridge->dsi_mode.dsi_mode_flags &
(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR)) {
pr_debug("[%d] seamless enable\n", c_bridge->id);
return;
}
display = c_bridge->display;
rc = dsi_display_post_enable(display);
if (rc)
pr_err("[%d] DSI display post enabled failed, rc=%d\n",
c_bridge->id, rc);
if (display && display->drm_conn)
sde_connector_helper_bridge_enable(display->drm_conn);
}
static void dsi_bridge_disable(struct drm_bridge *bridge)
{
int rc = 0;
struct dsi_display *display;
struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
if (!bridge) {
pr_err("Invalid params\n");
return;
}
display = c_bridge->display;
if (display && display->drm_conn)
sde_connector_helper_bridge_disable(display->drm_conn);
rc = dsi_display_pre_disable(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display pre disable failed, rc=%d\n",
c_bridge->id, rc);
}
}
static void dsi_bridge_post_disable(struct drm_bridge *bridge)
{
int rc = 0;
struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
if (!bridge) {
pr_err("Invalid params\n");
return;
}
SDE_ATRACE_BEGIN("dsi_bridge_post_disable");
SDE_ATRACE_BEGIN("dsi_display_disable");
rc = dsi_display_disable(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display disable failed, rc=%d\n",
c_bridge->id, rc);
SDE_ATRACE_END("dsi_display_disable");
return;
}
SDE_ATRACE_END("dsi_display_disable");
rc = dsi_display_unprepare(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display unprepare failed, rc=%d\n",
c_bridge->id, rc);
SDE_ATRACE_END("dsi_bridge_post_disable");
return;
}
SDE_ATRACE_END("dsi_bridge_post_disable");
}
static void dsi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
if (!bridge || !mode || !adjusted_mode) {
pr_err("Invalid params\n");
return;
}
memset(&(c_bridge->dsi_mode), 0x0, sizeof(struct dsi_display_mode));
convert_to_dsi_mode(adjusted_mode, &(c_bridge->dsi_mode));
}
static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
int rc = 0;
struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
struct dsi_display *display;
struct dsi_display_mode dsi_mode, cur_dsi_mode, *panel_dsi_mode;
struct drm_display_mode cur_mode;
struct drm_crtc_state *crtc_state;
crtc_state = container_of(mode, struct drm_crtc_state, mode);
if (!bridge || !mode || !adjusted_mode) {
pr_err("Invalid params\n");
return false;
}
display = c_bridge->display;
if (!display) {
pr_err("Invalid params\n");
return false;
}
/*
* if no timing defined in panel, it must be external mode
* and we'll use empty priv info to populate the mode
*/
if (display->panel && !display->panel->num_timing_nodes) {
*adjusted_mode = *mode;
adjusted_mode->private = (int *)&default_priv_info;
adjusted_mode->private_flags = 0;
return true;
}
convert_to_dsi_mode(mode, &dsi_mode);
/*
* retrieve dsi mode from dsi driver's cache since not safe to take
* the drm mode config mutex in all paths
*/
rc = dsi_display_find_mode(display, &dsi_mode, &panel_dsi_mode);
if (rc)
return false;
/* propagate the private info to the adjusted_mode derived dsi mode */
dsi_mode.priv_info = panel_dsi_mode->priv_info;
dsi_mode.dsi_mode_flags = panel_dsi_mode->dsi_mode_flags;
dsi_mode.timing.dsc_enabled = dsi_mode.priv_info->dsc_enabled;
dsi_mode.timing.dsc = &dsi_mode.priv_info->dsc;
rc = dsi_display_validate_mode(c_bridge->display, &dsi_mode,
DSI_VALIDATE_FLAG_ALLOW_ADJUST);
if (rc) {
pr_err("[%d] mode is not valid, rc=%d\n", c_bridge->id, rc);
return false;
}
if (bridge->encoder && bridge->encoder->crtc &&
crtc_state->crtc) {
convert_to_dsi_mode(&crtc_state->crtc->state->mode,
&cur_dsi_mode);
cur_dsi_mode.timing.dsc_enabled =
dsi_mode.priv_info->dsc_enabled;
cur_dsi_mode.timing.dsc = &dsi_mode.priv_info->dsc;
rc = dsi_display_validate_mode_vrr(c_bridge->display,
&cur_dsi_mode, &dsi_mode);
if (rc)
pr_debug("[%s] vrr mode mismatch failure rc=%d\n",
c_bridge->display->name, rc);
cur_mode = crtc_state->crtc->mode;
/* No DMS/VRR when drm pipeline is changing */
if (!drm_mode_equal(&cur_mode, adjusted_mode) &&
(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
(!crtc_state->active_changed ||
display->is_cont_splash_enabled))
dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
}
/* convert back to drm mode, propagating the private info & flags */
dsi_convert_to_drm_mode(&dsi_mode, adjusted_mode);
return true;
}
int dsi_conn_get_mode_info(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
struct msm_mode_info *mode_info,
u32 max_mixer_width, void *display)
{
struct dsi_display_mode dsi_mode;
struct dsi_mode_info *timing;
if (!drm_mode || !mode_info)
return -EINVAL;
convert_to_dsi_mode(drm_mode, &dsi_mode);
if (!dsi_mode.priv_info)
return -EINVAL;
memset(mode_info, 0, sizeof(*mode_info));
timing = &dsi_mode.timing;
mode_info->frame_rate = dsi_mode.timing.refresh_rate;
mode_info->vtotal = DSI_V_TOTAL(timing);
mode_info->prefill_lines = dsi_mode.priv_info->panel_prefill_lines;
mode_info->jitter_numer = dsi_mode.priv_info->panel_jitter_numer;
mode_info->jitter_denom = dsi_mode.priv_info->panel_jitter_denom;
mode_info->clk_rate = dsi_mode.priv_info->clk_rate_hz;
mode_info->mdp_transfer_time_us =
dsi_mode.priv_info->mdp_transfer_time_us;
memcpy(&mode_info->topology, &dsi_mode.priv_info->topology,
sizeof(struct msm_display_topology));
mode_info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
if (dsi_mode.priv_info->dsc_enabled) {
mode_info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_DSC;
memcpy(&mode_info->comp_info.dsc_info, &dsi_mode.priv_info->dsc,
sizeof(dsi_mode.priv_info->dsc));
mode_info->comp_info.comp_ratio =
MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1;
}
if (dsi_mode.priv_info->roi_caps.enabled) {
memcpy(&mode_info->roi_caps, &dsi_mode.priv_info->roi_caps,
sizeof(dsi_mode.priv_info->roi_caps));
}
return 0;
}
static const struct drm_bridge_funcs dsi_bridge_ops = {
.attach = dsi_bridge_attach,
.mode_fixup = dsi_bridge_mode_fixup,
.pre_enable = dsi_bridge_pre_enable,
.enable = dsi_bridge_enable,
.disable = dsi_bridge_disable,
.post_disable = dsi_bridge_post_disable,
.mode_set = dsi_bridge_mode_set,
};
int dsi_conn_set_info_blob(struct drm_connector *connector,
void *info, void *display, struct msm_mode_info *mode_info)
{
struct dsi_display *dsi_display = display;
struct dsi_panel *panel;
enum dsi_pixel_format fmt;
u32 bpp;
if (!info || !dsi_display)
return -EINVAL;
dsi_display->drm_conn = connector;
sde_kms_info_add_keystr(info,
"display type", dsi_display->display_type);
switch (dsi_display->type) {
case DSI_DISPLAY_SINGLE:
sde_kms_info_add_keystr(info, "display config",
"single display");
break;
case DSI_DISPLAY_EXT_BRIDGE:
sde_kms_info_add_keystr(info, "display config", "ext bridge");
break;
case DSI_DISPLAY_SPLIT:
sde_kms_info_add_keystr(info, "display config",
"split display");
break;
case DSI_DISPLAY_SPLIT_EXT_BRIDGE:
sde_kms_info_add_keystr(info, "display config",
"split ext bridge");
break;
default:
pr_debug("invalid display type:%d\n", dsi_display->type);
break;
}
if (!dsi_display->panel) {
pr_debug("invalid panel data\n");
goto end;
}
panel = dsi_display->panel;
sde_kms_info_add_keystr(info, "panel name", panel->name);
switch (panel->panel_mode) {
case DSI_OP_VIDEO_MODE:
sde_kms_info_add_keystr(info, "panel mode", "video");
sde_kms_info_add_keystr(info, "qsync support",
panel->qsync_min_fps ? "true" : "false");
break;
case DSI_OP_CMD_MODE:
sde_kms_info_add_keystr(info, "panel mode", "command");
sde_kms_info_add_keyint(info, "mdp_transfer_time_us",
mode_info->mdp_transfer_time_us);
sde_kms_info_add_keystr(info, "qsync support",
panel->qsync_min_fps ? "true" : "false");
break;
default:
pr_debug("invalid panel type:%d\n", panel->panel_mode);
break;
}
sde_kms_info_add_keystr(info, "dfps support",
panel->dfps_caps.dfps_support ? "true" : "false");
if (panel->dfps_caps.dfps_support) {
sde_kms_info_add_keyint(info, "min_fps",
panel->dfps_caps.min_refresh_rate);
sde_kms_info_add_keyint(info, "max_fps",
panel->dfps_caps.max_refresh_rate);
}
switch (panel->phy_props.rotation) {
case DSI_PANEL_ROTATE_NONE:
sde_kms_info_add_keystr(info, "panel orientation", "none");
break;
case DSI_PANEL_ROTATE_H_FLIP:
sde_kms_info_add_keystr(info, "panel orientation", "horz flip");
break;
case DSI_PANEL_ROTATE_V_FLIP:
sde_kms_info_add_keystr(info, "panel orientation", "vert flip");
break;
case DSI_PANEL_ROTATE_HV_FLIP:
sde_kms_info_add_keystr(info, "panel orientation",
"horz & vert flip");
break;
default:
pr_debug("invalid panel rotation:%d\n",
panel->phy_props.rotation);
break;
}
switch (panel->bl_config.type) {
case DSI_BACKLIGHT_PWM:
sde_kms_info_add_keystr(info, "backlight type", "pwm");
break;
case DSI_BACKLIGHT_WLED:
sde_kms_info_add_keystr(info, "backlight type", "wled");
break;
case DSI_BACKLIGHT_DCS:
sde_kms_info_add_keystr(info, "backlight type", "dcs");
break;
default:
pr_debug("invalid panel backlight type:%d\n",
panel->bl_config.type);
break;
}
if (mode_info && mode_info->roi_caps.enabled) {
sde_kms_info_add_keyint(info, "partial_update_num_roi",
mode_info->roi_caps.num_roi);
sde_kms_info_add_keyint(info, "partial_update_xstart",
mode_info->roi_caps.align.xstart_pix_align);
sde_kms_info_add_keyint(info, "partial_update_walign",
mode_info->roi_caps.align.width_pix_align);
sde_kms_info_add_keyint(info, "partial_update_wmin",
mode_info->roi_caps.align.min_width);
sde_kms_info_add_keyint(info, "partial_update_ystart",
mode_info->roi_caps.align.ystart_pix_align);
sde_kms_info_add_keyint(info, "partial_update_halign",
mode_info->roi_caps.align.height_pix_align);
sde_kms_info_add_keyint(info, "partial_update_hmin",
mode_info->roi_caps.align.min_height);
sde_kms_info_add_keyint(info, "partial_update_roimerge",
mode_info->roi_caps.merge_rois);
}
fmt = dsi_display->config.common_config.dst_format;
bpp = dsi_ctrl_pixel_format_to_bpp(fmt);
sde_kms_info_add_keyint(info, "bit_depth", bpp);
end:
return 0;
}
enum drm_connector_status dsi_conn_detect(struct drm_connector *conn,
bool force,
void *display)
{
enum drm_connector_status status = connector_status_unknown;
struct msm_display_info info;
int rc;
if (!conn || !display)
return status;
/* get display dsi_info */
memset(&info, 0x0, sizeof(info));
rc = dsi_display_get_info(conn, &info, display);
if (rc) {
pr_err("failed to get display info, rc=%d\n", rc);
return connector_status_disconnected;
}
if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
status = (info.is_connected ? connector_status_connected :
connector_status_disconnected);
else
status = connector_status_connected;
conn->display_info.width_mm = info.width_mm;
conn->display_info.height_mm = info.height_mm;
return status;
}
void dsi_connector_put_modes(struct drm_connector *connector,
void *display)
{
struct drm_display_mode *drm_mode;
struct dsi_display_mode dsi_mode;
struct dsi_display *dsi_display;
if (!connector || !display)
return;
list_for_each_entry(drm_mode, &connector->modes, head) {
convert_to_dsi_mode(drm_mode, &dsi_mode);
dsi_display_put_mode(display, &dsi_mode);
}
/* free the display structure modes also */
dsi_display = display;
kfree(dsi_display->modes);
dsi_display->modes = NULL;
}
static int dsi_drm_update_edid_name(struct edid *edid, const char *name)
{
u8 *dtd = (u8 *)&edid->detailed_timings[3];
u8 standard_header[] = {0x00, 0x00, 0x00, 0xFE, 0x00};
u32 dtd_size = 18;
u32 header_size = sizeof(standard_header);
if (!name)
return -EINVAL;
/* Fill standard header */
memcpy(dtd, standard_header, header_size);
dtd_size -= header_size;
dtd_size = min_t(u32, dtd_size, strlen(name));
memcpy(dtd + header_size, name, dtd_size);
return 0;
}
static void dsi_drm_update_dtd(struct edid *edid,
struct dsi_display_mode *modes, u32 modes_count)
{
u32 i;
u32 count = min_t(u32, modes_count, 3);
for (i = 0; i < count; i++) {
struct detailed_timing *dtd = &edid->detailed_timings[i];
struct dsi_display_mode *mode = &modes[i];
struct dsi_mode_info *timing = &mode->timing;
struct detailed_pixel_timing *pd = &dtd->data.pixel_data;
u32 h_blank = timing->h_front_porch + timing->h_sync_width +
timing->h_back_porch;
u32 v_blank = timing->v_front_porch + timing->v_sync_width +
timing->v_back_porch;
u32 h_img = 0, v_img = 0;
dtd->pixel_clock = mode->pixel_clk_khz / 10;
pd->hactive_lo = timing->h_active & 0xFF;
pd->hblank_lo = h_blank & 0xFF;
pd->hactive_hblank_hi = ((h_blank >> 8) & 0xF) |
((timing->h_active >> 8) & 0xF) << 4;
pd->vactive_lo = timing->v_active & 0xFF;
pd->vblank_lo = v_blank & 0xFF;
pd->vactive_vblank_hi = ((v_blank >> 8) & 0xF) |
((timing->v_active >> 8) & 0xF) << 4;
pd->hsync_offset_lo = timing->h_front_porch & 0xFF;
pd->hsync_pulse_width_lo = timing->h_sync_width & 0xFF;
pd->vsync_offset_pulse_width_lo =
((timing->v_front_porch & 0xF) << 4) |
(timing->v_sync_width & 0xF);
pd->hsync_vsync_offset_pulse_width_hi =
(((timing->h_front_porch >> 8) & 0x3) << 6) |
(((timing->h_sync_width >> 8) & 0x3) << 4) |
(((timing->v_front_porch >> 4) & 0x3) << 2) |
(((timing->v_sync_width >> 4) & 0x3) << 0);
pd->width_mm_lo = h_img & 0xFF;
pd->height_mm_lo = v_img & 0xFF;
pd->width_height_mm_hi = (((h_img >> 8) & 0xF) << 4) |
((v_img >> 8) & 0xF);
pd->hborder = 0;
pd->vborder = 0;
pd->misc = 0;
}
}
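/*
 * Worked example of the 12-bit DTD packing above (values are
 * illustrative): with h_active = 1080 (0x438) and h_blank = 160 (0xA0),
 * hactive_lo = 0x38, hblank_lo = 0xA0, and hactive_hblank_hi packs the
 * two high nibbles as ((0xA0 >> 8) & 0xF) | (((0x438 >> 8) & 0xF) << 4)
 * = 0x40. An EDID reader recovers h_active as
 * ((hactive_hblank_hi >> 4) << 8) | hactive_lo = 0x438.
 */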
static void dsi_drm_update_checksum(struct edid *edid)
{
u8 *data = (u8 *)edid;
u32 i, sum = 0;
for (i = 0; i < EDID_LENGTH - 1; i++)
sum += data[i];
edid->checksum = 0x100 - (sum & 0xFF);
}
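/*
 * dsi_drm_update_checksum() makes all EDID_LENGTH bytes of the block
 * sum to zero modulo 256. A minimal verification sketch (this helper
 * is illustrative and not part of the driver):
 */
static inline bool dsi_drm_edid_checksum_ok(const struct edid *edid)
{
	const u8 *data = (const u8 *)edid;
	u8 sum = 0;
	u32 i;

	for (i = 0; i < EDID_LENGTH; i++)
		sum += data[i];

	/* the checksum byte cancels the sum of the other 127 bytes */
	return sum == 0;
}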
int dsi_connector_get_modes(struct drm_connector *connector, void *data)
{
int rc, i;
u32 count = 0, edid_size;
struct dsi_display_mode *modes = NULL;
struct drm_display_mode drm_mode;
struct dsi_display *display = data;
struct edid edid;
const u8 edid_buf[EDID_LENGTH] = {
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x44, 0x6D,
0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x1B, 0x10, 0x01, 0x03,
0x80, 0x50, 0x2D, 0x78, 0x0A, 0x0D, 0xC9, 0xA0, 0x57, 0x47,
0x98, 0x27, 0x12, 0x48, 0x4C, 0x00, 0x00, 0x00, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01,
};
edid_size = min_t(u32, sizeof(edid), EDID_LENGTH);
memcpy(&edid, edid_buf, edid_size);
if (sde_connector_get_panel(connector)) {
/*
* TODO: If drm_panel is attached, query modes from the panel.
* This is complicated in split dsi cases because panel is not
* attached to both connectors.
*/
goto end;
}
rc = dsi_display_get_mode_count(display, &count);
if (rc) {
pr_err("failed to get num of modes, rc=%d\n", rc);
goto end;
}
rc = dsi_display_get_modes(display, &modes);
if (rc) {
pr_err("failed to get modes, rc=%d\n", rc);
count = 0;
goto end;
}
for (i = 0; i < count; i++) {
struct drm_display_mode *m;
memset(&drm_mode, 0x0, sizeof(drm_mode));
dsi_convert_to_drm_mode(&modes[i], &drm_mode);
m = drm_mode_duplicate(connector->dev, &drm_mode);
if (!m) {
pr_err("failed to add mode %ux%u\n",
drm_mode.hdisplay,
drm_mode.vdisplay);
count = -ENOMEM;
goto end;
}
m->width_mm = connector->display_info.width_mm;
m->height_mm = connector->display_info.height_mm;
drm_mode_probed_add(connector, m);
}
rc = dsi_drm_update_edid_name(&edid, display->panel->name);
if (rc) {
count = 0;
goto end;
}
dsi_drm_update_dtd(&edid, modes, count);
dsi_drm_update_checksum(&edid);
rc = drm_connector_update_edid_property(connector, &edid);
if (rc)
count = 0;
end:
pr_debug("MODE COUNT =%d\n\n", count);
return count;
}
enum drm_mode_status dsi_conn_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode,
void *display)
{
struct dsi_display_mode dsi_mode;
int rc;
if (!connector || !mode) {
pr_err("Invalid params\n");
return MODE_ERROR;
}
convert_to_dsi_mode(mode, &dsi_mode);
rc = dsi_display_validate_mode(display, &dsi_mode,
DSI_VALIDATE_FLAG_ALLOW_ADJUST);
if (rc) {
pr_err("mode not supported, rc=%d\n", rc);
return MODE_BAD;
}
return MODE_OK;
}
int dsi_conn_pre_kickoff(struct drm_connector *connector,
void *display,
struct msm_display_kickoff_params *params)
{
if (!connector || !display || !params) {
pr_err("Invalid params\n");
return -EINVAL;
}
return dsi_display_pre_kickoff(connector, display, params);
}
void dsi_conn_enable_event(struct drm_connector *connector,
uint32_t event_idx, bool enable, void *display)
{
struct dsi_event_cb_info event_info;
memset(&event_info, 0, sizeof(event_info));
event_info.event_cb = sde_connector_trigger_event;
event_info.event_usr_ptr = connector;
dsi_display_enable_event(connector, display,
event_idx, &event_info, enable);
}
int dsi_conn_post_kickoff(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct dsi_bridge *c_bridge;
struct dsi_display_mode adj_mode;
struct dsi_display *display;
struct dsi_display_ctrl *m_ctrl, *ctrl;
int i, rc = 0;
if (!connector || !connector->state) {
pr_err("invalid connector or connector state\n");
return -EINVAL;
}
encoder = connector->state->best_encoder;
if (!encoder) {
pr_debug("best encoder is not available\n");
return 0;
}
c_bridge = to_dsi_bridge(encoder->bridge);
adj_mode = c_bridge->dsi_mode;
display = c_bridge->display;
if (adj_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR) {
m_ctrl = &display->ctrl[display->clk_master_idx];
rc = dsi_ctrl_timing_db_update(m_ctrl->ctrl, false);
if (rc) {
pr_err("[%s] failed to dfps update rc=%d\n",
display->name, rc);
return -EINVAL;
}
/* Update the rest of the controllers */
display_for_each_ctrl(i, display) {
ctrl = &display->ctrl[i];
if (!ctrl->ctrl || (ctrl == m_ctrl))
continue;
rc = dsi_ctrl_timing_db_update(ctrl->ctrl, false);
if (rc) {
pr_err("[%s] failed to dfps update rc=%d\n",
display->name, rc);
return -EINVAL;
}
}
c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_VRR;
}
return 0;
}
struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
struct drm_device *dev,
struct drm_encoder *encoder)
{
int rc = 0;
struct dsi_bridge *bridge;
bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
if (!bridge) {
rc = -ENOMEM;
goto error;
}
bridge->display = display;
bridge->base.funcs = &dsi_bridge_ops;
bridge->base.encoder = encoder;
rc = drm_bridge_attach(encoder, &bridge->base, NULL);
if (rc) {
pr_err("failed to attach bridge, rc=%d\n", rc);
goto error_free_bridge;
}
encoder->bridge = &bridge->base;
return bridge;
error_free_bridge:
kfree(bridge);
error:
return ERR_PTR(rc);
}
void dsi_drm_bridge_cleanup(struct dsi_bridge *bridge)
{
if (bridge && bridge->base.encoder)
bridge->base.encoder->bridge = NULL;
kfree(bridge);
}

msm/dsi/dsi_drm.h (new file)
@@ -0,0 +1,133 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_DRM_H_
#define _DSI_DRM_H_
#include <linux/types.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include "msm_drv.h"
#include "dsi_display.h"
struct dsi_bridge {
struct drm_bridge base;
u32 id;
struct dsi_display *display;
struct dsi_display_mode dsi_mode;
};
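/*
 * dsi_drm.c converts a drm_bridge back to its wrapping dsi_bridge with
 * to_dsi_bridge(); a sketch of that helper, assuming the conventional
 * container_of() pattern:
 *
 *	#define to_dsi_bridge(x) container_of((x), struct dsi_bridge, base)
 */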
/**
* dsi_conn_set_info_blob - callback to perform info blob initialization
* @connector: Pointer to drm connector structure
* @info: Pointer to sde connector info structure
* @display: Pointer to private display handle
* @mode_info: Pointer to mode info structure
* Returns: Zero on success
*/
int dsi_conn_set_info_blob(struct drm_connector *connector,
void *info,
void *display,
struct msm_mode_info *mode_info);
/**
* dsi_conn_detect - callback to determine if connector is connected
* @connector: Pointer to drm connector structure
* @force: Force detect setting from drm framework
* @display: Pointer to private display handle
* Returns: Connector 'is connected' status
*/
enum drm_connector_status dsi_conn_detect(struct drm_connector *conn,
bool force,
void *display);
/**
* dsi_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
* Returns: Number of modes added
*/
int dsi_connector_get_modes(struct drm_connector *connector,
void *display);
/**
* dsi_connector_put_modes - callback to free up drm modes of the connector
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
*/
void dsi_connector_put_modes(struct drm_connector *connector,
void *display);
/**
* dsi_conn_get_mode_info - retrieve information on the mode selected
 * @connector: Pointer to drm connector structure
 * @drm_mode: Display mode set for the display
* @mode_info: Out parameter. information of the mode.
* @max_mixer_width: max width supported by HW layer mixer
* @display: Pointer to private display structure
* Returns: Zero on success
*/
int dsi_conn_get_mode_info(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
struct msm_mode_info *mode_info, u32 max_mixer_width,
void *display);
/**
* dsi_conn_mode_valid - callback to determine if specified mode is valid
* @connector: Pointer to drm connector structure
* @mode: Pointer to drm mode structure
* @display: Pointer to private display handle
* Returns: Validity status for specified mode
*/
enum drm_mode_status dsi_conn_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode,
void *display);
/**
* dsi_conn_enable_event - callback to notify DSI driver of event registration
* @connector: Pointer to drm connector structure
* @event_idx: Connector event index
* @enable: Whether or not the event is enabled
* @display: Pointer to private display handle
*/
void dsi_conn_enable_event(struct drm_connector *connector,
uint32_t event_idx, bool enable, void *display);
struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
struct drm_device *dev,
struct drm_encoder *encoder);
void dsi_drm_bridge_cleanup(struct dsi_bridge *bridge);
/**
 * dsi_conn_pre_kickoff - program kickoff-time features
* @connector: Pointer to drm connector structure
* @display: Pointer to private display structure
* @params: Parameters for kickoff-time programming
* Returns: Zero on success
*/
int dsi_conn_pre_kickoff(struct drm_connector *connector,
void *display,
struct msm_display_kickoff_params *params);
/**
 * dsi_conn_post_kickoff - program post kickoff-time features
* @connector: Pointer to drm connector structure
* Returns: Zero on success
*/
int dsi_conn_post_kickoff(struct drm_connector *connector);
/**
* dsi_convert_to_drm_mode - Update drm mode with dsi mode information
* @dsi_mode: input parameter. structure having dsi mode information.
* @drm_mode: output parameter. DRM mode set for the display
*/
void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
struct drm_display_mode *drm_mode);
#endif /* _DSI_DRM_H_ */

msm/dsi/dsi_hw.h (new file)
@@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_HW_H_
#define _DSI_HW_H_
#include <linux/io.h>
#define DSI_R32(dsi_hw, off) readl_relaxed((dsi_hw)->base + (off))
#define DSI_W32(dsi_hw, off, val) \
do {\
pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
(dsi_hw)->index, #off, (uint32_t)(val)); \
writel_relaxed((val), (dsi_hw)->base + (off)); \
} while (0)
#define DSI_MMSS_MISC_R32(dsi_hw, off) \
readl_relaxed((dsi_hw)->mmss_misc_base + (off))
#define DSI_MMSS_MISC_W32(dsi_hw, off, val) \
do {\
pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
(dsi_hw)->index, #off, val); \
writel_relaxed((val), (dsi_hw)->mmss_misc_base + (off)); \
} while (0)
#define DSI_MISC_R32(dsi_hw, off) \
readl_relaxed((dsi_hw)->phy_clamp_base + (off))
#define DSI_MISC_W32(dsi_hw, off, val) \
do {\
pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
(dsi_hw)->index, #off, val); \
writel_relaxed((val), (dsi_hw)->phy_clamp_base + (off)); \
} while (0)
#define DSI_DISP_CC_R32(dsi_hw, off) \
readl_relaxed((dsi_hw)->disp_cc_base + (off))
#define DSI_DISP_CC_W32(dsi_hw, off, val) \
do {\
pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
(dsi_hw)->index, #off, val); \
writel_relaxed((val), (dsi_hw)->disp_cc_base + (off)); \
} while (0)
#define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
#define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
#endif /* _DSI_HW_H_ */
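/*
 * Usage sketch for the accessors above (assumes a dsi hardware object
 * with a valid base and index; the offset macro and bit are
 * illustrative). DSI_W32 stringifies its offset argument for the debug
 * log, so callers should pass the register macro rather than a raw
 * number:
 *
 *	u32 val = DSI_R32(dsi_hw, DSI_EXAMPLE_REG);
 *	val |= BIT(0);
 *	DSI_W32(dsi_hw, DSI_EXAMPLE_REG, val);
 *	wmb();	// commit before triggering dependent hardware activity
 */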

msm/dsi/dsi_panel.c (new file, 3882 lines; diff suppressed because it is too large)

msm/dsi/dsi_panel.h (new file)
@@ -0,0 +1,296 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_PANEL_H_
#define _DSI_PANEL_H_
#include <linux/of_device.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/backlight.h>
#include <drm/drm_panel.h>
#include <drm/msm_drm.h>
#include "dsi_defs.h"
#include "dsi_ctrl_hw.h"
#include "dsi_clk.h"
#include "dsi_pwr.h"
#include "dsi_parser.h"
#include "msm_drv.h"
#define MAX_BL_LEVEL 4096
#define MAX_BL_SCALE_LEVEL 1024
#define MAX_SV_BL_SCALE_LEVEL 65535
#define DSI_CMD_PPS_SIZE 135
#define DSI_MODE_MAX 5
enum dsi_panel_rotation {
DSI_PANEL_ROTATE_NONE = 0,
DSI_PANEL_ROTATE_HV_FLIP,
DSI_PANEL_ROTATE_H_FLIP,
DSI_PANEL_ROTATE_V_FLIP
};
enum dsi_backlight_type {
DSI_BACKLIGHT_PWM = 0,
DSI_BACKLIGHT_WLED,
DSI_BACKLIGHT_DCS,
DSI_BACKLIGHT_EXTERNAL,
DSI_BACKLIGHT_UNKNOWN,
DSI_BACKLIGHT_MAX,
};
enum bl_update_flag {
BL_UPDATE_DELAY_UNTIL_FIRST_FRAME,
BL_UPDATE_NONE,
};
enum {
MODE_GPIO_NOT_VALID = 0,
MODE_SEL_DUAL_PORT,
MODE_SEL_SINGLE_PORT,
MODE_GPIO_HIGH,
MODE_GPIO_LOW,
};
enum dsi_dms_mode {
DSI_DMS_MODE_DISABLED = 0,
DSI_DMS_MODE_RES_SWITCH_IMMEDIATE,
};
struct dsi_dfps_capabilities {
bool dfps_support;
enum dsi_dfps_type type;
u32 min_refresh_rate;
u32 max_refresh_rate;
};
struct dsi_pinctrl_info {
struct pinctrl *pinctrl;
struct pinctrl_state *active;
struct pinctrl_state *suspend;
};
struct dsi_panel_phy_props {
u32 panel_width_mm;
u32 panel_height_mm;
enum dsi_panel_rotation rotation;
};
struct dsi_backlight_config {
enum dsi_backlight_type type;
enum bl_update_flag bl_update;
u32 bl_min_level;
u32 bl_max_level;
u32 brightness_max_level;
u32 bl_level;
u32 bl_scale;
u32 bl_scale_sv;
int en_gpio;
/* PWM params */
bool pwm_pmi_control;
u32 pwm_pmic_bank;
u32 pwm_period_usecs;
int pwm_gpio;
/* WLED params */
struct led_trigger *wled;
struct backlight_device *raw_bd;
};
struct dsi_reset_seq {
u32 level;
u32 sleep_ms;
};
struct dsi_panel_reset_config {
struct dsi_reset_seq *sequence;
u32 count;
int reset_gpio;
int disp_en_gpio;
int lcd_mode_sel_gpio;
u32 mode_sel_state;
};
enum esd_check_status_mode {
ESD_MODE_REG_READ,
ESD_MODE_SW_BTA,
ESD_MODE_PANEL_TE,
ESD_MODE_SW_SIM_SUCCESS,
ESD_MODE_SW_SIM_FAILURE,
ESD_MODE_MAX
};
struct drm_panel_esd_config {
bool esd_enabled;
enum esd_check_status_mode status_mode;
struct dsi_panel_cmd_set status_cmd;
u32 *status_cmds_rlen;
u32 *status_valid_params;
u32 *status_value;
u8 *return_buf;
u8 *status_buf;
u32 groups;
};
struct dsi_panel {
const char *name;
const char *type;
struct device_node *panel_of_node;
struct mipi_dsi_device mipi_device;
struct mutex panel_lock;
struct drm_panel drm_panel;
struct mipi_dsi_host *host;
struct device *parent;
struct dsi_host_common_cfg host_config;
struct dsi_video_engine_cfg video_config;
struct dsi_cmd_engine_cfg cmd_config;
enum dsi_op_mode panel_mode;
struct dsi_dfps_capabilities dfps_caps;
struct dsi_panel_phy_props phy_props;
struct dsi_display_mode *cur_mode;
u32 num_timing_nodes;
struct dsi_regulator_info power_info;
struct dsi_backlight_config bl_config;
struct dsi_panel_reset_config reset_config;
struct dsi_pinctrl_info pinctrl;
struct drm_panel_hdr_properties hdr_props;
struct drm_panel_esd_config esd_config;
struct dsi_parser_utils utils;
bool lp11_init;
bool ulps_feature_enabled;
bool ulps_suspend_enabled;
bool allow_phy_power_off;
atomic_t esd_recovery_pending;
bool panel_initialized;
bool te_using_watchdog_timer;
u32 qsync_min_fps;
char dsc_pps_cmd[DSI_CMD_PPS_SIZE];
enum dsi_dms_mode dms_mode;
bool sync_broadcast_en;
};
static inline bool dsi_panel_ulps_feature_enabled(struct dsi_panel *panel)
{
return panel->ulps_feature_enabled;
}
static inline bool dsi_panel_initialized(struct dsi_panel *panel)
{
return panel->panel_initialized;
}
static inline void dsi_panel_acquire_panel_lock(struct dsi_panel *panel)
{
mutex_lock(&panel->panel_lock);
}
static inline void dsi_panel_release_panel_lock(struct dsi_panel *panel)
{
mutex_unlock(&panel->panel_lock);
}
struct dsi_panel *dsi_panel_get(struct device *parent,
struct device_node *of_node,
struct device_node *parser_node,
const char *type,
int topology_override);
int dsi_panel_trigger_esd_attack(struct dsi_panel *panel);
void dsi_panel_put(struct dsi_panel *panel);
int dsi_panel_drv_init(struct dsi_panel *panel, struct mipi_dsi_host *host);
int dsi_panel_drv_deinit(struct dsi_panel *panel);
int dsi_panel_get_mode_count(struct dsi_panel *panel);
void dsi_panel_put_mode(struct dsi_display_mode *mode);
int dsi_panel_get_mode(struct dsi_panel *panel,
u32 index,
struct dsi_display_mode *mode,
int topology_override);
int dsi_panel_validate_mode(struct dsi_panel *panel,
struct dsi_display_mode *mode);
int dsi_panel_get_host_cfg_for_mode(struct dsi_panel *panel,
struct dsi_display_mode *mode,
struct dsi_host_config *config);
int dsi_panel_get_phy_props(struct dsi_panel *panel,
struct dsi_panel_phy_props *phy_props);
int dsi_panel_get_dfps_caps(struct dsi_panel *panel,
struct dsi_dfps_capabilities *dfps_caps);
int dsi_panel_pre_prepare(struct dsi_panel *panel);
int dsi_panel_set_lp1(struct dsi_panel *panel);
int dsi_panel_set_lp2(struct dsi_panel *panel);
int dsi_panel_set_nolp(struct dsi_panel *panel);
int dsi_panel_prepare(struct dsi_panel *panel);
int dsi_panel_enable(struct dsi_panel *panel);
int dsi_panel_post_enable(struct dsi_panel *panel);
int dsi_panel_pre_disable(struct dsi_panel *panel);
int dsi_panel_disable(struct dsi_panel *panel);
int dsi_panel_unprepare(struct dsi_panel *panel);
int dsi_panel_post_unprepare(struct dsi_panel *panel);
int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl);
int dsi_panel_update_pps(struct dsi_panel *panel);
int dsi_panel_send_qsync_on_dcs(struct dsi_panel *panel,
int ctrl_idx);
int dsi_panel_send_qsync_off_dcs(struct dsi_panel *panel,
int ctrl_idx);
int dsi_panel_send_roi_dcs(struct dsi_panel *panel, int ctrl_idx,
struct dsi_rect *roi);
int dsi_panel_switch(struct dsi_panel *panel);
int dsi_panel_post_switch(struct dsi_panel *panel);
void dsi_dsc_pclk_param_calc(struct msm_display_dsc_info *dsc, int intf_width);
void dsi_panel_bl_handoff(struct dsi_panel *panel);
struct dsi_panel *dsi_panel_ext_bridge_get(struct device *parent,
struct device_node *of_node,
int topology_override);
int dsi_panel_parse_esd_reg_read_configs(struct dsi_panel *panel);
void dsi_panel_ext_bridge_put(struct dsi_panel *panel);
#endif /* _DSI_PANEL_H_ */
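/*
 * Typical power-on/power-off ordering implied by the API above (a
 * hedged sketch; the display core drives this sequence and error
 * handling is omitted):
 *
 *	rc = dsi_panel_pre_prepare(panel);	// early resources
 *	rc = dsi_panel_prepare(panel);		// rails + reset sequence
 *	rc = dsi_panel_enable(panel);		// panel-on commands
 *	rc = dsi_panel_post_enable(panel);
 *	...
 *	rc = dsi_panel_pre_disable(panel);
 *	rc = dsi_panel_disable(panel);
 *	rc = dsi_panel_unprepare(panel);
 *	rc = dsi_panel_post_unprepare(panel);
 */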

msm/dsi/dsi_parser.c (new file, 1248 lines; diff suppressed because it is too large)

msm/dsi/dsi_parser.h (new file)
@@ -0,0 +1,235 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_PARSER_H_
#define _DSI_PARSER_H_
#include <linux/of.h>
#include <linux/of_gpio.h>
#ifdef CONFIG_DSI_PARSER
void *dsi_parser_get(struct device *dev);
void dsi_parser_put(void *data);
int dsi_parser_dbg_init(void *parser, struct dentry *dir);
void *dsi_parser_get_head_node(void *parser,
const u8 *data, u32 size);
const void *dsi_parser_get_property(const struct device_node *np,
const char *name, int *lenp);
bool dsi_parser_read_bool(const struct device_node *np,
const char *propname);
int dsi_parser_read_u64(const struct device_node *np, const char *propname,
u64 *out_value);
int dsi_parser_read_u32(const struct device_node *np,
const char *propname, u32 *out_value);
int dsi_parser_read_u32_array(const struct device_node *np,
const char *propname,
u32 *out_values, size_t sz);
int dsi_parser_read_string(const struct device_node *np,
const char *propname, const char **out_string);
struct device_node *dsi_parser_get_child_by_name(const struct device_node *node,
const char *name);
int dsi_parser_get_child_count(const struct device_node *np);
struct property *dsi_parser_find_property(const struct device_node *np,
const char *name, int *lenp);
struct device_node *dsi_parser_get_next_child(const struct device_node *np,
struct device_node *prev);
int dsi_parser_count_u32_elems(const struct device_node *np,
const char *propname);
int dsi_parser_count_strings(const struct device_node *np,
const char *propname);
int dsi_parser_read_string_index(const struct device_node *np,
const char *propname,
int index, const char **output);
int dsi_parser_get_named_gpio(struct device_node *np,
const char *propname, int index);
#else /* CONFIG_DSI_PARSER */
static inline void *dsi_parser_get(struct device *dev)
{
return NULL;
}
static inline void dsi_parser_put(void *data)
{
}
static inline int dsi_parser_dbg_init(void *parser, struct dentry *dir)
{
return -ENODEV;
}
static inline void *dsi_parser_get_head_node(void *parser,
const u8 *data, u32 size)
{
return NULL;
}
static inline const void *dsi_parser_get_property(const struct device_node *np,
const char *name, int *lenp)
{
return NULL;
}
static inline bool dsi_parser_read_bool(const struct device_node *np,
const char *propname)
{
return false;
}
static inline int dsi_parser_read_u64(const struct device_node *np,
const char *propname, u64 *out_value)
{
return -ENODEV;
}
static inline int dsi_parser_read_u32(const struct device_node *np,
const char *propname, u32 *out_value)
{
return -ENODEV;
}
static inline int dsi_parser_read_u32_array(const struct device_node *np,
const char *propname, u32 *out_values, size_t sz)
{
return -ENODEV;
}
static inline int dsi_parser_read_string(const struct device_node *np,
const char *propname, const char **out_string)
{
return -ENODEV;
}
static inline struct device_node *dsi_parser_get_child_by_name(
const struct device_node *node,
const char *name)
{
return NULL;
}
static inline int dsi_parser_get_child_count(const struct device_node *np)
{
return -ENODEV;
}
static inline struct property *dsi_parser_find_property(
const struct device_node *np,
const char *name, int *lenp)
{
return NULL;
}
static inline struct device_node *dsi_parser_get_next_child(
const struct device_node *np,
struct device_node *prev)
{
return NULL;
}
static inline int dsi_parser_count_u32_elems(const struct device_node *np,
const char *propname)
{
return -ENODEV;
}
static inline int dsi_parser_count_strings(const struct device_node *np,
const char *propname)
{
return -ENODEV;
}
static inline int dsi_parser_read_string_index(const struct device_node *np,
const char *propname,
int index, const char **output)
{
return -ENODEV;
}
static inline int dsi_parser_get_named_gpio(struct device_node *np,
const char *propname, int index)
{
return -ENODEV;
}
#endif /* CONFIG_DSI_PARSER */
#define dsi_for_each_child_node(parent, child) \
for (child = utils->get_next_child(parent, NULL); \
child != NULL; \
child = utils->get_next_child(parent, child))
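/*
 * Note: dsi_for_each_child_node() expects a local variable named
 * "utils" (a struct dsi_parser_utils *) to be in scope at the call
 * site. A usage sketch (names are illustrative):
 *
 *	struct dsi_parser_utils *utils = &panel->utils;
 *	struct device_node *child;
 *
 *	dsi_for_each_child_node(parent, child) {
 *		// inspect each child node here
 *	}
 */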
struct dsi_parser_utils {
void *data;
struct device_node *node;
const void *(*get_property)(const struct device_node *np,
const char *name, int *lenp);
int (*read_u64)(const struct device_node *np,
const char *propname, u64 *out_value);
int (*read_u32)(const struct device_node *np,
const char *propname, u32 *out_value);
bool (*read_bool)(const struct device_node *np,
const char *propname);
int (*read_u32_array)(const struct device_node *np,
const char *propname, u32 *out_values, size_t sz);
int (*read_string)(const struct device_node *np, const char *propname,
const char **out_string);
struct device_node *(*get_child_by_name)(
const struct device_node *node,
const char *name);
int (*get_child_count)(const struct device_node *np);
struct property *(*find_property)(const struct device_node *np,
const char *name, int *lenp);
struct device_node *(*get_next_child)(const struct device_node *np,
struct device_node *prev);
int (*count_u32_elems)(const struct device_node *np,
const char *propname);
int (*get_named_gpio)(struct device_node *np,
const char *propname, int index);
int (*get_available_child_count)(const struct device_node *np);
};
static inline struct dsi_parser_utils *dsi_parser_get_of_utils(void)
{
static struct dsi_parser_utils of_utils = {
.get_property = of_get_property,
.read_bool = of_property_read_bool,
.read_u64 = of_property_read_u64,
.read_u32 = of_property_read_u32,
.read_u32_array = of_property_read_u32_array,
.read_string = of_property_read_string,
.get_child_by_name = of_get_child_by_name,
.get_child_count = of_get_child_count,
.get_available_child_count = of_get_available_child_count,
.find_property = of_find_property,
.get_next_child = of_get_next_child,
.count_u32_elems = of_property_count_u32_elems,
.get_named_gpio = of_get_named_gpio,
};
return &of_utils;
}
static inline struct dsi_parser_utils *dsi_parser_get_parser_utils(void)
{
static struct dsi_parser_utils parser_utils = {
.get_property = dsi_parser_get_property,
.read_bool = dsi_parser_read_bool,
.read_u64 = dsi_parser_read_u64,
.read_u32 = dsi_parser_read_u32,
.read_u32_array = dsi_parser_read_u32_array,
.read_string = dsi_parser_read_string,
.get_child_by_name = dsi_parser_get_child_by_name,
.get_child_count = dsi_parser_get_child_count,
.get_available_child_count = dsi_parser_get_child_count,
.find_property = dsi_parser_find_property,
.get_next_child = dsi_parser_get_next_child,
.count_u32_elems = dsi_parser_count_u32_elems,
.get_named_gpio = dsi_parser_get_named_gpio,
};
return &parser_utils;
}
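/*
 * The two helpers above let a caller swap the device-tree backend for
 * the firmware-file parser without touching call sites. A hedged
 * selection sketch (the use_firmware_parser flag and the property name
 * are illustrative):
 *
 *	struct dsi_parser_utils *utils = use_firmware_parser ?
 *			dsi_parser_get_parser_utils() :
 *			dsi_parser_get_of_utils();
 *
 *	if (utils->read_bool(np, "some,boolean-property"))
 *		...;
 */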
#endif

msm/dsi/dsi_phy.c (new file, 1130 lines; diff suppressed because it is too large)

msm/dsi/dsi_phy.h (new file)
@@ -0,0 +1,293 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_PHY_H_
#define _DSI_PHY_H_
#include "dsi_defs.h"
#include "dsi_clk.h"
#include "dsi_pwr.h"
#include "dsi_phy_hw.h"
struct dsi_ver_spec_info {
enum dsi_phy_version version;
u32 lane_cfg_count;
u32 strength_cfg_count;
u32 regulator_cfg_count;
u32 timing_cfg_count;
};
/**
* struct dsi_phy_power_info - digital and analog power supplies for DSI PHY
* @digital: Digital power supply for DSI PHY.
* @phy_pwr: Analog power supplies for DSI PHY to work.
*/
struct dsi_phy_power_info {
struct dsi_regulator_info digital;
struct dsi_regulator_info phy_pwr;
};
/**
* enum phy_engine_state - define engine status for dsi phy.
* @DSI_PHY_ENGINE_OFF: Engine is turned off.
* @DSI_PHY_ENGINE_ON: Engine is turned on.
* @DSI_PHY_ENGINE_MAX: Maximum value.
*/
enum phy_engine_state {
DSI_PHY_ENGINE_OFF = 0,
DSI_PHY_ENGINE_ON,
DSI_PHY_ENGINE_MAX,
};
/**
* enum phy_ulps_return_type - define set_ulps return type for dsi phy.
* @DSI_PHY_ULPS_HANDLED: ulps is handled in phy.
* @DSI_PHY_ULPS_NOT_HANDLED: ulps is not handled in phy.
* @DSI_PHY_ULPS_ERROR: ulps request failed in phy.
*/
enum phy_ulps_return_type {
DSI_PHY_ULPS_HANDLED = 0,
DSI_PHY_ULPS_NOT_HANDLED,
DSI_PHY_ULPS_ERROR,
};
/**
* struct msm_dsi_phy - DSI PHY object
* @pdev: Pointer to platform device.
* @index: Instance id.
* @name: Name of the PHY instance.
* @refcount: Reference count.
* @phy_lock: Mutex for hardware and object access.
* @ver_info: Version specific phy parameters.
* @hw: DSI PHY hardware object.
* @pwr_info: Power information.
* @cfg: DSI phy configuration.
* @clk_cb: structure containing call backs for clock control
* @power_state: True if PHY is powered on.
* @dsi_phy_state: PHY state information.
* @mode: Current mode.
* @data_lanes: Number of data lanes used.
* @dst_format: Destination format.
* @allow_phy_power_off: True if PHY is allowed to power off when idle
* @regulator_min_datarate_bps: Minimum per lane data rate to turn on regulator
* @regulator_required: True if phy regulator is required
*/
struct msm_dsi_phy {
struct platform_device *pdev;
int index;
const char *name;
u32 refcount;
struct mutex phy_lock;
const struct dsi_ver_spec_info *ver_info;
struct dsi_phy_hw hw;
struct dsi_phy_power_info pwr_info;
struct dsi_phy_cfg cfg;
struct clk_ctrl_cb clk_cb;
enum phy_engine_state dsi_phy_state;
bool power_state;
struct dsi_mode_info mode;
enum dsi_data_lanes data_lanes;
enum dsi_pixel_format dst_format;
bool allow_phy_power_off;
u32 regulator_min_datarate_bps;
bool regulator_required;
};
/**
* dsi_phy_get() - get a dsi phy handle from device node
* @of_node: device node for dsi phy controller
*
* Gets the DSI PHY handle for the corresponding of_node. The ref count is
 * incremented to one and all subsequent gets will fail until the original
 * client calls a put.
*
* Return: DSI PHY handle or an error code.
*/
struct msm_dsi_phy *dsi_phy_get(struct device_node *of_node);
/**
* dsi_phy_put() - release dsi phy handle
* @dsi_phy: DSI PHY handle.
*
* Release the DSI PHY hardware. Driver will clean up all resources and puts
* back the DSI PHY into reset state.
*/
void dsi_phy_put(struct msm_dsi_phy *dsi_phy);
/**
* dsi_phy_drv_init() - initialize dsi phy driver
* @dsi_phy: DSI PHY handle.
*
* Initializes DSI PHY driver. Should be called after dsi_phy_get().
*
* Return: error code.
*/
int dsi_phy_drv_init(struct msm_dsi_phy *dsi_phy);
/**
* dsi_phy_drv_deinit() - de-initialize dsi phy driver
* @dsi_phy: DSI PHY handle.
*
* Release all resources acquired by dsi_phy_drv_init().
*
* Return: error code.
*/
int dsi_phy_drv_deinit(struct msm_dsi_phy *dsi_phy);
/**
* dsi_phy_validate_mode() - validate a display mode
* @dsi_phy: DSI PHY handle.
* @mode: Mode information.
*
* Validation will fail if the mode cannot be supported by the PHY driver or
* hardware.
*
* Return: error code.
*/
int dsi_phy_validate_mode(struct msm_dsi_phy *dsi_phy,
struct dsi_mode_info *mode);
/**
* dsi_phy_set_power_state() - enable/disable dsi phy power supplies
* @dsi_phy: DSI PHY handle.
* @enable: Boolean flag to enable/disable.
*
* Return: error code.
*/
int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable);
/**
* dsi_phy_enable() - enable DSI PHY hardware
* @dsi_phy: DSI PHY handle.
* @config: DSI host configuration.
* @pll_source: Source PLL for PHY clock.
* @skip_validation: Validation will not be performed on parameters.
* @is_cont_splash_enabled: check whether continuous splash enabled.
*
* Validates and enables DSI PHY.
*
* Return: error code.
*/
int dsi_phy_enable(struct msm_dsi_phy *dsi_phy,
struct dsi_host_config *config,
enum dsi_phy_pll_source pll_source,
bool skip_validation,
bool is_cont_splash_enabled);
/**
* dsi_phy_disable() - disable DSI PHY hardware.
* @phy: DSI PHY handle.
*
* Return: error code.
*/
int dsi_phy_disable(struct msm_dsi_phy *phy);
/**
* dsi_phy_set_ulps() - set ulps state for DSI pHY
* @phy: DSI PHY handle
* @config: DSi host configuration information.
* @enable: Enable/Disable
* @clamp_enabled: mmss_clamp enabled/disabled
*
* Return: error code.
*/
int dsi_phy_set_ulps(struct msm_dsi_phy *phy, struct dsi_host_config *config,
bool enable, bool clamp_enabled);
/**
* dsi_phy_clk_cb_register() - Register PHY clock control callback
* @phy: DSI PHY handle
* @clk_cb: Structure containing call back for clock control
*
* Return: error code.
*/
int dsi_phy_clk_cb_register(struct msm_dsi_phy *phy,
struct clk_ctrl_cb *clk_cb);
/**
* dsi_phy_idle_ctrl() - enable/disable DSI PHY during idle screen
* @phy: DSI PHY handle
* @enable: boolean to specify PHY enable/disable.
*
* Return: error code.
*/
int dsi_phy_idle_ctrl(struct msm_dsi_phy *phy, bool enable);
/**
* dsi_phy_set_clamp_state() - configure clamps for DSI lanes
* @phy: DSI PHY handle.
* @enable: boolean to specify clamp enable/disable.
*
* Return: error code.
*/
int dsi_phy_set_clamp_state(struct msm_dsi_phy *phy, bool enable);
/**
* dsi_phy_set_clk_freq() - set DSI PHY clock frequency setting
* @phy: DSI PHY handle
* @clk_freq: link clock frequency
*
* Return: error code.
*/
int dsi_phy_set_clk_freq(struct msm_dsi_phy *phy,
struct link_clk_freq *clk_freq);
/**
* dsi_phy_set_timing_params() - timing parameters for the panel
* @phy: DSI PHY handle
* @timing: array holding timing params.
* @size: size of the array.
*
* When PHY timing calculator is not implemented, this array will be used to
* pass PHY timing information.
*
* Return: error code.
*/
int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
u32 *timing, u32 size);
/**
* dsi_phy_lane_reset() - Reset DSI PHY lanes in case of error
* @phy: DSI PHY handle
*
* Return: error code.
*/
int dsi_phy_lane_reset(struct msm_dsi_phy *phy);
/**
* dsi_phy_toggle_resync_fifo() - toggle resync retime FIFO
* @phy: DSI PHY handle
*
* Toggle the resync retime FIFO to synchronize the data paths.
 * This should be done every time there is a change in the link clock
 * rate.
*/
void dsi_phy_toggle_resync_fifo(struct msm_dsi_phy *phy);
/**
* dsi_phy_reset_clk_en_sel() - reset clk_en_select on cmn_clk_cfg1 register
* @phy: DSI PHY handle
*
 * After toggling the resync FIFO register, the clk_en_sel bit on the
 * cmn_clk_cfg1 register has to be reset.
*/
void dsi_phy_reset_clk_en_sel(struct msm_dsi_phy *phy);
/**
* dsi_phy_drv_register() - register platform driver for dsi phy
*/
void dsi_phy_drv_register(void);
/**
* dsi_phy_drv_unregister() - unregister platform driver
*/
void dsi_phy_drv_unregister(void);
#endif /* _DSI_PHY_H_ */
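/*
 * Lifecycle implied by the PHY API above (a sketch; the display
 * manager owns this sequence and error paths are omitted):
 *
 *	struct msm_dsi_phy *phy = dsi_phy_get(of_node);
 *
 *	rc = dsi_phy_drv_init(phy);
 *	rc = dsi_phy_set_power_state(phy, true);
 *	rc = dsi_phy_enable(phy, &config, DSI_PLL_SOURCE_STANDALONE,
 *			false, is_cont_splash_enabled);
 *	...
 *	rc = dsi_phy_disable(phy);
 *	rc = dsi_phy_set_power_state(phy, false);
 *	rc = dsi_phy_drv_deinit(phy);
 *	dsi_phy_put(phy);
 */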

msm/dsi/dsi_phy_hw.h (new file)
@@ -0,0 +1,312 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_PHY_HW_H_
#define _DSI_PHY_HW_H_
#include "dsi_defs.h"
#define DSI_MAX_SETTINGS 8
#define DSI_PHY_TIMING_V3_SIZE 12
#define DSI_PHY_TIMING_V4_SIZE 14
/**
* enum dsi_phy_version - DSI PHY version enumeration
* @DSI_PHY_VERSION_UNKNOWN: Unknown version.
* @DSI_PHY_VERSION_0_0_HPM: 28nm-HPM.
 * @DSI_PHY_VERSION_0_0_LPM: 28nm-LPM.
* @DSI_PHY_VERSION_1_0: 20nm
* @DSI_PHY_VERSION_2_0: 14nm
* @DSI_PHY_VERSION_3_0: 10nm
* @DSI_PHY_VERSION_4_0: 7nm
* @DSI_PHY_VERSION_4_1: 7nm
* @DSI_PHY_VERSION_MAX:
*/
enum dsi_phy_version {
DSI_PHY_VERSION_UNKNOWN,
DSI_PHY_VERSION_0_0_HPM, /* 28nm-HPM */
DSI_PHY_VERSION_0_0_LPM, /* 28nm-LPM */
DSI_PHY_VERSION_1_0, /* 20nm */
DSI_PHY_VERSION_2_0, /* 14nm */
DSI_PHY_VERSION_3_0, /* 10nm */
DSI_PHY_VERSION_4_0, /* 7nm */
DSI_PHY_VERSION_4_1, /* 7nm */
DSI_PHY_VERSION_MAX
};
/**
* enum dsi_phy_hw_features - features supported by DSI PHY hardware
* @DSI_PHY_DPHY: Supports DPHY
* @DSI_PHY_CPHY: Supports CPHY
* @DSI_PHY_MAX_FEATURES:
*/
enum dsi_phy_hw_features {
DSI_PHY_DPHY,
DSI_PHY_CPHY,
DSI_PHY_MAX_FEATURES
};
/**
* enum dsi_phy_pll_source - pll clock source for PHY.
* @DSI_PLL_SOURCE_STANDALONE: Clock is sourced from native PLL and is not
* shared by other PHYs.
* @DSI_PLL_SOURCE_NATIVE: Clock is sourced from native PLL and is
* shared by other PHYs.
* @DSI_PLL_SOURCE_NON_NATIVE: Clock is sourced from other PHYs.
* @DSI_PLL_SOURCE_MAX:
*/
enum dsi_phy_pll_source {
DSI_PLL_SOURCE_STANDALONE = 0,
DSI_PLL_SOURCE_NATIVE,
DSI_PLL_SOURCE_NON_NATIVE,
DSI_PLL_SOURCE_MAX
};
/**
* struct dsi_phy_per_lane_cfgs - Holds register values for PHY parameters
* @lane: A set of maximum 8 values for each lane.
 * @lane_v3: A set of maximum 12 values for each lane.
 * @lane_v4: A set of maximum 14 values for each lane.
 * @count_per_lane: Number of values per each lane.
*/
struct dsi_phy_per_lane_cfgs {
u8 lane[DSI_LANE_MAX][DSI_MAX_SETTINGS];
u8 lane_v3[DSI_PHY_TIMING_V3_SIZE];
u8 lane_v4[DSI_PHY_TIMING_V4_SIZE];
u32 count_per_lane;
};
/**
* struct dsi_phy_cfg - DSI PHY configuration
* @lanecfg: Lane configuration settings.
* @strength: Strength settings for lanes.
* @timing: Timing parameters for lanes.
* @is_phy_timing_present: Boolean whether phy timings are defined.
* @regulators: Regulator settings for lanes.
* @pll_source: PLL source.
* @lane_map: DSI logical to PHY lane mapping.
 * @force_clk_lane_hs: Boolean whether to force clock lane in HS mode.
* @bit_clk_rate_hz: DSI bit clk rate in HZ.
*/
struct dsi_phy_cfg {
struct dsi_phy_per_lane_cfgs lanecfg;
struct dsi_phy_per_lane_cfgs strength;
struct dsi_phy_per_lane_cfgs timing;
bool is_phy_timing_present;
struct dsi_phy_per_lane_cfgs regulators;
enum dsi_phy_pll_source pll_source;
struct dsi_lane_map lane_map;
bool force_clk_lane_hs;
unsigned long bit_clk_rate_hz;
};
struct dsi_phy_hw;
struct phy_ulps_config_ops {
/**
* wait_for_lane_idle() - wait for DSI lanes to go to idle state
* @phy: Pointer to DSI PHY hardware instance.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to be checked to be in idle state.
*/
int (*wait_for_lane_idle)(struct dsi_phy_hw *phy, u32 lanes);
/**
* ulps_request() - request ulps entry for specified lanes
* @phy: Pointer to DSI PHY hardware instance.
* @cfg: Per lane configurations for timing, strength and lane
* configurations.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to enter ULPS.
*
* Caller should check if lanes are in ULPS mode by calling
* get_lanes_in_ulps() operation.
*/
void (*ulps_request)(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, u32 lanes);
/**
* ulps_exit() - exit ULPS on specified lanes
* @phy: Pointer to DSI PHY hardware instance.
* @cfg: Per lane configurations for timing, strength and lane
* configurations.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
* to exit ULPS.
*
* Caller should check if lanes are in active mode by calling
* get_lanes_in_ulps() operation.
*/
void (*ulps_exit)(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, u32 lanes);
/**
* get_lanes_in_ulps() - returns the list of lanes in ULPS mode
* @phy: Pointer to DSI PHY hardware instance.
*
* Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
* state.
*
* Return: List of lanes in ULPS state.
*/
u32 (*get_lanes_in_ulps)(struct dsi_phy_hw *phy);
/**
	 * is_lanes_in_ulps() - checks if the given lanes are in ulps
	 * @ulps: lanes to be checked.
	 * @ulps_lanes: lanes currently in ulps.
*
* Return: true if all the given lanes are in ulps; false otherwise.
*/
bool (*is_lanes_in_ulps)(u32 ulps, u32 ulps_lanes);
};
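/*
 * ULPS entry sequence implied by the ops above (a hedged sketch; the
 * phy driver performs this with its own locking and error handling):
 *
 *	u32 lanes = ...;	// ORed enum dsi_data_lanes to park
 *
 *	if (!ops->wait_for_lane_idle(phy, lanes)) {
 *		ops->ulps_request(phy, cfg, lanes);
 *		if (!ops->is_lanes_in_ulps(lanes,
 *				ops->get_lanes_in_ulps(phy)))
 *			// lanes failed to enter ULPS; treat as error
 *	}
 */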
/**
* struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
* @regulator_enable: Enable PHY regulators.
* @regulator_disable: Disable PHY regulators.
* @enable: Enable PHY.
* @disable: Disable PHY.
* @calculate_timing_params: Calculate PHY timing params from mode information
*/
struct dsi_phy_hw_ops {
/**
* regulator_enable() - enable regulators for DSI PHY
* @phy: Pointer to DSI PHY hardware object.
* @reg_cfg: Regulator configuration for all DSI lanes.
*/
void (*regulator_enable)(struct dsi_phy_hw *phy,
struct dsi_phy_per_lane_cfgs *reg_cfg);
/**
* regulator_disable() - disable regulators
* @phy: Pointer to DSI PHY hardware object.
*/
void (*regulator_disable)(struct dsi_phy_hw *phy);
/**
* enable() - Enable PHY hardware
* @phy: Pointer to DSI PHY hardware object.
* @cfg: Per lane configurations for timing, strength and lane
* configurations.
*/
void (*enable)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
/**
* disable() - Disable PHY hardware
* @phy: Pointer to DSI PHY hardware object.
* @cfg: Per lane configurations for timing, strength and lane
* configurations.
*/
void (*disable)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
/**
* phy_idle_on() - Enable PHY hardware when entering idle screen
* @phy: Pointer to DSI PHY hardware object.
* @cfg: Per lane configurations for timing, strength and lane
* configurations.
*/
void (*phy_idle_on)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
/**
* phy_idle_off() - Disable PHY hardware when exiting idle screen
* @phy: Pointer to DSI PHY hardware object.
*/
void (*phy_idle_off)(struct dsi_phy_hw *phy);
/**
* calculate_timing_params() - calculates timing parameters.
* @phy: Pointer to DSI PHY hardware object.
* @mode: Mode information for which timing has to be calculated.
* @config: DSI host configuration for this mode.
* @timing: Timing parameters for each lane which will be returned.
*/
int (*calculate_timing_params)(struct dsi_phy_hw *phy,
struct dsi_mode_info *mode,
struct dsi_host_common_cfg *config,
struct dsi_phy_per_lane_cfgs *timing);
/**
* phy_timing_val() - Gets PHY timing values.
* @timing_val: Timing parameters for each lane which will be returned.
* @timing: Array containing PHY timing values
* @size: Size of the array
*/
int (*phy_timing_val)(struct dsi_phy_per_lane_cfgs *timing_val,
u32 *timing, u32 size);
/**
* clamp_ctrl() - configure clamps for DSI lanes
* @phy: DSI PHY handle.
* @enable: boolean to specify clamp enable/disable.
* Return: error code.
*/
void (*clamp_ctrl)(struct dsi_phy_hw *phy, bool enable);
/**
* phy_lane_reset() - Reset dsi phy lanes in case of error.
* @phy: Pointer to DSI PHY hardware object.
* Return: error code.
*/
int (*phy_lane_reset)(struct dsi_phy_hw *phy);
/**
* toggle_resync_fifo() - toggle resync retime FIFO to sync data paths
* @phy: Pointer to DSI PHY hardware object.
* Return: error code.
*/
void (*toggle_resync_fifo)(struct dsi_phy_hw *phy);
/**
* reset_clk_en_sel() - reset clk_en_sel on phy cmn_clk_cfg1 register
* @phy: Pointer to DSI PHY hardware object.
*/
void (*reset_clk_en_sel)(struct dsi_phy_hw *phy);
void *timing_ops;
struct phy_ulps_config_ops ulps_ops;
};
/**
* struct dsi_phy_hw - DSI phy hardware object specific to an instance
* @base: VA for the DSI PHY base address.
* @length: Length of the DSI PHY register base map.
* @index: Instance ID of the controller.
* @version: DSI PHY version.
* @phy_clamp_base: Base address of phy clamp register map.
* @feature_map: Features supported by DSI PHY.
* @ops: Function pointer to PHY operations.
*/
struct dsi_phy_hw {
void __iomem *base;
u32 length;
u32 index;
enum dsi_phy_version version;
void __iomem *phy_clamp_base;
DECLARE_BITMAP(feature_map, DSI_PHY_MAX_FEATURES);
struct dsi_phy_hw_ops ops;
};
/**
* dsi_phy_conv_phy_to_logical_lane() - Convert physical to logical lane
 * @lane_map: DSI logical to PHY lane mapping
* @phy_lane: physical lane
*
* Return: Error code on failure. Lane number on success.
*/
int dsi_phy_conv_phy_to_logical_lane(
struct dsi_lane_map *lane_map, enum dsi_phy_data_lanes phy_lane);
/**
* dsi_phy_conv_logical_to_phy_lane() - Convert logical to physical lane
 * @lane_map: DSI logical to PHY lane mapping
* @lane: logical lane
*
* Return: Error code on failure. Lane number on success.
*/
int dsi_phy_conv_logical_to_phy_lane(
struct dsi_lane_map *lane_map, enum dsi_logical_lane lane);
#endif /* _DSI_PHY_HW_H_ */

msm/dsi/dsi_phy_hw_v2_0.c (new file)
@@ -0,0 +1,267 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "dsi-phy-hw:" fmt
#include <linux/math64.h>
#include <linux/delay.h>
#include "dsi_hw.h"
#include "dsi_phy_hw.h"
#define DSIPHY_CMN_REVISION_ID0 0x0000
#define DSIPHY_CMN_REVISION_ID1 0x0004
#define DSIPHY_CMN_REVISION_ID2 0x0008
#define DSIPHY_CMN_REVISION_ID3 0x000C
#define DSIPHY_CMN_CLK_CFG0 0x0010
#define DSIPHY_CMN_CLK_CFG1 0x0014
#define DSIPHY_CMN_GLBL_TEST_CTRL 0x0018
#define DSIPHY_CMN_CTRL_0 0x001C
#define DSIPHY_CMN_CTRL_1 0x0020
#define DSIPHY_CMN_CAL_HW_TRIGGER 0x0024
#define DSIPHY_CMN_CAL_SW_CFG0 0x0028
#define DSIPHY_CMN_CAL_SW_CFG1 0x002C
#define DSIPHY_CMN_CAL_SW_CFG2 0x0030
#define DSIPHY_CMN_CAL_HW_CFG0 0x0034
#define DSIPHY_CMN_CAL_HW_CFG1 0x0038
#define DSIPHY_CMN_CAL_HW_CFG2 0x003C
#define DSIPHY_CMN_CAL_HW_CFG3 0x0040
#define DSIPHY_CMN_CAL_HW_CFG4 0x0044
#define DSIPHY_CMN_PLL_CNTRL 0x0048
#define DSIPHY_CMN_LDO_CNTRL 0x004C
#define DSIPHY_CMN_REGULATOR_CAL_STATUS0 0x0064
#define DSIPHY_CMN_REGULATOR_CAL_STATUS1 0x0068
#define DSI_MDP_ULPS_CLAMP_ENABLE_OFF 0x0054
/* n = 0..3 for data lanes and n = 4 for clock lane */
#define DSIPHY_DLNX_CFG0(n) (0x100 + ((n) * 0x80))
#define DSIPHY_DLNX_CFG1(n) (0x104 + ((n) * 0x80))
#define DSIPHY_DLNX_CFG2(n) (0x108 + ((n) * 0x80))
#define DSIPHY_DLNX_CFG3(n) (0x10C + ((n) * 0x80))
#define DSIPHY_DLNX_TEST_DATAPATH(n) (0x110 + ((n) * 0x80))
#define DSIPHY_DLNX_TEST_STR(n) (0x114 + ((n) * 0x80))
#define DSIPHY_DLNX_TIMING_CTRL_4(n) (0x118 + ((n) * 0x80))
#define DSIPHY_DLNX_TIMING_CTRL_5(n) (0x11C + ((n) * 0x80))
#define DSIPHY_DLNX_TIMING_CTRL_6(n) (0x120 + ((n) * 0x80))
#define DSIPHY_DLNX_TIMING_CTRL_7(n) (0x124 + ((n) * 0x80))
#define DSIPHY_DLNX_TIMING_CTRL_8(n) (0x128 + ((n) * 0x80))
#define DSIPHY_DLNX_TIMING_CTRL_9(n) (0x12C + ((n) * 0x80))
#define DSIPHY_DLNX_TIMING_CTRL_10(n) (0x130 + ((n) * 0x80))
#define DSIPHY_DLNX_TIMING_CTRL_11(n) (0x134 + ((n) * 0x80))
#define DSIPHY_DLNX_STRENGTH_CTRL_0(n) (0x138 + ((n) * 0x80))
#define DSIPHY_DLNX_STRENGTH_CTRL_1(n) (0x13C + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_POLY(n) (0x140 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_SEED0(n) (0x144 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_SEED1(n) (0x148 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_HEAD(n) (0x14C + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_SOT(n) (0x150 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_CTRL0(n) (0x154 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_CTRL1(n) (0x158 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_CTRL2(n) (0x15C + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_CTRL3(n) (0x160 + ((n) * 0x80))
#define DSIPHY_DLNX_VREG_CNTRL(n) (0x164 + ((n) * 0x80))
#define DSIPHY_DLNX_HSTX_STR_STATUS(n) (0x168 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_STATUS0(n) (0x16C + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_STATUS1(n) (0x170 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_STATUS2(n) (0x174 + ((n) * 0x80))
#define DSIPHY_DLNX_BIST_STATUS3(n) (0x178 + ((n) * 0x80))
#define DSIPHY_DLNX_MISR_STATUS(n) (0x17C + ((n) * 0x80))
#define DSIPHY_PLL_CLKBUFLR_EN 0x041C
#define DSIPHY_PLL_PLL_BANDGAP 0x0508
/**
* regulator_enable() - enable regulators for DSI PHY
* @phy: Pointer to DSI PHY hardware object.
* @reg_cfg: Regulator configuration for all DSI lanes.
*/
void dsi_phy_hw_v2_0_regulator_enable(struct dsi_phy_hw *phy,
struct dsi_phy_per_lane_cfgs *reg_cfg)
{
int i;
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(i), reg_cfg->lane[i][0]);
/* make sure all values are written to hardware */
wmb();
pr_debug("[DSI_%d] Phy regulators enabled\n", phy->index);
}
/**
* regulator_disable() - disable regulators
* @phy: Pointer to DSI PHY hardware object.
*/
void dsi_phy_hw_v2_0_regulator_disable(struct dsi_phy_hw *phy)
{
pr_debug("[DSI_%d] Phy regulators disabled\n", phy->index);
}
/**
* enable() - Enable PHY hardware
* @phy: Pointer to DSI PHY hardware object.
* @cfg: Per lane configurations for timing, strength and lane
* configurations.
*/
void dsi_phy_hw_v2_0_enable(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg)
{
int i;
struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
u32 data;
DSI_W32(phy, DSIPHY_CMN_LDO_CNTRL, 0x1C);
DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0x1);
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
DSI_W32(phy, DSIPHY_DLNX_CFG0(i), cfg->lanecfg.lane[i][0]);
DSI_W32(phy, DSIPHY_DLNX_CFG1(i), cfg->lanecfg.lane[i][1]);
DSI_W32(phy, DSIPHY_DLNX_CFG2(i), cfg->lanecfg.lane[i][2]);
DSI_W32(phy, DSIPHY_DLNX_CFG3(i), cfg->lanecfg.lane[i][3]);
DSI_W32(phy, DSIPHY_DLNX_TEST_STR(i), 0x88);
DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_4(i), timing->lane[i][0]);
DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_5(i), timing->lane[i][1]);
DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_6(i), timing->lane[i][2]);
DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_7(i), timing->lane[i][3]);
DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_8(i), timing->lane[i][4]);
DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_9(i), timing->lane[i][5]);
DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_10(i), timing->lane[i][6]);
DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_11(i), timing->lane[i][7]);
DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_0(i),
cfg->strength.lane[i][0]);
DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_1(i),
cfg->strength.lane[i][1]);
}
/* make sure all values are written to hardware before enabling phy */
wmb();
DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x80);
udelay(100);
DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x00);
data = DSI_R32(phy, DSIPHY_CMN_GLBL_TEST_CTRL);
switch (cfg->pll_source) {
case DSI_PLL_SOURCE_STANDALONE:
DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x01);
data &= ~BIT(2);
break;
case DSI_PLL_SOURCE_NATIVE:
DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x03);
data &= ~BIT(2);
break;
case DSI_PLL_SOURCE_NON_NATIVE:
DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x00);
data |= BIT(2);
break;
default:
break;
}
DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, data);
/* Enable bias current for pll1 during split display case */
if (cfg->pll_source == DSI_PLL_SOURCE_NON_NATIVE)
DSI_W32(phy, DSIPHY_PLL_PLL_BANDGAP, 0x3);
pr_debug("[DSI_%d]Phy enabled\n", phy->index);
}
/**
* disable() - Disable PHY hardware
 * @phy: Pointer to DSI PHY hardware object.
 * @cfg: Per lane configurations (unused in the v2.0 disable path).
 */
void dsi_phy_hw_v2_0_disable(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg)
{
DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0);
DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0);
DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0);
pr_debug("[DSI_%d]Phy disabled\n", phy->index);
}
/**
* dsi_phy_hw_v2_0_idle_on() - Enable DSI PHY hardware during idle screen
* @phy: Pointer to DSI PHY hardware object.
* @cfg: Per lane configurations.
*/
void dsi_phy_hw_v2_0_idle_on(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg)
{
int i = 0;
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_0(i),
cfg->strength.lane[i][0]);
DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_1(i),
cfg->strength.lane[i][1]);
}
wmb(); /* make sure write happens */
pr_debug("[DSI_%d]Phy enabled out of idle screen\n", phy->index);
}
/**
* dsi_phy_hw_v2_0_idle_off() - Disable DSI PHY hardware during idle screen
* @phy: Pointer to DSI PHY hardware object.
*/
void dsi_phy_hw_v2_0_idle_off(struct dsi_phy_hw *phy)
{
int i = 0;
DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x7f);
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(i), 0x1c);
DSI_W32(phy, DSIPHY_CMN_LDO_CNTRL, 0x1C);
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_1(i), 0x0);
wmb(); /* make sure write happens */
pr_debug("[DSI_%d]Phy disabled during idle screen\n", phy->index);
}
int dsi_phy_hw_timing_val_v2_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
u32 *timing_val, u32 size)
{
int i = 0, j = 0;
if (size != (DSI_LANE_MAX * DSI_MAX_SETTINGS)) {
pr_err("Unexpected timing array size %d\n", size);
return -EINVAL;
}
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
for (j = 0; j < DSI_MAX_SETTINGS; j++) {
timing_cfg->lane[i][j] = *timing_val;
timing_val++;
}
}
return 0;
}
void dsi_phy_hw_v2_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable)
{
u32 clamp_reg = 0;
if (!phy->phy_clamp_base) {
pr_debug("phy_clamp_base NULL\n");
return;
}
if (enable) {
clamp_reg |= BIT(0);
DSI_MISC_W32(phy, DSI_MDP_ULPS_CLAMP_ENABLE_OFF,
clamp_reg);
pr_debug("clamp enabled\n");
} else {
clamp_reg &= ~BIT(0);
DSI_MISC_W32(phy, DSI_MDP_ULPS_CLAMP_ENABLE_OFF,
clamp_reg);
pr_debug("clamp disabled\n");
}
}

msm/dsi/dsi_phy_hw_v3_0.c Normal file

@@ -0,0 +1,464 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "dsi-phy-hw:" fmt
#include <linux/math64.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include "dsi_hw.h"
#include "dsi_phy_hw.h"
#include "dsi_catalog.h"
#define DSIPHY_CMN_CLK_CFG0 0x010
#define DSIPHY_CMN_CLK_CFG1 0x014
#define DSIPHY_CMN_GLBL_CTRL 0x018
#define DSIPHY_CMN_RBUF_CTRL 0x01C
#define DSIPHY_CMN_VREG_CTRL 0x020
#define DSIPHY_CMN_CTRL_0 0x024
#define DSIPHY_CMN_CTRL_1 0x028
#define DSIPHY_CMN_CTRL_2 0x02C
#define DSIPHY_CMN_LANE_CFG0 0x030
#define DSIPHY_CMN_LANE_CFG1 0x034
#define DSIPHY_CMN_PLL_CNTRL 0x038
#define DSIPHY_CMN_LANE_CTRL0 0x098
#define DSIPHY_CMN_LANE_CTRL1 0x09C
#define DSIPHY_CMN_LANE_CTRL2 0x0A0
#define DSIPHY_CMN_LANE_CTRL3 0x0A4
#define DSIPHY_CMN_LANE_CTRL4 0x0A8
#define DSIPHY_CMN_TIMING_CTRL_0 0x0AC
#define DSIPHY_CMN_TIMING_CTRL_1 0x0B0
#define DSIPHY_CMN_TIMING_CTRL_2 0x0B4
#define DSIPHY_CMN_TIMING_CTRL_3 0x0B8
#define DSIPHY_CMN_TIMING_CTRL_4 0x0BC
#define DSIPHY_CMN_TIMING_CTRL_5 0x0C0
#define DSIPHY_CMN_TIMING_CTRL_6 0x0C4
#define DSIPHY_CMN_TIMING_CTRL_7 0x0C8
#define DSIPHY_CMN_TIMING_CTRL_8 0x0CC
#define DSIPHY_CMN_TIMING_CTRL_9 0x0D0
#define DSIPHY_CMN_TIMING_CTRL_10 0x0D4
#define DSIPHY_CMN_TIMING_CTRL_11 0x0D8
#define DSIPHY_CMN_PHY_STATUS 0x0EC
#define DSIPHY_CMN_LANE_STATUS0 0x0F4
#define DSIPHY_CMN_LANE_STATUS1 0x0F8
/* n = 0..3 for data lanes and n = 4 for clock lane */
#define DSIPHY_LNX_CFG0(n) (0x200 + (0x80 * (n)))
#define DSIPHY_LNX_CFG1(n) (0x204 + (0x80 * (n)))
#define DSIPHY_LNX_CFG2(n) (0x208 + (0x80 * (n)))
#define DSIPHY_LNX_CFG3(n) (0x20C + (0x80 * (n)))
#define DSIPHY_LNX_TEST_DATAPATH(n) (0x210 + (0x80 * (n)))
#define DSIPHY_LNX_PIN_SWAP(n) (0x214 + (0x80 * (n)))
#define DSIPHY_LNX_HSTX_STR_CTRL(n) (0x218 + (0x80 * (n)))
#define DSIPHY_LNX_OFFSET_TOP_CTRL(n) (0x21C + (0x80 * (n)))
#define DSIPHY_LNX_OFFSET_BOT_CTRL(n) (0x220 + (0x80 * (n)))
#define DSIPHY_LNX_LPTX_STR_CTRL(n) (0x224 + (0x80 * (n)))
#define DSIPHY_LNX_LPRX_CTRL(n) (0x228 + (0x80 * (n)))
#define DSIPHY_LNX_TX_DCTRL(n) (0x22C + (0x80 * (n)))
/**
* dsi_phy_hw_v3_0_regulator_enable() - enable regulators for DSI PHY
* @phy: Pointer to DSI PHY hardware object.
* @reg_cfg: Regulator configuration for all DSI lanes.
*/
void dsi_phy_hw_v3_0_regulator_enable(struct dsi_phy_hw *phy,
struct dsi_phy_per_lane_cfgs *reg_cfg)
{
pr_debug("[DSI_%d] Phy regulators enabled\n", phy->index);
/* Nothing to be done for DSI PHY regulator enable */
}
/**
* dsi_phy_hw_v3_0_regulator_disable() - disable regulators
* @phy: Pointer to DSI PHY hardware object.
*/
void dsi_phy_hw_v3_0_regulator_disable(struct dsi_phy_hw *phy)
{
pr_debug("[DSI_%d] Phy regulators disabled\n", phy->index);
/* Nothing to be done for DSI PHY regulator disable */
}
void dsi_phy_hw_v3_0_toggle_resync_fifo(struct dsi_phy_hw *phy)
{
DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x00);
/* ensure that the FIFO is off */
wmb();
DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x1);
/* ensure that the FIFO is toggled back on */
wmb();
}
static int dsi_phy_hw_v3_0_is_pll_on(struct dsi_phy_hw *phy)
{
u32 data = 0;
data = DSI_R32(phy, DSIPHY_CMN_PLL_CNTRL);
mb(); /* make sure read happened */
return (data & BIT(0));
}
static void dsi_phy_hw_v3_0_config_lpcdrx(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, bool enable)
{
int phy_lane_0 = dsi_phy_conv_logical_to_phy_lane(&cfg->lane_map,
DSI_LOGICAL_LANE_0);
/*
* LPRX and CDRX need to be enabled only for the physical data lane
* corresponding to logical data lane 0.
*/
if (enable)
DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(phy_lane_0),
cfg->strength.lane[phy_lane_0][1]);
else
DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(phy_lane_0), 0);
}
static void dsi_phy_hw_v3_0_lane_swap_config(struct dsi_phy_hw *phy,
struct dsi_lane_map *lane_map)
{
DSI_W32(phy, DSIPHY_CMN_LANE_CFG0,
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_0] |
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_1] << 4)));
DSI_W32(phy, DSIPHY_CMN_LANE_CFG1,
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_2] |
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_3] << 4)));
}
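The nibble packing above places one 4-bit physical-lane index per logical lane: logical lanes 0 and 1 land in the low and high nibbles of LANE_CFG0, lanes 2 and 3 in LANE_CFG1. A small standalone sketch of the packing, using an assumed identity lane map as input:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Assumed example map: logical lane i -> physical lane i */
    uint8_t map[4] = {0, 1, 2, 3};
    unsigned cfg0 = map[0] | (map[1] << 4); /* logical lanes 0 and 1 */
    unsigned cfg1 = map[2] | (map[3] << 4); /* logical lanes 2 and 3 */

    /* Prints LANE_CFG0=0x10 LANE_CFG1=0x32 */
    printf("LANE_CFG0=0x%02X LANE_CFG1=0x%02X\n", cfg0, cfg1);
    return 0;
}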
static void dsi_phy_hw_v3_0_lane_settings(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg)
{
int i;
u8 tx_dctrl[] = {0x00, 0x00, 0x00, 0x04, 0x01};
/* Strength ctrl settings */
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
DSI_W32(phy, DSIPHY_LNX_LPTX_STR_CTRL(i),
cfg->strength.lane[i][0]);
/*
* Disable LPRX and CDRX for all lanes; they are re-enabled
* later only for the physical data lane corresponding to
* logical data lane 0.
*/
DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(i), 0);
DSI_W32(phy, DSIPHY_LNX_PIN_SWAP(i), 0x0);
DSI_W32(phy, DSIPHY_LNX_HSTX_STR_CTRL(i), 0x88);
}
dsi_phy_hw_v3_0_config_lpcdrx(phy, cfg, true);
/* other settings */
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
DSI_W32(phy, DSIPHY_LNX_CFG0(i), cfg->lanecfg.lane[i][0]);
DSI_W32(phy, DSIPHY_LNX_CFG1(i), cfg->lanecfg.lane[i][1]);
DSI_W32(phy, DSIPHY_LNX_CFG2(i), cfg->lanecfg.lane[i][2]);
DSI_W32(phy, DSIPHY_LNX_CFG3(i), cfg->lanecfg.lane[i][3]);
DSI_W32(phy, DSIPHY_LNX_OFFSET_TOP_CTRL(i), 0x0);
DSI_W32(phy, DSIPHY_LNX_OFFSET_BOT_CTRL(i), 0x0);
DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(i), tx_dctrl[i]);
}
}
void dsi_phy_hw_v3_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable)
{
u32 reg;
pr_debug("enable=%s\n", enable ? "true" : "false");
/*
* DSI PHY lane clamps, also referred to as PHY FreezeIO, are
* enabled by default as part of the initialization sequence.
* They are triggered anytime the chip FreezeIO is asserted.
*/
if (enable)
return;
/*
* Toggle BIT(0) to explicitly release the PHY freeze I/O and
* disable the clamps.
*/
reg = DSI_R32(phy, DSIPHY_LNX_TX_DCTRL(3));
DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), reg | BIT(0));
wmb(); /* Ensure that the freezeio bit is toggled */
DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), reg & ~BIT(0));
wmb(); /* Ensure that the freezeio bit is toggled */
}
/**
* dsi_phy_hw_v3_0_enable() - Enable PHY hardware
* @phy: Pointer to DSI PHY hardware object.
* @cfg: Per lane configurations for timing, strength and lane
* configurations.
*/
void dsi_phy_hw_v3_0_enable(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg)
{
int rc = 0;
u32 status;
u32 const delay_us = 5;
u32 const timeout_us = 1000;
struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
u32 data;
if (dsi_phy_hw_v3_0_is_pll_on(phy))
pr_warn("PLL turned on before configuring PHY\n");
/* wait for REFGEN READY */
rc = readl_poll_timeout_atomic(phy->base + DSIPHY_CMN_PHY_STATUS,
status, (status & BIT(0)), delay_us, timeout_us);
if (rc) {
pr_err("Ref gen not ready. Aborting\n");
return;
}
/* de-assert digital and pll power down */
data = BIT(6) | BIT(5);
DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
/* Assert PLL core reset */
DSI_W32(phy, DSIPHY_CMN_PLL_CNTRL, 0x00);
/* turn off resync FIFO */
DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x00);
/* Select MS1 byte-clk */
DSI_W32(phy, DSIPHY_CMN_GLBL_CTRL, 0x10);
/* Enable LDO */
DSI_W32(phy, DSIPHY_CMN_VREG_CTRL, 0x59);
/* Configure PHY lane swap */
dsi_phy_hw_v3_0_lane_swap_config(phy, &cfg->lane_map);
/* DSI PHY timings */
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_0, timing->lane_v3[0]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_1, timing->lane_v3[1]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_2, timing->lane_v3[2]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_3, timing->lane_v3[3]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_4, timing->lane_v3[4]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_5, timing->lane_v3[5]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_6, timing->lane_v3[6]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_7, timing->lane_v3[7]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_8, timing->lane_v3[8]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_9, timing->lane_v3[9]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_10, timing->lane_v3[10]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_11, timing->lane_v3[11]);
/* Remove power down from all blocks */
DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x7f);
/* power up lanes */
data = DSI_R32(phy, DSIPHY_CMN_CTRL_0);
/* TODO: only power up lanes that are used */
data |= 0x1F;
DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0x1F);
/* Select full-rate mode */
DSI_W32(phy, DSIPHY_CMN_CTRL_2, 0x40);
switch (cfg->pll_source) {
case DSI_PLL_SOURCE_STANDALONE:
case DSI_PLL_SOURCE_NATIVE:
data = 0x0; /* internal PLL */
break;
case DSI_PLL_SOURCE_NON_NATIVE:
data = 0x1; /* external PLL */
break;
default:
break;
}
DSI_W32(phy, DSIPHY_CMN_CLK_CFG1, (data << 2)); /* set PLL src */
/* DSI lane settings */
dsi_phy_hw_v3_0_lane_settings(phy, cfg);
pr_debug("[DSI_%d]Phy enabled\n", phy->index);
}
/**
* dsi_phy_hw_v3_0_disable() - Disable PHY hardware
* @phy: Pointer to DSI PHY hardware object.
* @cfg: Per lane configurations.
*/
void dsi_phy_hw_v3_0_disable(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg)
{
u32 data = 0;
if (dsi_phy_hw_v3_0_is_pll_on(phy))
pr_warn("Turning OFF PHY while PLL is on\n");
dsi_phy_hw_v3_0_config_lpcdrx(phy, cfg, false);
data = DSI_R32(phy, DSIPHY_CMN_CTRL_0);
/* disable all lanes */
data &= ~0x1F;
DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0);
/* Turn off all PHY blocks */
DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x00);
/* make sure phy is turned off */
wmb();
pr_debug("[DSI_%d]Phy disabled\n", phy->index);
}
int dsi_phy_hw_v3_0_wait_for_lane_idle(
struct dsi_phy_hw *phy, u32 lanes)
{
int rc = 0, val = 0;
u32 stop_state_mask = 0;
u32 const sleep_us = 10;
u32 const timeout_us = 100;
stop_state_mask = BIT(4); /* clock lane */
if (lanes & DSI_DATA_LANE_0)
stop_state_mask |= BIT(0);
if (lanes & DSI_DATA_LANE_1)
stop_state_mask |= BIT(1);
if (lanes & DSI_DATA_LANE_2)
stop_state_mask |= BIT(2);
if (lanes & DSI_DATA_LANE_3)
stop_state_mask |= BIT(3);
pr_debug("%s: polling for lanes to be in stop state, mask=0x%08x\n",
__func__, stop_state_mask);
rc = readl_poll_timeout(phy->base + DSIPHY_CMN_LANE_STATUS1, val,
((val & stop_state_mask) == stop_state_mask),
sleep_us, timeout_us);
if (rc) {
pr_err("%s: lanes not in stop state, LANE_STATUS=0x%08x\n",
__func__, val);
return rc;
}
return 0;
}
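The stop-state mask built above mirrors the LANE_STATUS layout: data lanes 0-3 occupy bits 0-3 and the clock lane bit 4. A hedged sketch of the mask construction for a two-lane configuration; the one-hot DSI_DATA_LANE_* values are an assumption here, which is exactly why the driver maps each flag through an explicit if chain:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

int main(void)
{
    /* Assumed flag values; the real enum may differ */
    uint32_t lane0 = BIT(0), lane1 = BIT(1);
    uint32_t lanes = lane0 | lane1; /* example: two data lanes active */
    uint32_t mask = BIT(4);         /* clock lane is always polled */

    if (lanes & lane0)
        mask |= BIT(0);
    if (lanes & lane1)
        mask |= BIT(1);

    printf("stop_state_mask = 0x%02X\n", (unsigned)mask); /* 0x13 */
    return 0;
}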
void dsi_phy_hw_v3_0_ulps_request(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, u32 lanes)
{
u32 reg = 0;
if (lanes & DSI_CLOCK_LANE)
reg = BIT(4);
if (lanes & DSI_DATA_LANE_0)
reg |= BIT(0);
if (lanes & DSI_DATA_LANE_1)
reg |= BIT(1);
if (lanes & DSI_DATA_LANE_2)
reg |= BIT(2);
if (lanes & DSI_DATA_LANE_3)
reg |= BIT(3);
/*
* ULPS entry request. Wait for a short time to make sure
* that the lanes enter ULPS. Recommended as per HPG.
*/
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg);
usleep_range(100, 110);
/* disable LPRX and CDRX */
dsi_phy_hw_v3_0_config_lpcdrx(phy, cfg, false);
/* disable lane LDOs */
DSI_W32(phy, DSIPHY_CMN_VREG_CTRL, 0x19);
pr_debug("[DSI_PHY%d] ULPS requested for lanes 0x%x\n", phy->index,
lanes);
}
int dsi_phy_hw_v3_0_lane_reset(struct dsi_phy_hw *phy)
{
int ret = 0, loop = 10, u_dly = 200;
u32 ln_status = 0;
while ((ln_status != 0x1f) && loop) {
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x1f);
wmb(); /* ensure register is committed */
loop--;
udelay(u_dly);
ln_status = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS1);
pr_debug("trial no: %d\n", loop);
}
if (!loop)
pr_debug("could not reset phy lanes\n");
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x0);
wmb(); /* ensure register is committed */
return ret;
}
void dsi_phy_hw_v3_0_ulps_exit(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, u32 lanes)
{
u32 reg = 0;
if (lanes & DSI_CLOCK_LANE)
reg = BIT(4);
if (lanes & DSI_DATA_LANE_0)
reg |= BIT(0);
if (lanes & DSI_DATA_LANE_1)
reg |= BIT(1);
if (lanes & DSI_DATA_LANE_2)
reg |= BIT(2);
if (lanes & DSI_DATA_LANE_3)
reg |= BIT(3);
/* enable lane LDOs */
DSI_W32(phy, DSIPHY_CMN_VREG_CTRL, 0x59);
/* enable LPRX and CDRX */
dsi_phy_hw_v3_0_config_lpcdrx(phy, cfg, true);
/* ULPS exit request */
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL2, reg);
usleep_range(1000, 1010);
/* Clear ULPS request flags on all lanes */
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, 0);
/* Clear ULPS exit flags on all lanes */
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL2, 0);
/*
* Sometimes when exiting ULPS, it is possible that some DSI
* lanes are not in the stop state, which could lead to DSI
* commands not going through. To avoid this, force the lanes
* into the stop state.
*/
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, reg);
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0);
usleep_range(100, 110);
}
u32 dsi_phy_hw_v3_0_get_lanes_in_ulps(struct dsi_phy_hw *phy)
{
u32 lanes = 0;
lanes = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS0);
pr_debug("[DSI_PHY%d] lanes in ulps = 0x%x\n", phy->index, lanes);
return lanes;
}
bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes)
{
if (lanes & ulps_lanes)
return false;
return true;
}
int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
u32 *timing_val, u32 size)
{
int i = 0;
if (size != DSI_PHY_TIMING_V3_SIZE) {
pr_err("Unexpected timing array size %d\n", size);
return -EINVAL;
}
for (i = 0; i < size; i++)
timing_cfg->lane_v3[i] = timing_val[i];
return 0;
}

msm/dsi/dsi_phy_hw_v4_0.c Normal file

@@ -0,0 +1,476 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "dsi-phy-hw-v4: %s:" fmt, __func__
#include <linux/math64.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include "dsi_hw.h"
#include "dsi_phy_hw.h"
#include "dsi_catalog.h"
#define DSIPHY_CMN_REVISION_ID0 0x000
#define DSIPHY_CMN_REVISION_ID1 0x004
#define DSIPHY_CMN_REVISION_ID2 0x008
#define DSIPHY_CMN_REVISION_ID3 0x00C
#define DSIPHY_CMN_CLK_CFG0 0x010
#define DSIPHY_CMN_CLK_CFG1 0x014
#define DSIPHY_CMN_GLBL_CTRL 0x018
#define DSIPHY_CMN_RBUF_CTRL 0x01C
#define DSIPHY_CMN_VREG_CTRL_0 0x020
#define DSIPHY_CMN_CTRL_0 0x024
#define DSIPHY_CMN_CTRL_1 0x028
#define DSIPHY_CMN_CTRL_2 0x02C
#define DSIPHY_CMN_CTRL_3 0x030
#define DSIPHY_CMN_LANE_CFG0 0x034
#define DSIPHY_CMN_LANE_CFG1 0x038
#define DSIPHY_CMN_PLL_CNTRL 0x03C
#define DSIPHY_CMN_DPHY_SOT 0x040
#define DSIPHY_CMN_LANE_CTRL0 0x0A0
#define DSIPHY_CMN_LANE_CTRL1 0x0A4
#define DSIPHY_CMN_LANE_CTRL2 0x0A8
#define DSIPHY_CMN_LANE_CTRL3 0x0AC
#define DSIPHY_CMN_LANE_CTRL4 0x0B0
#define DSIPHY_CMN_TIMING_CTRL_0 0x0B4
#define DSIPHY_CMN_TIMING_CTRL_1 0x0B8
#define DSIPHY_CMN_TIMING_CTRL_2 0x0Bc
#define DSIPHY_CMN_TIMING_CTRL_3 0x0C0
#define DSIPHY_CMN_TIMING_CTRL_4 0x0C4
#define DSIPHY_CMN_TIMING_CTRL_5 0x0C8
#define DSIPHY_CMN_TIMING_CTRL_6 0x0CC
#define DSIPHY_CMN_TIMING_CTRL_7 0x0D0
#define DSIPHY_CMN_TIMING_CTRL_8 0x0D4
#define DSIPHY_CMN_TIMING_CTRL_9 0x0D8
#define DSIPHY_CMN_TIMING_CTRL_10 0x0DC
#define DSIPHY_CMN_TIMING_CTRL_11 0x0E0
#define DSIPHY_CMN_TIMING_CTRL_12 0x0E4
#define DSIPHY_CMN_TIMING_CTRL_13 0x0E8
#define DSIPHY_CMN_GLBL_HSTX_STR_CTRL_0 0x0EC
#define DSIPHY_CMN_GLBL_HSTX_STR_CTRL_1 0x0F0
#define DSIPHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL 0x0F4
#define DSIPHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL 0x0F8
#define DSIPHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL 0x0FC
#define DSIPHY_CMN_GLBL_LPTX_STR_CTRL 0x100
#define DSIPHY_CMN_GLBL_PEMPH_CTRL_0 0x104
#define DSIPHY_CMN_GLBL_PEMPH_CTRL_1 0x108
#define DSIPHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL 0x10C
#define DSIPHY_CMN_VREG_CTRL_1 0x110
#define DSIPHY_CMN_CTRL_4 0x114
#define DSIPHY_CMN_PHY_STATUS 0x140
#define DSIPHY_CMN_LANE_STATUS0 0x148
#define DSIPHY_CMN_LANE_STATUS1 0x14C
/* n = 0..3 for data lanes and n = 4 for clock lane */
#define DSIPHY_LNX_CFG0(n) (0x200 + (0x80 * (n)))
#define DSIPHY_LNX_CFG1(n) (0x204 + (0x80 * (n)))
#define DSIPHY_LNX_CFG2(n) (0x208 + (0x80 * (n)))
#define DSIPHY_LNX_TEST_DATAPATH(n) (0x20C + (0x80 * (n)))
#define DSIPHY_LNX_PIN_SWAP(n) (0x210 + (0x80 * (n)))
#define DSIPHY_LNX_LPRX_CTRL(n) (0x214 + (0x80 * (n)))
#define DSIPHY_LNX_TX_DCTRL(n) (0x218 + (0x80 * (n)))
static int dsi_phy_hw_v4_0_is_pll_on(struct dsi_phy_hw *phy)
{
u32 data = 0;
data = DSI_R32(phy, DSIPHY_CMN_PLL_CNTRL);
mb(); /* make sure read happened */
return (data & BIT(0));
}
static void dsi_phy_hw_v4_0_config_lpcdrx(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, bool enable)
{
int phy_lane_0 = dsi_phy_conv_logical_to_phy_lane(&cfg->lane_map,
DSI_LOGICAL_LANE_0);
/*
* LPRX and CDRX need to be enabled only for the physical data lane
* corresponding to logical data lane 0.
*/
if (enable)
DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(phy_lane_0),
cfg->strength.lane[phy_lane_0][1]);
else
DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(phy_lane_0), 0);
}
static void dsi_phy_hw_v4_0_lane_swap_config(struct dsi_phy_hw *phy,
struct dsi_lane_map *lane_map)
{
DSI_W32(phy, DSIPHY_CMN_LANE_CFG0,
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_0] |
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_1] << 4)));
DSI_W32(phy, DSIPHY_CMN_LANE_CFG1,
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_2] |
(lane_map->lane_map_v2[DSI_LOGICAL_LANE_3] << 4)));
}
static void dsi_phy_hw_v4_0_lane_settings(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg)
{
int i;
u8 tx_dctrl_v4[] = {0x00, 0x00, 0x00, 0x04, 0x01};
u8 tx_dctrl_v4_1[] = {0x40, 0x40, 0x40, 0x46, 0x41};
u8 *tx_dctrl;
if (phy->version == DSI_PHY_VERSION_4_1)
tx_dctrl = &tx_dctrl_v4_1[0];
else
tx_dctrl = &tx_dctrl_v4[0];
/* Strength ctrl settings */
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
/*
* Disable LPRX and CDRX for all lanes; they are re-enabled
* later only for the physical data lane corresponding to
* logical data lane 0.
*/
DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(i), 0);
DSI_W32(phy, DSIPHY_LNX_PIN_SWAP(i), 0x0);
}
dsi_phy_hw_v4_0_config_lpcdrx(phy, cfg, true);
/* other settings */
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
DSI_W32(phy, DSIPHY_LNX_CFG0(i), cfg->lanecfg.lane[i][0]);
DSI_W32(phy, DSIPHY_LNX_CFG1(i), cfg->lanecfg.lane[i][1]);
DSI_W32(phy, DSIPHY_LNX_CFG2(i), cfg->lanecfg.lane[i][2]);
DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(i), tx_dctrl[i]);
}
if (cfg->force_clk_lane_hs) {
u32 reg = DSI_R32(phy, DSIPHY_CMN_LANE_CTRL1);
reg |= BIT(5) | BIT(6);
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg);
}
}
/**
* dsi_phy_hw_v4_0_enable() - Enable PHY hardware
* @phy: Pointer to DSI PHY hardware object.
* @cfg: Per lane configurations for timing, strength and lane
* configurations.
*/
void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg)
{
int rc = 0;
u32 status;
u32 const delay_us = 5;
u32 const timeout_us = 1000;
struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
u32 data;
bool less_than_1500_mhz = false;
u32 vreg_ctrl_0 = 0;
u32 glbl_str_swi_cal_sel_ctrl = 0;
u32 glbl_hstx_str_ctrl_0 = 0;
if (dsi_phy_hw_v4_0_is_pll_on(phy))
pr_warn("PLL turned on before configuring PHY\n");
/* wait for REFGEN READY */
rc = readl_poll_timeout_atomic(phy->base + DSIPHY_CMN_PHY_STATUS,
status, (status & BIT(0)), delay_us, timeout_us);
if (rc) {
pr_err("Ref gen not ready. Aborting\n");
return;
}
if (phy->version == DSI_PHY_VERSION_4_1) {
vreg_ctrl_0 = 0x58;
glbl_str_swi_cal_sel_ctrl = 0x00;
glbl_hstx_str_ctrl_0 = 0x88;
} else {
/* Alter PHY configurations if data rate is less than 1.5 GHz */
if (cfg->bit_clk_rate_hz < 1500000000)
less_than_1500_mhz = true;
vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
}
/* de-assert digital and pll power down */
data = BIT(6) | BIT(5);
DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
/* Assert PLL core reset */
DSI_W32(phy, DSIPHY_CMN_PLL_CNTRL, 0x00);
/* turn off resync FIFO */
DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x00);
/* Configure PHY lane swap */
dsi_phy_hw_v4_0_lane_swap_config(phy, &cfg->lane_map);
/* Enable LDO */
DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_1, 0x5c);
DSI_W32(phy, DSIPHY_CMN_CTRL_3, 0x00);
DSI_W32(phy, DSIPHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
glbl_str_swi_cal_sel_ctrl);
DSI_W32(phy, DSIPHY_CMN_GLBL_HSTX_STR_CTRL_0, glbl_hstx_str_ctrl_0);
DSI_W32(phy, DSIPHY_CMN_GLBL_PEMPH_CTRL_0, 0x00);
DSI_W32(phy, DSIPHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL, 0x03);
DSI_W32(phy, DSIPHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL, 0x3c);
DSI_W32(phy, DSIPHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
/* Remove power down from all blocks */
DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x7f);
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0x1F);
/* Select full-rate mode */
DSI_W32(phy, DSIPHY_CMN_CTRL_2, 0x40);
switch (cfg->pll_source) {
case DSI_PLL_SOURCE_STANDALONE:
case DSI_PLL_SOURCE_NATIVE:
data = 0x0; /* internal PLL */
break;
case DSI_PLL_SOURCE_NON_NATIVE:
data = 0x1; /* external PLL */
break;
default:
break;
}
DSI_W32(phy, DSIPHY_CMN_CLK_CFG1, (data << 2)); /* set PLL src */
/* DSI PHY timings */
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_0, timing->lane_v4[0]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_1, timing->lane_v4[1]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_2, timing->lane_v4[2]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_3, timing->lane_v4[3]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_4, timing->lane_v4[4]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_5, timing->lane_v4[5]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_6, timing->lane_v4[6]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_7, timing->lane_v4[7]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_8, timing->lane_v4[8]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_9, timing->lane_v4[9]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_10, timing->lane_v4[10]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_11, timing->lane_v4[11]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_12, timing->lane_v4[12]);
DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_13, timing->lane_v4[13]);
/* DSI lane settings */
dsi_phy_hw_v4_0_lane_settings(phy, cfg);
pr_debug("[DSI_%d]Phy enabled\n", phy->index);
}
/**
* dsi_phy_hw_v4_0_disable() - Disable PHY hardware
* @phy: Pointer to DSI PHY hardware object.
* @cfg: Per lane configurations.
*/
void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg)
{
u32 data = 0;
if (dsi_phy_hw_v4_0_is_pll_on(phy))
pr_warn("Turning OFF PHY while PLL is on\n");
dsi_phy_hw_v4_0_config_lpcdrx(phy, cfg, false);
data = DSI_R32(phy, DSIPHY_CMN_CTRL_0);
/* disable all lanes */
data &= ~0x1F;
DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0);
/* Turn off all PHY blocks */
DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x00);
/* make sure phy is turned off */
wmb();
pr_debug("[DSI_%d]Phy disabled\n", phy->index);
}
void dsi_phy_hw_v4_0_toggle_resync_fifo(struct dsi_phy_hw *phy)
{
DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x00);
/* ensure that the FIFO is off */
wmb();
DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x1);
/* ensure that the FIFO is toggled back on */
wmb();
}
void dsi_phy_hw_v4_0_reset_clk_en_sel(struct dsi_phy_hw *phy)
{
u32 data = 0;
/* Turning off CLK_EN_SEL after retime buffer sync */
data = DSI_R32(phy, DSIPHY_CMN_CLK_CFG1);
data &= ~BIT(4);
DSI_W32(phy, DSIPHY_CMN_CLK_CFG1, data);
/* ensure that clk_en_sel bit is turned off */
wmb();
}
int dsi_phy_hw_v4_0_wait_for_lane_idle(
struct dsi_phy_hw *phy, u32 lanes)
{
int rc = 0, val = 0;
u32 stop_state_mask = 0;
u32 const sleep_us = 10;
u32 const timeout_us = 100;
stop_state_mask = BIT(4); /* clock lane */
if (lanes & DSI_DATA_LANE_0)
stop_state_mask |= BIT(0);
if (lanes & DSI_DATA_LANE_1)
stop_state_mask |= BIT(1);
if (lanes & DSI_DATA_LANE_2)
stop_state_mask |= BIT(2);
if (lanes & DSI_DATA_LANE_3)
stop_state_mask |= BIT(3);
pr_debug("%s: polling for lanes to be in stop state, mask=0x%08x\n",
__func__, stop_state_mask);
rc = readl_poll_timeout(phy->base + DSIPHY_CMN_LANE_STATUS1, val,
((val & stop_state_mask) == stop_state_mask),
sleep_us, timeout_us);
if (rc) {
pr_err("%s: lanes not in stop state, LANE_STATUS=0x%08x\n",
__func__, val);
return rc;
}
return 0;
}
void dsi_phy_hw_v4_0_ulps_request(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, u32 lanes)
{
u32 reg = 0;
if (lanes & DSI_CLOCK_LANE)
reg = BIT(4);
if (lanes & DSI_DATA_LANE_0)
reg |= BIT(0);
if (lanes & DSI_DATA_LANE_1)
reg |= BIT(1);
if (lanes & DSI_DATA_LANE_2)
reg |= BIT(2);
if (lanes & DSI_DATA_LANE_3)
reg |= BIT(3);
if (cfg->force_clk_lane_hs)
reg |= BIT(5) | BIT(6);
/*
* ULPS entry request. Wait for a short time to make sure
* that the lanes enter ULPS. Recommended as per HPG.
*/
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg);
usleep_range(100, 110);
/* disable LPRX and CDRX */
dsi_phy_hw_v4_0_config_lpcdrx(phy, cfg, false);
pr_debug("[DSI_PHY%d] ULPS requested for lanes 0x%x\n", phy->index,
lanes);
}
int dsi_phy_hw_v4_0_lane_reset(struct dsi_phy_hw *phy)
{
int ret = 0, loop = 10, u_dly = 200;
u32 ln_status = 0;
while ((ln_status != 0x1f) && loop) {
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x1f);
wmb(); /* ensure register is committed */
loop--;
udelay(u_dly);
ln_status = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS1);
pr_debug("trial no: %d\n", loop);
}
if (!loop)
pr_debug("could not reset phy lanes\n");
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x0);
wmb(); /* ensure register is committed */
return ret;
}
void dsi_phy_hw_v4_0_ulps_exit(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, u32 lanes)
{
u32 reg = 0;
if (lanes & DSI_CLOCK_LANE)
reg = BIT(4);
if (lanes & DSI_DATA_LANE_0)
reg |= BIT(0);
if (lanes & DSI_DATA_LANE_1)
reg |= BIT(1);
if (lanes & DSI_DATA_LANE_2)
reg |= BIT(2);
if (lanes & DSI_DATA_LANE_3)
reg |= BIT(3);
/* enable LPRX and CDRX */
dsi_phy_hw_v4_0_config_lpcdrx(phy, cfg, true);
/* ULPS exit request */
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL2, reg);
usleep_range(1000, 1010);
/* Clear ULPS request flags on all lanes */
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, 0);
/* Clear ULPS exit flags on all lanes */
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL2, 0);
/*
* Sometimes when exiting ULPS, it is possible that some DSI
* lanes are not in the stop state, which could lead to DSI
* commands not going through. To avoid this, force the lanes
* into the stop state.
*/
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, reg);
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0);
usleep_range(100, 110);
if (cfg->force_clk_lane_hs) {
reg = BIT(5) | BIT(6);
DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg);
}
}
u32 dsi_phy_hw_v4_0_get_lanes_in_ulps(struct dsi_phy_hw *phy)
{
u32 lanes = 0;
lanes = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS0);
pr_debug("[DSI_PHY%d] lanes in ulps = 0x%x\n", phy->index, lanes);
return lanes;
}
bool dsi_phy_hw_v4_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes)
{
if (lanes & ulps_lanes)
return false;
return true;
}
int dsi_phy_hw_timing_val_v4_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
u32 *timing_val, u32 size)
{
int i = 0;
if (size != DSI_PHY_TIMING_V4_SIZE) {
pr_err("Unexpected timing array size %d\n", size);
return -EINVAL;
}
for (i = 0; i < size; i++)
timing_cfg->lane_v4[i] = timing_val[i];
return 0;
}


@@ -0,0 +1,812 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "dsi-phy-timing:" fmt
#include "dsi_phy_timing_calc.h"
static const u32 bits_per_pixel[DSI_PIXEL_FORMAT_MAX] = {
16, 18, 18, 24, 3, 8, 12 };
static int dsi_phy_cmn_validate_and_set(struct timing_entry *t,
char const *t_name)
{
if (t->rec & 0xffffff00) {
/* Output value can only be 8 bits */
pr_err("Incorrect %s rec value - %d\n", t_name, t->rec);
return -EINVAL;
}
t->reg_value = t->rec;
return 0;
}
/**
* calc_clk_prepare - calculates prepare timing params for clk lane.
*/
static int calc_clk_prepare(struct dsi_phy_hw *phy,
struct phy_clk_params *clk_params,
struct phy_timing_desc *desc,
s32 *actual_frac,
s64 *actual_intermediate)
{
u64 multiplier = BIT(20);
struct timing_entry *t = &desc->clk_prepare;
int rc = 0;
u64 dividend, temp, temp_multiple;
s32 frac = 0;
s64 intermediate;
s64 clk_prep_actual;
dividend = ((t->rec_max - t->rec_min) *
clk_params->clk_prep_buf * multiplier);
temp = roundup(div_s64(dividend, 100), multiplier);
temp += (t->rec_min * multiplier);
t->rec = div_s64(temp, multiplier);
rc = dsi_phy_cmn_validate_and_set(t, "clk_prepare");
if (rc)
goto error;
/* calculate theoretical value */
temp_multiple = 8 * t->reg_value * clk_params->tlpx_numer_ns
* multiplier;
intermediate = div_s64(temp_multiple, clk_params->bitclk_mbps);
div_s64_rem(temp_multiple, clk_params->bitclk_mbps, &frac);
clk_prep_actual = div_s64((intermediate + frac), multiplier);
pr_debug("CLK_PREPARE:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d\n",
t->mipi_min, t->mipi_max, t->rec_min, t->rec_max);
pr_debug(" reg_value=%d, actual=%lld\n", t->reg_value, clk_prep_actual);
*actual_frac = frac;
*actual_intermediate = intermediate;
error:
return rc;
}
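All of the arithmetic in calc_clk_prepare() is integer fixed point: nanosecond quantities are scaled by multiplier = BIT(20) so fractional parts survive the divisions, and div_s64_rem() recovers the remainder for reuse. A userspace sketch of the same Q20 idea; the tlpx and bit-clock inputs below are invented for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const int64_t mult = 1 << 20;   /* same scale as BIT(20) above */
    int64_t tlpx_ns = 1000;         /* assumed example input */
    int64_t bitclk_mbps = 1500;     /* assumed example input */

    /* 8 * tlpx / bitclk, kept in Q20 so the fraction is not lost */
    int64_t num = 8 * tlpx_ns * mult;
    int64_t q = num / bitclk_mbps;   /* quotient in Q20 */
    int64_t rem = num % bitclk_mbps; /* remainder, as div_s64_rem() yields */

    printf("Q20 value = %lld (~%.3f ns), remainder = %lld\n",
           (long long)q, (double)q / mult, (long long)rem);
    return 0;
}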
/**
* calc_clk_zero - calculates zero timing params for clk lane.
*/
static int calc_clk_zero(struct dsi_phy_hw *phy,
struct phy_clk_params *clk_params,
struct phy_timing_desc *desc,
s32 actual_frac, s64 actual_intermediate)
{
u64 const multiplier = BIT(20);
int rc = 0;
struct timing_entry *t = &desc->clk_zero;
s64 mipi_min, rec_temp1;
struct phy_timing_ops *ops = phy->ops.timing_ops;
mipi_min = ((300 * multiplier) - (actual_intermediate + actual_frac));
t->mipi_min = div_s64(mipi_min, multiplier);
rec_temp1 = div_s64((mipi_min * clk_params->bitclk_mbps),
clk_params->tlpx_numer_ns);
if (ops->calc_clk_zero) {
t->rec_min = ops->calc_clk_zero(rec_temp1, multiplier);
} else {
rc = -EINVAL;
goto error;
}
t->rec_max = ((t->rec_min > 255) ? 511 : 255);
t->rec = DIV_ROUND_UP((((t->rec_max - t->rec_min) *
clk_params->clk_zero_buf) + (t->rec_min * 100)), 100);
rc = dsi_phy_cmn_validate_and_set(t, "clk_zero");
if (rc)
goto error;
pr_debug("CLK_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
t->reg_value);
error:
return rc;
}
/**
* calc_clk_trail - calculates trail timing params for clk lane.
*/
static int calc_clk_trail(struct dsi_phy_hw *phy,
struct phy_clk_params *clk_params,
struct phy_timing_desc *desc,
s64 *teot_clk_lane)
{
u64 const multiplier = BIT(20);
int rc = 0;
struct timing_entry *t = &desc->clk_trail;
u64 temp_multiple;
s32 frac;
s64 mipi_max_tr, rec_temp1, mipi_max;
s64 teot_clk_lane1;
struct phy_timing_ops *ops = phy->ops.timing_ops;
temp_multiple = div_s64(
(12 * multiplier * clk_params->tlpx_numer_ns),
clk_params->bitclk_mbps);
div_s64_rem(temp_multiple, multiplier, &frac);
mipi_max_tr = ((105 * multiplier) +
(temp_multiple + frac));
teot_clk_lane1 = div_s64(mipi_max_tr, multiplier);
mipi_max = (mipi_max_tr - (clk_params->treot_ns * multiplier));
t->mipi_max = div_s64(mipi_max, multiplier);
temp_multiple = div_s64(
(t->mipi_min * multiplier * clk_params->bitclk_mbps),
clk_params->tlpx_numer_ns);
div_s64_rem(temp_multiple, multiplier, &frac);
if (ops->calc_clk_trail_rec_min) {
t->rec_min = ops->calc_clk_trail_rec_min(temp_multiple,
frac, multiplier);
} else {
rc = -EINVAL;
goto error;
}
/* recommended max */
rec_temp1 = div_s64((mipi_max * clk_params->bitclk_mbps),
clk_params->tlpx_numer_ns);
if (ops->calc_clk_trail_rec_max) {
t->rec_max = ops->calc_clk_trail_rec_max(rec_temp1, multiplier);
} else {
rc = -EINVAL;
goto error;
}
t->rec = DIV_ROUND_UP(
(((t->rec_max - t->rec_min) * clk_params->clk_trail_buf) +
(t->rec_min * 100)), 100);
rc = dsi_phy_cmn_validate_and_set(t, "clk_trail");
if (rc)
goto error;
*teot_clk_lane = teot_clk_lane1;
pr_debug("CLK_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
t->reg_value);
error:
return rc;
}
/**
* calc_hs_prepare - calculates prepare timing params for data lanes in HS.
*/
static int calc_hs_prepare(struct dsi_phy_hw *phy,
struct phy_clk_params *clk_params,
struct phy_timing_desc *desc,
u64 *temp_mul)
{
u64 multiplier = BIT(20);
int rc = 0;
struct timing_entry *t = &desc->hs_prepare;
u64 temp_multiple, dividend, temp;
s32 frac;
s64 rec_temp1, rec_temp2, mipi_max, mipi_min;
u32 low_clk_multiplier = 0;
if (clk_params->bitclk_mbps <= 120)
low_clk_multiplier = 2;
/* mipi min */
temp_multiple = div_s64((4 * multiplier * clk_params->tlpx_numer_ns),
clk_params->bitclk_mbps);
div_s64_rem(temp_multiple, multiplier, &frac);
mipi_min = (40 * multiplier) + (temp_multiple + frac);
t->mipi_min = div_s64(mipi_min, multiplier);
/* mipi_max */
temp_multiple = div_s64(
(6 * multiplier * clk_params->tlpx_numer_ns),
clk_params->bitclk_mbps);
div_s64_rem(temp_multiple, multiplier, &frac);
mipi_max = (85 * multiplier) + temp_multiple;
t->mipi_max = div_s64(mipi_max, multiplier);
/* recommended min */
temp_multiple = div_s64((mipi_min * clk_params->bitclk_mbps),
clk_params->tlpx_numer_ns);
temp_multiple -= (low_clk_multiplier * multiplier);
div_s64_rem(temp_multiple, multiplier, &frac);
rec_temp1 = roundup(((temp_multiple + frac) / 8), multiplier);
t->rec_min = div_s64(rec_temp1, multiplier);
/* recommended max */
temp_multiple = div_s64((mipi_max * clk_params->bitclk_mbps),
clk_params->tlpx_numer_ns);
temp_multiple -= (low_clk_multiplier * multiplier);
div_s64_rem(temp_multiple, multiplier, &frac);
rec_temp2 = rounddown((temp_multiple / 8), multiplier);
t->rec_max = div_s64(rec_temp2, multiplier);
/* register value */
dividend = ((rec_temp2 - rec_temp1) * clk_params->hs_prep_buf);
temp = roundup(div_u64(dividend, 100), multiplier);
t->rec = div_s64((temp + rec_temp1), multiplier);
rc = dsi_phy_cmn_validate_and_set(t, "hs_prepare");
if (rc)
goto error;
temp_multiple = div_s64(
(8 * (temp + rec_temp1) * clk_params->tlpx_numer_ns),
clk_params->bitclk_mbps);
*temp_mul = temp_multiple;
pr_debug("HS_PREP:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
t->reg_value);
error:
return rc;
}
/**
* calc_hs_zero - calculates zero timing params for data lanes in HS.
*/
static int calc_hs_zero(struct dsi_phy_hw *phy,
struct phy_clk_params *clk_params,
struct phy_timing_desc *desc,
u64 temp_multiple)
{
u64 const multiplier = BIT(20);
int rc = 0;
struct timing_entry *t = &desc->hs_zero;
s64 rec_temp1, mipi_min;
struct phy_timing_ops *ops = phy->ops.timing_ops;
mipi_min = div_s64((10 * clk_params->tlpx_numer_ns * multiplier),
clk_params->bitclk_mbps);
rec_temp1 = (145 * multiplier) + mipi_min - temp_multiple;
t->mipi_min = div_s64(rec_temp1, multiplier);
/* recommended min */
rec_temp1 = div_s64((rec_temp1 * clk_params->bitclk_mbps),
clk_params->tlpx_numer_ns);
if (ops->calc_hs_zero) {
t->rec_min = ops->calc_hs_zero(rec_temp1, multiplier);
} else {
rc = -EINVAL;
goto error;
}
t->rec_max = ((t->rec_min > 255) ? 511 : 255);
t->rec = DIV_ROUND_UP(
(((t->rec_max - t->rec_min) * clk_params->hs_zero_buf) +
(t->rec_min * 100)),
100);
rc = dsi_phy_cmn_validate_and_set(t, "hs_zero");
if (rc)
goto error;
pr_debug("HS_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
t->reg_value);
error:
return rc;
}
/**
* calc_hs_trail - calculates trail timing params for data lanes in HS.
*/
static int calc_hs_trail(struct dsi_phy_hw *phy,
struct phy_clk_params *clk_params,
struct phy_timing_desc *desc,
u64 teot_clk_lane)
{
int rc = 0;
struct timing_entry *t = &desc->hs_trail;
s64 rec_temp1;
struct phy_timing_ops *ops = phy->ops.timing_ops;
t->mipi_min = 60 +
mult_frac(clk_params->tlpx_numer_ns, 4,
clk_params->bitclk_mbps);
t->mipi_max = teot_clk_lane - clk_params->treot_ns;
if (ops->calc_hs_trail) {
ops->calc_hs_trail(clk_params, desc);
} else {
rc = -EINVAL;
goto error;
}
rec_temp1 = DIV_ROUND_UP(
((t->rec_max - t->rec_min) * clk_params->hs_trail_buf),
100);
t->rec = rec_temp1 + t->rec_min;
rc = dsi_phy_cmn_validate_and_set(t, "hs_trail");
if (rc)
goto error;
pr_debug("HS_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
t->reg_value);
error:
return rc;
}
/**
* calc_hs_rqst - calculates rqst timing params for data lanes in HS.
*/
static int calc_hs_rqst(struct dsi_phy_hw *phy,
struct phy_clk_params *clk_params,
struct phy_timing_desc *desc)
{
int rc = 0;
struct timing_entry *t = &desc->hs_rqst;
t->rec = DIV_ROUND_UP(
((t->mipi_min * clk_params->bitclk_mbps) -
(8 * clk_params->tlpx_numer_ns)),
(8 * clk_params->tlpx_numer_ns));
rc = dsi_phy_cmn_validate_and_set(t, "hs_rqst");
if (rc)
goto error;
pr_debug("HS_RQST:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
t->reg_value);
error:
return rc;
}
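The hs_rqst recommendation above reduces to DIV_ROUND_UP(mipi_min * bitclk - 8 * tlpx, 8 * tlpx). A worked example using the 50 ns spec minimum set later in this file and an assumed 1000 Mbps bit clock:

#include <stdio.h>

int main(void)
{
    /* hs_rqst_spec_min = 50 ns (see below); 1000 Mbps is assumed */
    long mipi_min = 50, bitclk_mbps = 1000, tlpx = 1000;
    long num = mipi_min * bitclk_mbps - 8 * tlpx;  /* 42000 */
    long rec = (num + 8 * tlpx - 1) / (8 * tlpx);  /* DIV_ROUND_UP */

    printf("hs_rqst rec = %ld\n", rec); /* ceil(5.25) = 6 */
    return 0;
}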
/**
* calc_hs_exit - calculates exit timing params for data lanes in HS.
*/
static int calc_hs_exit(struct dsi_phy_hw *phy,
struct phy_clk_params *clk_params,
struct phy_timing_desc *desc)
{
int rc = 0;
struct timing_entry *t = &desc->hs_exit;
t->rec_min = (DIV_ROUND_UP(
(t->mipi_min * clk_params->bitclk_mbps),
(8 * clk_params->tlpx_numer_ns)) - 1);
t->rec = DIV_ROUND_UP(
(((t->rec_max - t->rec_min) * clk_params->hs_exit_buf) +
(t->rec_min * 100)), 100);
rc = dsi_phy_cmn_validate_and_set(t, "hs_exit");
if (rc)
goto error;
pr_debug("HS_EXIT:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
t->reg_value);
error:
return rc;
}
/**
* calc_hs_rqst_clk - calculates rqst timing params for the clock lane.
*/
static int calc_hs_rqst_clk(struct dsi_phy_hw *phy,
struct phy_clk_params *clk_params,
struct phy_timing_desc *desc)
{
int rc = 0;
struct timing_entry *t = &desc->hs_rqst_clk;
t->rec = DIV_ROUND_UP(
((t->mipi_min * clk_params->bitclk_mbps) -
(8 * clk_params->tlpx_numer_ns)),
(8 * clk_params->tlpx_numer_ns));
rc = dsi_phy_cmn_validate_and_set(t, "hs_rqst_clk");
if (rc)
goto error;
pr_debug("HS_RQST_CLK:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
t->reg_value);
error:
return rc;
}
/**
* cal_clk_pulse_time - calculates clk pulse time in nsec
*/
static s64 cal_clk_pulse_time(u32 inp1, u32 inp2, u32 bitclk_mbps)
{
u64 const multiplier = BIT(20);
u64 clk_multiple;
s32 frac;
s64 temp, result;
clk_multiple = div_s64((inp1 * multiplier * 1000), bitclk_mbps);
div_s64_rem(clk_multiple, multiplier, &frac);
temp = (inp2 * multiplier) + (clk_multiple + frac);
result = div_s64(temp, multiplier);
return result;
}
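cal_clk_pulse_time() therefore evaluates inp2 + (inp1 * 1000) / bitclk_mbps nanoseconds, i.e. a unit-interval count plus a fixed offset, with the Q20 scaling preserving the fraction. Ignoring that rounding detail, a plain-integer restatement; the 1000 Mbps rate is an assumed example:

#include <stdio.h>
#include <stdint.h>

/* Plain-integer restatement of cal_clk_pulse_time(); Q20 rounding omitted */
static int64_t clk_pulse_time_ns(uint32_t ui, uint32_t ns, uint32_t bitclk_mbps)
{
    return ns + ((int64_t)ui * 1000) / bitclk_mbps;
}

int main(void)
{
    /* clk_post mipi_min below uses 52 UI + 60 ns; assume 1000 Mbps */
    printf("clk_post min = %lld ns\n",
           (long long)clk_pulse_time_ns(52, 60, 1000)); /* 112 ns */
    return 0;
}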
/**
* calc_clk_post - calculates clk_post timing params for the clock lane.
*/
static int calc_clk_post(struct dsi_phy_hw *phy,
struct phy_clk_params *clk_params,
struct phy_timing_desc *desc)
{
int rc = 0;
struct timing_entry *t = &desc->clk_post;
s64 rec_cal1, rec_cal2;
u32 input1;
/* mipi min */
t->mipi_min = cal_clk_pulse_time(52, 60, clk_params->bitclk_mbps);
/* recommended min
* = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1
*/
rec_cal1 = cal_clk_pulse_time(16, 0, clk_params->bitclk_mbps);
input1 = (desc->hs_trail.reg_value + 1) * 8;
rec_cal2 = cal_clk_pulse_time(input1, 0, clk_params->bitclk_mbps);
rec_cal2 += t->mipi_min;
t->rec_min = div_s64(rec_cal2, rec_cal1) - 1;
/* recommended max */
t->rec_max = 255;
/* register value */
rec_cal1 = (t->rec_max - t->rec_min);
rec_cal2 = clk_params->clk_post_buf / 100;
t->rec = rec_cal1 * rec_cal2 + t->rec_min;
rc = dsi_phy_cmn_validate_and_set(t, "clk_post");
if (rc)
goto error;
pr_debug("CLK_POST:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
t->reg_value);
error:
return rc;
}
/**
* calc_clk_pre - calculates clk_pre timing params for the clock lane.
*/
static int calc_clk_pre(struct dsi_phy_hw *phy,
struct phy_clk_params *clk_params,
struct phy_timing_desc *desc)
{
int rc = 0;
struct timing_entry *t = &desc->clk_pre;
s64 rec_temp1;
s64 clk_prepare, clk_zero, clk_16;
u32 input1;
s64 rec_cal1, rec_cal2;
/* mipi min */
t->mipi_min = cal_clk_pulse_time(8, 0, clk_params->bitclk_mbps);
/* recommended min
* val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns)
* val2 = (16 * bit_clk_ns)
* final = roundup(val1/val2, 0) - 1
*/
input1 = desc->clk_prepare.reg_value * 8;
clk_prepare = cal_clk_pulse_time(input1, 0, clk_params->bitclk_mbps);
input1 = (desc->clk_zero.reg_value + 1) * 8;
clk_zero = cal_clk_pulse_time(input1, 0, clk_params->bitclk_mbps);
clk_16 = cal_clk_pulse_time(16, 0, clk_params->bitclk_mbps);
rec_temp1 = 52 + clk_prepare + clk_zero + 54;
t->rec_min = div_s64(rec_temp1, clk_16) - 1;
/* recommended max */
t->rec_max = 255;
/* register value */
rec_cal1 = (t->rec_max - t->rec_min);
rec_cal2 = clk_params->clk_pre_buf / 100;
t->rec = rec_cal1 * rec_cal2 + t->rec_min;
rc = dsi_phy_cmn_validate_and_set(t, "clk_pre");
if (rc)
goto error;
pr_debug("CLK_PRE:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
t->reg_value);
error:
return rc;
}
/**
* dsi_phy_cmn_calc_timing_params() - calculates timing parameters for a given bit clock
*/
static int dsi_phy_cmn_calc_timing_params(struct dsi_phy_hw *phy,
struct phy_clk_params *clk_params, struct phy_timing_desc *desc)
{
int rc = 0;
s32 actual_frac = 0;
s64 actual_intermediate = 0;
u64 temp_multiple;
s64 teot_clk_lane;
rc = calc_clk_prepare(phy, clk_params, desc, &actual_frac,
&actual_intermediate);
if (rc) {
pr_err("clk_prepare calculations failed, rc=%d\n", rc);
goto error;
}
rc = calc_clk_zero(phy, clk_params, desc,
actual_frac, actual_intermediate);
if (rc) {
pr_err("clk_zero calculations failed, rc=%d\n", rc);
goto error;
}
rc = calc_clk_trail(phy, clk_params, desc, &teot_clk_lane);
if (rc) {
pr_err("clk_trail calculations failed, rc=%d\n", rc);
goto error;
}
rc = calc_hs_prepare(phy, clk_params, desc, &temp_multiple);
if (rc) {
pr_err("hs_prepare calculations failed, rc=%d\n", rc);
goto error;
}
rc = calc_hs_zero(phy, clk_params, desc, temp_multiple);
if (rc) {
pr_err("hs_zero calculations failed, rc=%d\n", rc);
goto error;
}
rc = calc_hs_trail(phy, clk_params, desc, teot_clk_lane);
if (rc) {
pr_err("hs_trail calculations failed, rc=%d\n", rc);
goto error;
}
rc = calc_hs_rqst(phy, clk_params, desc);
if (rc) {
pr_err("hs_rqst calculations failed, rc=%d\n", rc);
goto error;
}
rc = calc_hs_exit(phy, clk_params, desc);
if (rc) {
pr_err("hs_exit calculations failed, rc=%d\n", rc);
goto error;
}
rc = calc_hs_rqst_clk(phy, clk_params, desc);
if (rc) {
pr_err("hs_rqst_clk calculations failed, rc=%d\n", rc);
goto error;
}
rc = calc_clk_post(phy, clk_params, desc);
if (rc) {
pr_err("clk_post calculations failed, rc=%d\n", rc);
goto error;
}
rc = calc_clk_pre(phy, clk_params, desc);
if (rc) {
pr_err("clk_pre calculations failed, rc=%d\n", rc);
goto error;
}
error:
return rc;
}
/**
* dsi_phy_hw_calculate_timing_params() - calculates timing parameters.
* @phy: Pointer to DSI PHY hardware object.
* @mode: Mode information for which timing has to be calculated.
* @host: DSI host configuration for this mode.
* @timing: Timing parameters for each lane which will be returned.
*/
int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
struct dsi_mode_info *mode,
struct dsi_host_common_cfg *host,
struct dsi_phy_per_lane_cfgs *timing)
{
/* constants */
u32 const esc_clk_mhz = 192; /* TODO: esc clock is hardcoded */
u32 const esc_clk_mmss_cc_prediv = 10;
u32 const tlpx_numer = 1000;
u32 const tr_eot = 20;
u32 const clk_prepare_spec_min = 38;
u32 const clk_prepare_spec_max = 95;
u32 const clk_trail_spec_min = 60;
u32 const hs_exit_spec_min = 100;
u32 const hs_exit_reco_max = 255;
u32 const hs_rqst_spec_min = 50;
/* local vars */
int rc = 0;
u32 h_total, v_total;
u32 inter_num;
u32 num_of_lanes = 0;
u32 bpp;
u64 x, y;
struct phy_timing_desc desc;
struct phy_clk_params clk_params = {0};
struct phy_timing_ops *ops = phy->ops.timing_ops;
memset(&desc, 0x0, sizeof(desc));
h_total = DSI_H_TOTAL_DSC(mode);
v_total = DSI_V_TOTAL(mode);
bpp = bits_per_pixel[host->dst_format];
inter_num = bpp * mode->refresh_rate;
if (host->data_lanes & DSI_DATA_LANE_0)
num_of_lanes++;
if (host->data_lanes & DSI_DATA_LANE_1)
num_of_lanes++;
if (host->data_lanes & DSI_DATA_LANE_2)
num_of_lanes++;
if (host->data_lanes & DSI_DATA_LANE_3)
num_of_lanes++;
x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
y = rounddown(x, 1);
clk_params.bitclk_mbps = rounddown(DIV_ROUND_UP_ULL(y, 1000000), 1);
clk_params.escclk_numer = esc_clk_mhz;
clk_params.escclk_denom = esc_clk_mmss_cc_prediv;
clk_params.tlpx_numer_ns = tlpx_numer;
clk_params.treot_ns = tr_eot;
/* Setup default parameters */
desc.clk_prepare.mipi_min = clk_prepare_spec_min;
desc.clk_prepare.mipi_max = clk_prepare_spec_max;
desc.clk_trail.mipi_min = clk_trail_spec_min;
desc.hs_exit.mipi_min = hs_exit_spec_min;
desc.hs_exit.rec_max = hs_exit_reco_max;
desc.hs_rqst.mipi_min = hs_rqst_spec_min;
desc.hs_rqst_clk.mipi_min = hs_rqst_spec_min;
if (ops->get_default_phy_params) {
ops->get_default_phy_params(&clk_params);
} else {
rc = -EINVAL;
goto error;
}
desc.clk_prepare.rec_min = DIV_ROUND_UP(
(desc.clk_prepare.mipi_min * clk_params.bitclk_mbps),
(8 * clk_params.tlpx_numer_ns)
);
desc.clk_prepare.rec_max = rounddown(
mult_frac((desc.clk_prepare.mipi_max * clk_params.bitclk_mbps),
1, (8 * clk_params.tlpx_numer_ns)),
1);
pr_debug("BIT CLOCK = %d, tlpx_numer_ns=%d, treot_ns=%d\n",
clk_params.bitclk_mbps, clk_params.tlpx_numer_ns,
clk_params.treot_ns);
rc = dsi_phy_cmn_calc_timing_params(phy, &clk_params, &desc);
if (rc) {
pr_err("Timing calc failed, rc=%d\n", rc);
goto error;
}
if (ops->update_timing_params) {
ops->update_timing_params(timing, &desc);
} else {
rc = -EINVAL;
goto error;
}
error:
return rc;
}
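The bit clock derived above is (h_total * v_total * bpp * refresh_rate) / num_of_lanes, rounded up to whole Mbps. A worked sketch with an invented mode; the totals, which fold in blanking, are assumptions for illustration only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Assumed example: ~1080x2400 panel with blanking in the totals,
     * RGB888 (24 bpp), 60 Hz refresh, four data lanes.
     */
    uint64_t h_total = 1140, v_total = 2460;
    uint64_t bpp = 24, refresh = 60, lanes = 4;

    uint64_t bps_per_lane = h_total * v_total * bpp * refresh / lanes;
    /* DIV_ROUND_UP_ULL(bps_per_lane, 1000000) */
    uint64_t bitclk_mbps = (bps_per_lane + 999999) / 1000000;

    printf("bitclk = %llu Mbps\n", (unsigned long long)bitclk_mbps); /* 1010 */
    return 0;
}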
int dsi_phy_timing_calc_init(struct dsi_phy_hw *phy,
enum dsi_phy_version version)
{
struct phy_timing_ops *ops = NULL;
if (version == DSI_PHY_VERSION_UNKNOWN ||
version >= DSI_PHY_VERSION_MAX || !phy) {
pr_err("Unsupported version: %d\n", version);
return -ENOTSUPP;
}
ops = kzalloc(sizeof(struct phy_timing_ops), GFP_KERNEL);
if (!ops)
return -EINVAL;
phy->ops.timing_ops = ops;
switch (version) {
case DSI_PHY_VERSION_2_0:
ops->get_default_phy_params =
dsi_phy_hw_v2_0_get_default_phy_params;
ops->calc_clk_zero =
dsi_phy_hw_v2_0_calc_clk_zero;
ops->calc_clk_trail_rec_min =
dsi_phy_hw_v2_0_calc_clk_trail_rec_min;
ops->calc_clk_trail_rec_max =
dsi_phy_hw_v2_0_calc_clk_trail_rec_max;
ops->calc_hs_zero =
dsi_phy_hw_v2_0_calc_hs_zero;
ops->calc_hs_trail =
dsi_phy_hw_v2_0_calc_hs_trail;
ops->update_timing_params =
dsi_phy_hw_v2_0_update_timing_params;
break;
case DSI_PHY_VERSION_3_0:
ops->get_default_phy_params =
dsi_phy_hw_v3_0_get_default_phy_params;
ops->calc_clk_zero =
dsi_phy_hw_v3_0_calc_clk_zero;
ops->calc_clk_trail_rec_min =
dsi_phy_hw_v3_0_calc_clk_trail_rec_min;
ops->calc_clk_trail_rec_max =
dsi_phy_hw_v3_0_calc_clk_trail_rec_max;
ops->calc_hs_zero =
dsi_phy_hw_v3_0_calc_hs_zero;
ops->calc_hs_trail =
dsi_phy_hw_v3_0_calc_hs_trail;
ops->update_timing_params =
dsi_phy_hw_v3_0_update_timing_params;
break;
case DSI_PHY_VERSION_4_0:
case DSI_PHY_VERSION_4_1:
ops->get_default_phy_params =
dsi_phy_hw_v4_0_get_default_phy_params;
ops->calc_clk_zero =
dsi_phy_hw_v4_0_calc_clk_zero;
ops->calc_clk_trail_rec_min =
dsi_phy_hw_v4_0_calc_clk_trail_rec_min;
ops->calc_clk_trail_rec_max =
dsi_phy_hw_v4_0_calc_clk_trail_rec_max;
ops->calc_hs_zero =
dsi_phy_hw_v4_0_calc_hs_zero;
ops->calc_hs_trail =
dsi_phy_hw_v4_0_calc_hs_trail;
ops->update_timing_params =
dsi_phy_hw_v4_0_update_timing_params;
break;
case DSI_PHY_VERSION_0_0_HPM:
case DSI_PHY_VERSION_0_0_LPM:
case DSI_PHY_VERSION_1_0:
default:
kfree(ops);
return -ENOTSUPP;
}
return 0;
}
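dsi_phy_timing_calc_init() is a per-version dispatch: one ops table filled with the hooks for the detected PHY generation, which the common calculator then calls without knowing the version. A hedged sketch of the same pattern; the names below are illustrative stand-ins, not the driver's API:

#include <stdio.h>

/* Illustrative stand-in for struct phy_timing_ops */
struct example_timing_ops {
    int (*calc_clk_zero)(long long rec_temp1, long long mult);
};

/* Same steps as dsi_phy_hw_v3_0_calc_clk_zero() further below */
static int example_calc_clk_zero_v3(long long rec_temp1, long long mult)
{
    long long t = (rec_temp1 - mult) / 8;  /* div_s64(rec_temp2, 8) */

    t = ((t + mult - 1) / mult) * mult;    /* roundup64(t, mult) */
    return (int)(t / mult - 1);
}

int main(void)
{
    struct example_timing_ops ops = {
        .calc_clk_zero = example_calc_clk_zero_v3, /* version pick */
    };

    /* 100.0 in Q20 yields a rec_min of 12, matching the kernel math */
    printf("%d\n", ops.calc_clk_zero(100LL << 20, 1LL << 20));
    return 0;
}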


@@ -0,0 +1,159 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_PHY_TIMING_CALC_H_
#define _DSI_PHY_TIMING_CALC_H_
#include <linux/math64.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/errno.h>
#include "dsi_defs.h"
#include "dsi_phy_hw.h"
#include "dsi_catalog.h"
/**
* struct timing_entry - Calculated values for each timing parameter.
* @mipi_min: Minimum value allowed by the MIPI specification.
* @mipi_max: Maximum value allowed by the MIPI specification.
* @rec_min: Minimum recommended value.
* @rec_max: Maximum recommended value.
* @rec: Recommended value, from which @reg_value is derived.
* @reg_value: Value to be programmed in register.
*/
struct timing_entry {
s32 mipi_min;
s32 mipi_max;
s32 rec_min;
s32 rec_max;
s32 rec;
u8 reg_value;
};
/**
* struct phy_timing_desc - Timing parameters for DSI PHY.
*/
struct phy_timing_desc {
struct timing_entry clk_prepare;
struct timing_entry clk_zero;
struct timing_entry clk_trail;
struct timing_entry hs_prepare;
struct timing_entry hs_zero;
struct timing_entry hs_trail;
struct timing_entry hs_rqst;
struct timing_entry hs_rqst_clk;
struct timing_entry hs_exit;
struct timing_entry ta_go;
struct timing_entry ta_sure;
struct timing_entry ta_set;
struct timing_entry clk_post;
struct timing_entry clk_pre;
};
/**
* struct phy_clk_params - Clock parameters for PHY timing calculations.
*/
struct phy_clk_params {
u32 bitclk_mbps;
u32 escclk_numer;
u32 escclk_denom;
u32 tlpx_numer_ns;
u32 treot_ns;
u32 clk_prep_buf;
u32 clk_zero_buf;
u32 clk_trail_buf;
u32 hs_prep_buf;
u32 hs_zero_buf;
u32 hs_trail_buf;
u32 hs_rqst_buf;
u32 hs_exit_buf;
u32 clk_pre_buf;
u32 clk_post_buf;
};
/**
* struct phy_timing_ops - per-version ops needed for auto-calculation of
* DSI PHY timing parameters.
*/
struct phy_timing_ops {
void (*get_default_phy_params)(struct phy_clk_params *params);
int32_t (*calc_clk_zero)(s64 rec_temp1, s64 mult);
int32_t (*calc_clk_trail_rec_min)(s64 temp_mul,
s64 frac, s64 mult);
int32_t (*calc_clk_trail_rec_max)(s64 temp1, s64 mult);
int32_t (*calc_hs_zero)(s64 temp1, s64 mult);
void (*calc_hs_trail)(struct phy_clk_params *clk_params,
struct phy_timing_desc *desc);
void (*update_timing_params)(struct dsi_phy_per_lane_cfgs *timing,
struct phy_timing_desc *desc);
};
#define roundup64(x, y) \
({ u64 _tmp = (x)+(y)-1; do_div(_tmp, y); _tmp * y; })
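roundup64() rounds x up to the next multiple of y via do_div(), the helper required for 64-bit division on 32-bit builds where a plain '/' on u64 is unavailable. A userspace equivalent and a quick check:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for roundup64(): ((x + y - 1) / y) * y on u64 */
static uint64_t roundup64_ex(uint64_t x, uint64_t y)
{
    return ((x + y - 1) / y) * y;
}

int main(void)
{
    uint64_t mult = 1 << 20;

    /* Anything just above 5.0 in Q20 rounds up to exactly 6.0 * mult */
    printf("%llu\n", (unsigned long long)roundup64_ex(5 * mult + 1, mult));
    return 0;
}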
/* DSI PHY timing functions for 14nm */
void dsi_phy_hw_v2_0_get_default_phy_params(struct phy_clk_params *params);
int32_t dsi_phy_hw_v2_0_calc_clk_zero(s64 rec_temp1, s64 mult);
int32_t dsi_phy_hw_v2_0_calc_clk_trail_rec_min(s64 temp_mul,
s64 frac, s64 mult);
int32_t dsi_phy_hw_v2_0_calc_clk_trail_rec_max(s64 temp1, s64 mult);
int32_t dsi_phy_hw_v2_0_calc_hs_zero(s64 temp1, s64 mult);
void dsi_phy_hw_v2_0_calc_hs_trail(struct phy_clk_params *clk_params,
struct phy_timing_desc *desc);
void dsi_phy_hw_v2_0_update_timing_params(struct dsi_phy_per_lane_cfgs *timing,
struct phy_timing_desc *desc);
/* DSI PHY timing functions for 10nm */
void dsi_phy_hw_v3_0_get_default_phy_params(struct phy_clk_params *params);
int32_t dsi_phy_hw_v3_0_calc_clk_zero(s64 rec_temp1, s64 mult);
int32_t dsi_phy_hw_v3_0_calc_clk_trail_rec_min(s64 temp_mul,
s64 frac, s64 mult);
int32_t dsi_phy_hw_v3_0_calc_clk_trail_rec_max(s64 temp1, s64 mult);
int32_t dsi_phy_hw_v3_0_calc_hs_zero(s64 temp1, s64 mult);
void dsi_phy_hw_v3_0_calc_hs_trail(struct phy_clk_params *clk_params,
struct phy_timing_desc *desc);
void dsi_phy_hw_v3_0_update_timing_params(struct dsi_phy_per_lane_cfgs *timing,
struct phy_timing_desc *desc);
/* DSI PHY timing functions for 7nm */
void dsi_phy_hw_v4_0_get_default_phy_params(struct phy_clk_params *params);
int32_t dsi_phy_hw_v4_0_calc_clk_zero(s64 rec_temp1, s64 mult);
int32_t dsi_phy_hw_v4_0_calc_clk_trail_rec_min(s64 temp_mul,
s64 frac, s64 mult);
int32_t dsi_phy_hw_v4_0_calc_clk_trail_rec_max(s64 temp1, s64 mult);
int32_t dsi_phy_hw_v4_0_calc_hs_zero(s64 temp1, s64 mult);
void dsi_phy_hw_v4_0_calc_hs_trail(struct phy_clk_params *clk_params,
struct phy_timing_desc *desc);
void dsi_phy_hw_v4_0_update_timing_params(struct dsi_phy_per_lane_cfgs *timing,
struct phy_timing_desc *desc);
#endif /* _DSI_PHY_TIMING_CALC_H_ */


@@ -0,0 +1,118 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "dsi-phy-timing:" fmt
#include "dsi_phy_timing_calc.h"
void dsi_phy_hw_v2_0_get_default_phy_params(struct phy_clk_params *params)
{
params->clk_prep_buf = 50;
params->clk_zero_buf = 2;
params->clk_trail_buf = 30;
params->hs_prep_buf = 50;
params->hs_zero_buf = 10;
params->hs_trail_buf = 30;
params->hs_rqst_buf = 0;
params->hs_exit_buf = 10;
}
int32_t dsi_phy_hw_v2_0_calc_clk_zero(s64 rec_temp1, s64 mult)
{
s64 rec_temp2, rec_temp3;
rec_temp2 = (rec_temp1 - (11 * mult));
rec_temp3 = roundup64(div_s64(rec_temp2, 8), mult);
return (div_s64(rec_temp3, mult) - 3);
}
int32_t dsi_phy_hw_v2_0_calc_clk_trail_rec_min(s64 temp_mul,
s64 frac, s64 mult)
{
s64 rec_temp1, rec_temp2, rec_temp3;
rec_temp1 = temp_mul + frac + (3 * mult);
rec_temp2 = div_s64(rec_temp1, 8);
rec_temp3 = roundup64(rec_temp2, mult);
return div_s64(rec_temp3, mult);
}
int32_t dsi_phy_hw_v2_0_calc_clk_trail_rec_max(s64 temp1, s64 mult)
{
s64 rec_temp2, rec_temp3;
rec_temp2 = temp1 + (3 * mult);
rec_temp3 = rec_temp2 / 8;
return div_s64(rec_temp3, mult);
}
int32_t dsi_phy_hw_v2_0_calc_hs_zero(s64 temp1, s64 mult)
{
s64 rec_temp2, rec_temp3, rec_min;
rec_temp2 = temp1 - (11 * mult);
rec_temp3 = roundup64((rec_temp2 / 8), mult);
rec_min = rec_temp3 - (3 * mult);
return div_s64(rec_min, mult);
}
void dsi_phy_hw_v2_0_calc_hs_trail(struct phy_clk_params *clk_params,
struct phy_timing_desc *desc)
{
s64 rec_temp1;
struct timing_entry *t = &desc->hs_trail;
t->rec_min = DIV_ROUND_UP(
((t->mipi_min * clk_params->bitclk_mbps) +
(3 * clk_params->tlpx_numer_ns)),
(8 * clk_params->tlpx_numer_ns));
rec_temp1 = ((t->mipi_max * clk_params->bitclk_mbps) +
(3 * clk_params->tlpx_numer_ns));
t->rec_max = DIV_ROUND_UP_ULL(rec_temp1,
(8 * clk_params->tlpx_numer_ns));
}
void dsi_phy_hw_v2_0_update_timing_params(
struct dsi_phy_per_lane_cfgs *timing,
struct phy_timing_desc *desc)
{
int i = 0;
for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
timing->lane[i][0] = desc->hs_exit.reg_value;
if (i == DSI_LOGICAL_CLOCK_LANE)
timing->lane[i][1] = desc->clk_zero.reg_value;
else
timing->lane[i][1] = desc->hs_zero.reg_value;
if (i == DSI_LOGICAL_CLOCK_LANE)
timing->lane[i][2] = desc->clk_prepare.reg_value;
else
timing->lane[i][2] = desc->hs_prepare.reg_value;
if (i == DSI_LOGICAL_CLOCK_LANE)
timing->lane[i][3] = desc->clk_trail.reg_value;
else
timing->lane[i][3] = desc->hs_trail.reg_value;
if (i == DSI_LOGICAL_CLOCK_LANE)
timing->lane[i][4] = desc->hs_rqst_clk.reg_value;
else
timing->lane[i][4] = desc->hs_rqst.reg_value;
timing->lane[i][5] = 0x2;
timing->lane[i][6] = 0x4;
timing->lane[i][7] = 0xA0;
pr_debug("[%d][%d %d %d %d %d]\n", i, timing->lane[i][0],
timing->lane[i][1],
timing->lane[i][2],
timing->lane[i][3],
timing->lane[i][4]);
}
timing->count_per_lane = 8;
}


@@ -0,0 +1,98 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "dsi-phy-timing:" fmt
#include "dsi_phy_timing_calc.h"
void dsi_phy_hw_v3_0_get_default_phy_params(
struct phy_clk_params *params)
{
params->clk_prep_buf = 0;
params->clk_zero_buf = 0;
params->clk_trail_buf = 0;
params->hs_prep_buf = 0;
params->hs_zero_buf = 0;
params->hs_trail_buf = 0;
params->hs_rqst_buf = 0;
params->hs_exit_buf = 0;
}
int32_t dsi_phy_hw_v3_0_calc_clk_zero(s64 rec_temp1, s64 mult)
{
s64 rec_temp2, rec_temp3;
rec_temp2 = (rec_temp1 - mult);
rec_temp3 = roundup64(div_s64(rec_temp2, 8), mult);
return (div_s64(rec_temp3, mult) - 1);
}
int32_t dsi_phy_hw_v3_0_calc_clk_trail_rec_min(s64 temp_mul,
s64 frac, s64 mult)
{
s64 rec_temp1, rec_temp2, rec_temp3;
rec_temp1 = temp_mul + frac;
rec_temp2 = div_s64(rec_temp1, 8);
rec_temp3 = roundup64(rec_temp2, mult);
return (div_s64(rec_temp3, mult) - 1);
}
int32_t dsi_phy_hw_v3_0_calc_clk_trail_rec_max(s64 temp1, s64 mult)
{
s64 rec_temp2;
rec_temp2 = temp1 / 8;
return (div_s64(rec_temp2, mult) - 1);
}
int32_t dsi_phy_hw_v3_0_calc_hs_zero(s64 temp1, s64 mult)
{
s64 rec_temp2, rec_min;
rec_temp2 = roundup64((temp1 / 8), mult);
rec_min = rec_temp2 - (1 * mult);
return div_s64(rec_min, mult);
}
void dsi_phy_hw_v3_0_calc_hs_trail(struct phy_clk_params *clk_params,
struct phy_timing_desc *desc)
{
s64 rec_temp1;
struct timing_entry *t = &desc->hs_trail;
t->rec_min = DIV_ROUND_UP(
(t->mipi_min * clk_params->bitclk_mbps),
(8 * clk_params->tlpx_numer_ns)) - 1;
rec_temp1 = (t->mipi_max * clk_params->bitclk_mbps);
t->rec_max =
(div_s64(rec_temp1, (8 * clk_params->tlpx_numer_ns))) - 1;
}
void dsi_phy_hw_v3_0_update_timing_params(
struct dsi_phy_per_lane_cfgs *timing,
struct phy_timing_desc *desc)
{
timing->lane_v3[0] = 0x00;
timing->lane_v3[1] = desc->clk_zero.reg_value;
timing->lane_v3[2] = desc->clk_prepare.reg_value;
timing->lane_v3[3] = desc->clk_trail.reg_value;
timing->lane_v3[4] = desc->hs_exit.reg_value;
timing->lane_v3[5] = desc->hs_zero.reg_value;
timing->lane_v3[6] = desc->hs_prepare.reg_value;
timing->lane_v3[7] = desc->hs_trail.reg_value;
timing->lane_v3[8] = desc->hs_rqst.reg_value;
timing->lane_v3[9] = 0x02;
timing->lane_v3[10] = 0x04;
timing->lane_v3[11] = 0x00;
pr_debug("[%d %d %d %d]\n", timing->lane_v3[0],
timing->lane_v3[1], timing->lane_v3[2], timing->lane_v3[3]);
pr_debug("[%d %d %d %d]\n", timing->lane_v3[4],
timing->lane_v3[5], timing->lane_v3[6], timing->lane_v3[7]);
pr_debug("[%d %d %d %d]\n", timing->lane_v3[8],
timing->lane_v3[9], timing->lane_v3[10], timing->lane_v3[11]);
timing->count_per_lane = 12;
}
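
/*
 * Note: unlike the v2.0 PHY above, which programs eight values per logical
 * lane and branches on DSI_LOGICAL_CLOCK_LANE, the v3.0 PHY consumes a
 * single shared twelve-entry table, so the clock-lane specific values
 * (clk_zero, clk_prepare, clk_trail) live at fixed slots instead.
 */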


@@ -0,0 +1,101 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "dsi-phy-timing-v4: %s:" fmt, __func__
#include "dsi_phy_timing_calc.h"
void dsi_phy_hw_v4_0_get_default_phy_params(
struct phy_clk_params *params)
{
params->clk_prep_buf = 0;
params->clk_zero_buf = 0;
params->clk_trail_buf = 0;
params->hs_prep_buf = 0;
params->hs_zero_buf = 0;
params->hs_trail_buf = 0;
params->hs_rqst_buf = 0;
params->hs_exit_buf = 0;
}
int32_t dsi_phy_hw_v4_0_calc_clk_zero(s64 rec_temp1, s64 mult)
{
s64 rec_temp2, rec_temp3;
rec_temp2 = (rec_temp1 - mult);
rec_temp3 = roundup(div_s64(rec_temp2, 8), mult);
return (div_s64(rec_temp3, mult) - 1);
}
int32_t dsi_phy_hw_v4_0_calc_clk_trail_rec_min(s64 temp_mul,
s64 frac, s64 mult)
{
s64 rec_temp1, rec_temp2, rec_temp3;
rec_temp1 = temp_mul + frac;
rec_temp2 = div_s64(rec_temp1, 8);
rec_temp3 = roundup(rec_temp2, mult);
return (div_s64(rec_temp3, mult) - 1);
}
int32_t dsi_phy_hw_v4_0_calc_clk_trail_rec_max(s64 temp1, s64 mult)
{
s64 rec_temp2;
rec_temp2 = temp1 / 8;
return (div_s64(rec_temp2, mult) - 1);
}
int32_t dsi_phy_hw_v4_0_calc_hs_zero(s64 temp1, s64 mult)
{
s64 rec_temp2, rec_min;
rec_temp2 = roundup((temp1 / 8), mult);
rec_min = rec_temp2 - (1 * mult);
return div_s64(rec_min, mult);
}
void dsi_phy_hw_v4_0_calc_hs_trail(struct phy_clk_params *clk_params,
struct phy_timing_desc *desc)
{
s64 rec_temp1;
struct timing_entry *t = &desc->hs_trail;
t->rec_min = DIV_ROUND_UP(
(t->mipi_min * clk_params->bitclk_mbps),
(8 * clk_params->tlpx_numer_ns)) - 1;
rec_temp1 = (t->mipi_max * clk_params->bitclk_mbps);
t->rec_max =
(div_s64(rec_temp1, (8 * clk_params->tlpx_numer_ns))) - 1;
}
void dsi_phy_hw_v4_0_update_timing_params(
struct dsi_phy_per_lane_cfgs *timing,
struct phy_timing_desc *desc)
{
timing->lane_v4[0] = 0x00;
timing->lane_v4[1] = desc->clk_zero.reg_value;
timing->lane_v4[2] = desc->clk_prepare.reg_value;
timing->lane_v4[3] = desc->clk_trail.reg_value;
timing->lane_v4[4] = desc->hs_exit.reg_value;
timing->lane_v4[5] = desc->hs_zero.reg_value;
timing->lane_v4[6] = desc->hs_prepare.reg_value;
timing->lane_v4[7] = desc->hs_trail.reg_value;
timing->lane_v4[8] = desc->hs_rqst.reg_value;
timing->lane_v4[9] = 0x03;
timing->lane_v4[10] = 0x04;
timing->lane_v4[11] = 0x00;
timing->lane_v4[12] = 0x00;
timing->lane_v4[13] = 0x00;
pr_debug("[%d %d %d %d]\n", timing->lane_v4[0],
timing->lane_v4[1], timing->lane_v4[2], timing->lane_v4[3]);
pr_debug("[%d %d %d %d]\n", timing->lane_v4[4],
timing->lane_v4[5], timing->lane_v4[6], timing->lane_v4[7]);
pr_debug("[%d %d %d %d]\n", timing->lane_v4[8],
timing->lane_v4[9], timing->lane_v4[10], timing->lane_v4[11]);
pr_debug("[%d %d]\n", timing->lane_v4[12], timing->lane_v4[13]);
timing->count_per_lane = 14;
}

366
msm/dsi/dsi_pwr.c Normal file

@@ -0,0 +1,366 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "dsi_pwr.h"
#include "dsi_parser.h"
/*
* dsi_pwr_parse_supply_node() - parse power supply node from root device node
*/
static int dsi_pwr_parse_supply_node(struct dsi_parser_utils *utils,
struct device_node *root,
struct dsi_regulator_info *regs)
{
int rc = 0;
int i = 0;
u32 tmp = 0;
struct device_node *node = NULL;
dsi_for_each_child_node(root, node) {
const char *st = NULL;
rc = utils->read_string(node, "qcom,supply-name", &st);
if (rc) {
pr_err("failed to read name, rc = %d\n", rc);
goto error;
}
snprintf(regs->vregs[i].vreg_name,
ARRAY_SIZE(regs->vregs[i].vreg_name),
"%s", st);
rc = utils->read_u32(node, "qcom,supply-min-voltage", &tmp);
if (rc) {
pr_err("failed to read min voltage, rc = %d\n", rc);
goto error;
}
regs->vregs[i].min_voltage = tmp;
rc = utils->read_u32(node, "qcom,supply-max-voltage", &tmp);
if (rc) {
pr_err("failed to read max voltage, rc = %d\n", rc);
goto error;
}
regs->vregs[i].max_voltage = tmp;
rc = utils->read_u32(node, "qcom,supply-enable-load", &tmp);
if (rc) {
pr_err("failed to read enable load, rc = %d\n", rc);
goto error;
}
regs->vregs[i].enable_load = tmp;
rc = utils->read_u32(node, "qcom,supply-disable-load", &tmp);
if (rc) {
pr_err("failed to read disable load, rc = %d\n", rc);
goto error;
}
regs->vregs[i].disable_load = tmp;
/* Optional values */
rc = utils->read_u32(node, "qcom,supply-pre-on-sleep", &tmp);
if (rc) {
pr_debug("pre-on-sleep not specified\n");
rc = 0;
} else {
regs->vregs[i].pre_on_sleep = tmp;
}
rc = utils->read_u32(node, "qcom,supply-pre-off-sleep", &tmp);
if (rc) {
pr_debug("pre-off-sleep not specified\n");
rc = 0;
} else {
regs->vregs[i].pre_off_sleep = tmp;
}
rc = utils->read_u32(node, "qcom,supply-post-on-sleep", &tmp);
if (rc) {
pr_debug("post-on-sleep not specified\n");
rc = 0;
} else {
regs->vregs[i].post_on_sleep = tmp;
}
rc = utils->read_u32(node, "qcom,supply-post-off-sleep", &tmp);
if (rc) {
pr_debug("post-off-sleep not specified\n");
rc = 0;
} else {
regs->vregs[i].post_off_sleep = tmp;
}
pr_debug("[%s] minv=%d maxv=%d, en_load=%d, dis_load=%d\n",
regs->vregs[i].vreg_name,
regs->vregs[i].min_voltage,
regs->vregs[i].max_voltage,
regs->vregs[i].enable_load,
regs->vregs[i].disable_load);
++i;
}
error:
return rc;
}
/**
 * dsi_pwr_enable_vregs() - enable/disable regulators
 * @regs:   regulator set to operate on.
 * @enable: true to enable the set in order, false to disable in reverse.
 */
static int dsi_pwr_enable_vregs(struct dsi_regulator_info *regs, bool enable)
{
int rc = 0, i = 0;
struct dsi_vreg *vreg;
int num_of_v = 0;
if (enable) {
for (i = 0; i < regs->count; i++) {
vreg = &regs->vregs[i];
if (vreg->pre_on_sleep)
msleep(vreg->pre_on_sleep);
rc = regulator_set_load(vreg->vreg,
vreg->enable_load);
if (rc < 0) {
pr_err("Setting optimum mode failed for %s\n",
vreg->vreg_name);
goto error;
}
num_of_v = regulator_count_voltages(vreg->vreg);
if (num_of_v > 0) {
rc = regulator_set_voltage(vreg->vreg,
vreg->min_voltage,
vreg->max_voltage);
if (rc) {
pr_err("Set voltage(%s) fail, rc=%d\n",
vreg->vreg_name, rc);
goto error_disable_opt_mode;
}
}
rc = regulator_enable(vreg->vreg);
if (rc) {
pr_err("enable failed for %s, rc=%d\n",
vreg->vreg_name, rc);
goto error_disable_voltage;
}
if (vreg->post_on_sleep)
msleep(vreg->post_on_sleep);
}
} else {
for (i = (regs->count - 1); i >= 0; i--) {
if (regs->vregs[i].pre_off_sleep)
msleep(regs->vregs[i].pre_off_sleep);
(void)regulator_set_load(regs->vregs[i].vreg,
regs->vregs[i].disable_load);
(void)regulator_disable(regs->vregs[i].vreg);
if (regs->vregs[i].post_off_sleep)
msleep(regs->vregs[i].post_off_sleep);
}
}
return 0;
error_disable_opt_mode:
(void)regulator_set_load(regs->vregs[i].vreg,
regs->vregs[i].disable_load);
error_disable_voltage:
if (num_of_v > 0)
(void)regulator_set_voltage(regs->vregs[i].vreg,
0, regs->vregs[i].max_voltage);
error:
for (i--; i >= 0; i--) {
if (regs->vregs[i].pre_off_sleep)
msleep(regs->vregs[i].pre_off_sleep);
(void)regulator_set_load(regs->vregs[i].vreg,
regs->vregs[i].disable_load);
num_of_v = regulator_count_voltages(regs->vregs[i].vreg);
if (num_of_v > 0)
(void)regulator_set_voltage(regs->vregs[i].vreg,
0, regs->vregs[i].max_voltage);
(void)regulator_disable(regs->vregs[i].vreg);
if (regs->vregs[i].post_off_sleep)
msleep(regs->vregs[i].post_off_sleep);
}
return rc;
}
/**
* dsi_pwr_of_get_vreg_data - Parse regulator supply information
 * @utils:        Pointer to parser utils from which supply info is read.
* @regs: Pointer where regulator information will be copied to.
* @supply_name: Name of the supply node.
*
* return: error code in case of failure or 0 for success.
*/
int dsi_pwr_of_get_vreg_data(struct dsi_parser_utils *utils,
struct dsi_regulator_info *regs,
char *supply_name)
{
int rc = 0;
struct device_node *supply_root_node = NULL;
if (!utils || !regs) {
pr_err("Bad params\n");
return -EINVAL;
}
regs->count = 0;
supply_root_node = utils->get_child_by_name(utils->data, supply_name);
if (!supply_root_node) {
supply_root_node = of_parse_phandle(utils->node,
supply_name, 0);
if (!supply_root_node) {
pr_debug("No supply entry present for %s\n",
supply_name);
return -EINVAL;
}
}
regs->count = utils->get_available_child_count(supply_root_node);
if (regs->count == 0) {
pr_err("No vregs defined for %s\n", supply_name);
return -EINVAL;
}
regs->vregs = kcalloc(regs->count, sizeof(*regs->vregs), GFP_KERNEL);
if (!regs->vregs) {
regs->count = 0;
return -ENOMEM;
}
rc = dsi_pwr_parse_supply_node(utils, supply_root_node, regs);
if (rc) {
pr_err("failed to parse supply node for %s, rc = %d\n",
supply_name, rc);
kfree(regs->vregs);
regs->vregs = NULL;
regs->count = 0;
}
return rc;
}
/**
* dsi_pwr_get_dt_vreg_data - parse regulator supply information
* @dev: Device whose of_node needs to be parsed.
* @regs: Pointer where regulator information will be copied to.
* @supply_name: Name of the supply node.
*
* return: error code in case of failure or 0 for success.
*/
int dsi_pwr_get_dt_vreg_data(struct device *dev,
struct dsi_regulator_info *regs,
char *supply_name)
{
int rc = 0;
struct device_node *of_node = NULL;
struct device_node *supply_node = NULL;
struct device_node *supply_root_node = NULL;
struct dsi_parser_utils utils = *dsi_parser_get_of_utils();
if (!dev || !regs) {
pr_err("Bad params\n");
return -EINVAL;
}
of_node = dev->of_node;
regs->count = 0;
supply_root_node = of_get_child_by_name(of_node, supply_name);
if (!supply_root_node) {
supply_root_node = of_parse_phandle(of_node, supply_name, 0);
if (!supply_root_node) {
pr_debug("No supply entry present for %s\n",
supply_name);
return -EINVAL;
}
}
for_each_child_of_node(supply_root_node, supply_node)
regs->count++;
if (regs->count == 0) {
pr_err("No vregs defined for %s\n", supply_name);
return -EINVAL;
}
regs->vregs = devm_kcalloc(dev, regs->count, sizeof(*regs->vregs),
GFP_KERNEL);
if (!regs->vregs) {
regs->count = 0;
return -ENOMEM;
}
utils.data = of_node;
utils.node = of_node;
rc = dsi_pwr_parse_supply_node(&utils, supply_root_node, regs);
if (rc) {
pr_err("failed to parse supply node for %s, rc = %d\n",
supply_name, rc);
devm_kfree(dev, regs->vregs);
regs->vregs = NULL;
regs->count = 0;
}
return rc;
}
/**
* dsi_pwr_enable_regulator() - enable a set of regulators
* @regs: Pointer to set of regulators to enable or disable.
* @enable: Enable/Disable regulators.
*
* return: error code in case of failure or 0 for success.
*/
int dsi_pwr_enable_regulator(struct dsi_regulator_info *regs, bool enable)
{
int rc = 0;
if (regs->count == 0) {
pr_debug("No valid regulators to enable\n");
return 0;
}
if (!regs->vregs) {
pr_err("Invalid params\n");
return -EINVAL;
}
if (enable) {
if (regs->refcount == 0) {
rc = dsi_pwr_enable_vregs(regs, true);
if (rc)
pr_err("failed to enable regulators\n");
}
regs->refcount++;
} else {
if (regs->refcount == 0) {
pr_err("Unbalanced regulator off:%s\n",
regs->vregs->vreg_name);
} else {
regs->refcount--;
if (regs->refcount == 0) {
rc = dsi_pwr_enable_vregs(regs, false);
if (rc)
pr_err("failed to disable vregs\n");
}
}
}
return rc;
}
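
/*
 * Usage sketch (illustrative only: the supply-node name and the error
 * handling below are assumptions, not part of this file). A caller
 * typically parses the supply tree once at probe time, then brackets the
 * panel power state with balanced enable/disable calls, relying on
 * dsi_regulator_info.refcount to keep nested callers safe.
 */
#if 0	/* example, not compiled */
static int example_panel_power_cycle(struct device *dev,
				     struct dsi_regulator_info *power_info)
{
	int rc;

	/* one-time parse of the DT supply entries into power_info */
	rc = dsi_pwr_get_dt_vreg_data(dev, power_info,
				      "qcom,panel-supply-entries");
	if (rc)
		return rc;

	/* enables the whole set; a second call only bumps the refcount */
	rc = dsi_pwr_enable_regulator(power_info, true);
	if (rc)
		return rc;

	/* balanced disable; the last reference turns the rails off */
	return dsi_pwr_enable_regulator(power_info, false);
}
#endif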

86
msm/dsi/dsi_pwr.h Normal file

@@ -0,0 +1,86 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DSI_PWR_H_
#define _DSI_PWR_H_
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/regulator/consumer.h>
struct dsi_parser_utils;
/**
* struct dsi_vreg - regulator information for DSI regulators
* @vreg: Handle to the regulator.
* @vreg_name: Regulator name.
* @min_voltage: Minimum voltage in uV.
* @max_voltage: Maximum voltage in uV.
* @enable_load: Load, in uA, when enabled.
* @disable_load: Load, in uA, when disabled.
* @pre_on_sleep: Sleep, in ms, before enabling the regulator.
* @post_on_sleep: Sleep, in ms, after enabling the regulator.
* @pre_off_sleep: Sleep, in ms, before disabling the regulator.
* @post_off_sleep: Sleep, in ms, after disabling the regulator.
*/
struct dsi_vreg {
struct regulator *vreg;
char vreg_name[32];
u32 min_voltage;
u32 max_voltage;
u32 enable_load;
u32 disable_load;
u32 pre_on_sleep;
u32 post_on_sleep;
u32 pre_off_sleep;
u32 post_off_sleep;
};
/**
* struct dsi_regulator_info - set of vregs that are turned on/off together.
* @vregs: Array of dsi_vreg structures.
* @count: Number of vregs.
* @refcount: Reference counting for enabling.
*/
struct dsi_regulator_info {
struct dsi_vreg *vregs;
u32 count;
u32 refcount;
};
/**
* dsi_pwr_of_get_vreg_data - parse regulator supply information
 * @utils:       Pointer to parser utils from which supply info is read.
* @regs: Pointer where regulator information will be copied to.
* @supply_name: Name of the supply node.
*
* return: error code in case of failure or 0 for success.
*/
int dsi_pwr_of_get_vreg_data(struct dsi_parser_utils *utils,
struct dsi_regulator_info *regs,
char *supply_name);
/**
* dsi_pwr_get_dt_vreg_data - parse regulator supply information
* @dev: Device whose of_node needs to be parsed.
* @regs: Pointer where regulator information will be copied to.
* @supply_name: Name of the supply node.
*
* return: error code in case of failure or 0 for success.
*/
int dsi_pwr_get_dt_vreg_data(struct device *dev,
struct dsi_regulator_info *regs,
char *supply_name);
/**
* dsi_pwr_enable_regulator() - enable a set of regulators
* @regs: Pointer to set of regulators to enable or disable.
* @enable: Enable/Disable regulators.
*
* return: error code in case of failure or 0 for success.
*/
int dsi_pwr_enable_regulator(struct dsi_regulator_info *regs, bool enable);
#endif /* _DSI_PWR_H_ */

870
msm/msm_atomic.c Normal file

@@ -0,0 +1,870 @@
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/msm_drm_notify.h>
#include <linux/notifier.h>
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
#include "sde_trace.h"
#define MULTIPLE_CONN_DETECTED(x) (x > 1)
struct msm_commit {
struct drm_device *dev;
struct drm_atomic_state *state;
uint32_t crtc_mask;
bool nonblock;
struct kthread_work commit_work;
};
static BLOCKING_NOTIFIER_HEAD(msm_drm_notifier_list);
/**
* msm_drm_register_client - register a client notifier
* @nb: notifier block to callback on events
*
* This function registers a notifier callback function
* to msm_drm_notifier_list, which would be called when
* received unblank/power down event.
*/
int msm_drm_register_client(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&msm_drm_notifier_list,
nb);
}
EXPORT_SYMBOL(msm_drm_register_client);
/**
* msm_drm_unregister_client - unregister a client notifier
* @nb: notifier block to callback on events
*
* This function unregisters the callback function from
* msm_drm_notifier_list.
*/
int msm_drm_unregister_client(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&msm_drm_notifier_list,
nb);
}
EXPORT_SYMBOL(msm_drm_unregister_client);
/**
* msm_drm_notifier_call_chain - notify clients of drm_events
* @val: event MSM_DRM_EARLY_EVENT_BLANK or MSM_DRM_EVENT_BLANK
 * @v: notifier data, including display id and display blank
 * event (unblank or power down).
*/
static int msm_drm_notifier_call_chain(unsigned long val, void *v)
{
return blocking_notifier_call_chain(&msm_drm_notifier_list, val,
v);
}
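
/*
 * Client-side sketch (illustrative; the callback body and module wiring
 * are assumptions). An external driver that wants to track panel
 * blank/unblank transitions registers a notifier_block and inspects the
 * msm_drm_notifier payload, whose .data points at the blank event and
 * whose .id carries the crtc index, as filled in further below.
 */
#if 0	/* example, not compiled */
static int example_blank_cb(struct notifier_block *nb,
			    unsigned long event, void *data)
{
	struct msm_drm_notifier *n = data;
	int blank = *(int *)n->data;

	if (event == MSM_DRM_EVENT_BLANK && blank == MSM_DRM_BLANK_POWERDOWN)
		pr_debug("display %d powered down\n", n->id);

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_blank_cb,
};

/* at init: msm_drm_register_client(&example_nb); */
/* at exit: msm_drm_unregister_client(&example_nb); */
#endif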
static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state, bool enable)
{
struct drm_connector *connector = NULL;
struct drm_connector_state *conn_state = NULL;
int i = 0;
int conn_cnt = 0;
if (msm_is_mode_seamless(&crtc_state->mode) ||
msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode))
return true;
if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable)
return true;
if (!crtc_state->mode_changed && crtc_state->connectors_changed) {
for_each_old_connector_in_state(state, connector,
conn_state, i) {
if ((conn_state->crtc == crtc_state->crtc) ||
(connector->state->crtc ==
crtc_state->crtc))
conn_cnt++;
if (MULTIPLE_CONN_DETECTED(conn_cnt))
return true;
}
}
return false;
}
static inline bool _msm_seamless_for_conn(struct drm_connector *connector,
struct drm_connector_state *old_conn_state, bool enable)
{
if (!old_conn_state || !old_conn_state->crtc)
return false;
if (!old_conn_state->crtc->state->mode_changed &&
!old_conn_state->crtc->state->active_changed &&
old_conn_state->crtc->state->connectors_changed) {
if (old_conn_state->crtc == connector->state->crtc)
return true;
}
if (enable)
return false;
if (msm_is_mode_seamless(&connector->encoder->crtc->state->mode))
return true;
if (msm_is_mode_seamless_vrr(
&connector->encoder->crtc->state->adjusted_mode))
return true;
if (msm_is_mode_seamless_dms(
&connector->encoder->crtc->state->adjusted_mode))
return true;
return false;
}
/* clear specified crtcs (no longer pending update) */
static void commit_destroy(struct msm_commit *c)
{
struct msm_drm_private *priv = c->dev->dev_private;
uint32_t crtc_mask = c->crtc_mask;
/* End_atomic */
spin_lock(&priv->pending_crtcs_event.lock);
DBG("end: %08x", crtc_mask);
priv->pending_crtcs &= ~crtc_mask;
wake_up_all_locked(&priv->pending_crtcs_event);
spin_unlock(&priv->pending_crtcs_event.lock);
if (c->nonblock)
kfree(c);
}
static void msm_atomic_wait_for_commit_done(
struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
struct msm_drm_private *priv = old_state->dev->dev_private;
struct msm_kms *kms = priv->kms;
int i;
for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
if (!new_crtc_state->active)
continue;
if (drm_crtc_vblank_get(crtc))
continue;
kms->funcs->wait_for_crtc_commit_done(kms, crtc);
drm_crtc_vblank_put(crtc);
}
}
static void
msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct msm_drm_notifier notifier_data;
int i, blank;
SDE_ATRACE_BEGIN("msm_disable");
for_each_old_connector_in_state(old_state, connector,
old_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_crtc_state *old_crtc_state;
unsigned int crtc_idx;
/*
* Shut down everything that's in the changeset and currently
* still on. So need to check the old, saved state.
*/
if (!old_conn_state->crtc)
continue;
crtc_idx = drm_crtc_index(old_conn_state->crtc);
old_crtc_state = drm_atomic_get_old_crtc_state(old_state,
old_conn_state->crtc);
if (!old_crtc_state->active ||
!drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
continue;
encoder = old_conn_state->best_encoder;
/* We shouldn't get this far if we didn't previously have
* an encoder.. but WARN_ON() rather than explode.
*/
if (WARN_ON(!encoder))
continue;
if (_msm_seamless_for_conn(connector, old_conn_state, false))
continue;
funcs = encoder->helper_private;
DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
if (connector->state->crtc &&
connector->state->crtc->state->active_changed) {
blank = MSM_DRM_BLANK_POWERDOWN;
notifier_data.data = &blank;
notifier_data.id = crtc_idx;
msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
&notifier_data);
}
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call disable hooks twice.
*/
drm_bridge_disable(encoder->bridge);
/* Right function depends upon target state. */
if (connector->state->crtc && funcs->prepare)
funcs->prepare(encoder);
else if (funcs->disable)
funcs->disable(encoder);
else
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
drm_bridge_post_disable(encoder->bridge);
if (connector->state->crtc &&
connector->state->crtc->state->active_changed) {
DRM_DEBUG_ATOMIC("Notify blank\n");
msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
&notifier_data);
}
}
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
/* Shut down everything that needs a full modeset. */
if (!drm_atomic_crtc_needs_modeset(crtc->state))
continue;
if (!old_crtc_state->active)
continue;
if (_msm_seamless_for_crtc(old_state, crtc->state, false))
continue;
funcs = crtc->helper_private;
DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
crtc->base.id);
/* Right function depends upon target state. */
if (crtc->state->enable && funcs->prepare)
funcs->prepare(crtc);
else if (funcs->disable)
funcs->disable(crtc);
else
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
SDE_ATRACE_END("msm_disable");
}
static void
msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state;
int i;
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
if (!crtc->state->mode_changed)
continue;
funcs = crtc->helper_private;
if (crtc->state->enable && funcs->mode_set_nofb) {
DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
crtc->base.id);
funcs->mode_set_nofb(crtc);
}
}
for_each_old_connector_in_state(old_state, connector,
old_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_crtc_state *new_crtc_state;
struct drm_encoder *encoder;
struct drm_display_mode *mode, *adjusted_mode;
if (!connector->state->best_encoder)
continue;
encoder = connector->state->best_encoder;
funcs = encoder->helper_private;
new_crtc_state = connector->state->crtc->state;
mode = &new_crtc_state->mode;
adjusted_mode = &new_crtc_state->adjusted_mode;
if (!new_crtc_state->mode_changed &&
new_crtc_state->connectors_changed) {
if (_msm_seamless_for_conn(connector,
old_conn_state, false))
continue;
} else if (!new_crtc_state->mode_changed) {
continue;
}
DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call mode_set hooks twice.
*/
if (funcs->mode_set)
funcs->mode_set(encoder, mode, adjusted_mode);
drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
}
}
/**
* msm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
* This function shuts down all the outputs that need to be shut down and
* prepares them (if required) with the new mode.
*
* For compatibility with legacy crtc helpers this should be called before
* drm_atomic_helper_commit_planes(), which is what the default commit function
* does. But drivers with different needs can group the modeset commits together
* and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
*/
void msm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
msm_disable_outputs(dev, old_state);
drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
msm_crtc_set_mode(dev, old_state);
}
/**
* msm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
* @dev: DRM device
* @old_state: atomic state object with old state structures
*
* This function enables all the outputs with the new configuration which had to
* be turned off for the update.
*
* For compatibility with legacy crtc helpers this should be called after
* drm_atomic_helper_commit_planes(), which is what the default commit function
* does. But drivers with different needs can group the modeset commits together
* and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
*/
static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
struct msm_drm_notifier notifier_data;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
int bridge_enable_count = 0;
int i, blank;
bool splash = false;
SDE_ATRACE_BEGIN("msm_enable");
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state,
new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
/* Need to filter out CRTCs where only planes change. */
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
if (!new_crtc_state->active)
continue;
if (_msm_seamless_for_crtc(old_state, crtc->state, true))
continue;
funcs = crtc->helper_private;
if (crtc->state->enable) {
DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
crtc->base.id);
if (funcs->atomic_enable)
funcs->atomic_enable(crtc, old_crtc_state);
else
funcs->commit(crtc);
}
if (msm_needs_vblank_pre_modeset(
&new_crtc_state->adjusted_mode))
drm_crtc_wait_one_vblank(crtc);
}
for_each_new_connector_in_state(old_state, connector,
new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_connector_state *old_conn_state;
if (!new_conn_state->best_encoder)
continue;
if (!new_conn_state->crtc->state->active ||
!drm_atomic_crtc_needs_modeset(
new_conn_state->crtc->state))
continue;
old_conn_state = drm_atomic_get_old_connector_state(
old_state, connector);
if (_msm_seamless_for_conn(connector, old_conn_state, true))
continue;
encoder = connector->state->best_encoder;
funcs = encoder->helper_private;
DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
if (kms && kms->funcs && kms->funcs->check_for_splash)
splash = kms->funcs->check_for_splash(kms);
if (splash || (connector->state->crtc &&
connector->state->crtc->state->active_changed)) {
blank = MSM_DRM_BLANK_UNBLANK;
notifier_data.data = &blank;
notifier_data.id =
connector->state->crtc->index;
DRM_DEBUG_ATOMIC("Notify early unblank\n");
msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
&notifier_data);
}
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call enable hooks twice.
*/
drm_bridge_pre_enable(encoder->bridge);
++bridge_enable_count;
if (funcs->enable)
funcs->enable(encoder);
else
funcs->commit(encoder);
}
if (kms && kms->funcs && kms->funcs->commit) {
DRM_DEBUG_ATOMIC("triggering commit\n");
kms->funcs->commit(kms, old_state);
}
/* If no bridges were pre_enabled, skip iterating over them again */
if (bridge_enable_count == 0) {
SDE_ATRACE_END("msm_enable");
return;
}
for_each_new_connector_in_state(old_state, connector,
new_conn_state, i) {
struct drm_encoder *encoder;
struct drm_connector_state *old_conn_state;
if (!new_conn_state->best_encoder)
continue;
if (!new_conn_state->crtc->state->active ||
!drm_atomic_crtc_needs_modeset(
new_conn_state->crtc->state))
continue;
old_conn_state = drm_atomic_get_old_connector_state(
old_state, connector);
if (_msm_seamless_for_conn(connector, old_conn_state, true))
continue;
encoder = connector->state->best_encoder;
DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
drm_bridge_enable(encoder->bridge);
if (splash || (connector->state->crtc &&
connector->state->crtc->state->active_changed)) {
DRM_DEBUG_ATOMIC("Notify unblank\n");
msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
&notifier_data);
}
}
SDE_ATRACE_END("msm_enable");
}
int msm_atomic_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct msm_drm_private *priv = plane->dev->dev_private;
struct msm_kms *kms = priv->kms;
struct drm_gem_object *obj;
struct msm_gem_object *msm_obj;
struct dma_fence *fence;
if (!new_state->fb)
return 0;
obj = msm_framebuffer_bo(new_state->fb, 0);
msm_obj = to_msm_bo(obj);
fence = reservation_object_get_excl_rcu(msm_obj->resv);
drm_atomic_set_fence_for_plane(new_state, fence);
return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}
/* The (potentially) asynchronous part of the commit. At this point
* nothing can fail short of armageddon.
*/
static void complete_commit(struct msm_commit *c)
{
struct drm_atomic_state *state = c->state;
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
drm_atomic_helper_wait_for_fences(dev, state, false);
kms->funcs->prepare_commit(kms, state);
msm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state, 0);
msm_atomic_helper_commit_modeset_enables(dev, state);
/* NOTE: _wait_for_vblanks() only waits for vblank on
* enabled CRTCs. So we end up faulting when disabling
* due to (potentially) unref'ing the outgoing fb's
* before the vblank when the disable has latched.
*
* But if it did wait on disabled (or newly disabled)
* CRTCs, that would be racy (ie. we could have missed
* the irq. We need some way to poll for pipe shut
* down. Or just live with occasionally hitting the
* timeout in the CRTC disable path (which really should
* not be critical path)
*/
msm_atomic_wait_for_commit_done(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
kms->funcs->complete_commit(kms, state);
drm_atomic_state_put(state);
commit_destroy(c);
}
static void _msm_drm_commit_work_cb(struct kthread_work *work)
{
struct msm_commit *commit = NULL;
if (!work) {
DRM_ERROR("%s: Invalid commit work data!\n", __func__);
return;
}
commit = container_of(work, struct msm_commit, commit_work);
SDE_ATRACE_BEGIN("complete_commit");
complete_commit(commit);
SDE_ATRACE_END("complete_commit");
}
static struct msm_commit *commit_init(struct drm_atomic_state *state,
bool nonblock)
{
struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return NULL;
c->dev = state->dev;
c->state = state;
c->nonblock = nonblock;
kthread_init_work(&c->commit_work, _msm_drm_commit_work_cb);
return c;
}
/* Start display thread function */
static void msm_atomic_commit_dispatch(struct drm_device *dev,
struct drm_atomic_state *state, struct msm_commit *commit)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
struct drm_crtc_state *crtc_state = NULL;
int ret = -EINVAL, i = 0, j = 0;
bool nonblock;
/* cache since work will kfree commit in non-blocking case */
nonblock = commit->nonblock;
for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
for (j = 0; j < priv->num_crtcs; j++) {
if (priv->disp_thread[j].crtc_id ==
crtc->base.id) {
if (priv->disp_thread[j].thread) {
kthread_queue_work(
&priv->disp_thread[j].worker,
&commit->commit_work);
/* only return zero if work is
* queued successfully.
*/
ret = 0;
} else {
DRM_ERROR(" Error for crtc_id: %d\n",
priv->disp_thread[j].crtc_id);
}
break;
}
}
/*
* TODO: handle cases where there will be more than
* one crtc per commit cycle. Remove this check then.
* Current assumption is there will be only one crtc
* per commit cycle.
*/
if (j < priv->num_crtcs)
break;
}
if (ret) {
		/*
		 * This is not expected to happen, but at this point the
		 * state has been swapped and we could not dispatch to a
		 * crtc thread. Fall back to a synchronous complete_commit
		 * to try and ensure that SW and HW state don't get out
		 * of sync.
		 */
DRM_ERROR("failed to dispatch commit to any CRTC\n");
complete_commit(commit);
} else if (!nonblock) {
kthread_flush_work(&commit->commit_work);
}
/* free nonblocking commits in this context, after processing */
if (!nonblock)
kfree(commit);
}
/**
 * msm_atomic_commit - commit validated state object
* @dev: DRM device
* @state: the driver state object
* @nonblock: nonblocking commit
*
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails.
*
* RETURNS
* Zero for success or -errno.
*/
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_commit *c;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
int i, ret;
if (!priv || priv->shutdown_in_progress) {
DRM_ERROR("priv is null or shutdwon is in-progress\n");
return -EINVAL;
}
SDE_ATRACE_BEGIN("atomic_commit");
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret) {
SDE_ATRACE_END("atomic_commit");
return ret;
}
c = commit_init(state, nonblock);
if (!c) {
ret = -ENOMEM;
goto error;
}
/*
* Figure out what crtcs we have:
*/
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
c->crtc_mask |= drm_crtc_mask(crtc);
/*
* Figure out what fence to wait for:
*/
for_each_oldnew_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
if ((new_plane_state->fb != old_plane_state->fb)
&& new_plane_state->fb) {
struct drm_gem_object *obj =
msm_framebuffer_bo(new_plane_state->fb, 0);
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_fence *fence =
reservation_object_get_excl_rcu(msm_obj->resv);
drm_atomic_set_fence_for_plane(new_plane_state, fence);
}
}
/*
* Wait for pending updates on any of the same crtc's and then
* mark our set of crtc's as busy:
*/
/* Start Atomic */
spin_lock(&priv->pending_crtcs_event.lock);
ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
!(priv->pending_crtcs & c->crtc_mask));
if (ret == 0) {
DBG("start: %08x", c->crtc_mask);
priv->pending_crtcs |= c->crtc_mask;
}
spin_unlock(&priv->pending_crtcs_event.lock);
if (ret)
goto err_free;
WARN_ON(drm_atomic_helper_swap_state(state, false) < 0);
/*
* Provide the driver a chance to prepare for output fences. This is
* done after the point of no return, but before asynchronous commits
* are dispatched to work queues, so that the fence preparation is
* finished before the .atomic_commit returns.
*/
if (priv && priv->kms && priv->kms->funcs &&
priv->kms->funcs->prepare_fence)
priv->kms->funcs->prepare_fence(priv->kms, state);
/*
* Everything below can be run asynchronously without the need to grab
 * any modeset locks at all under one condition: it must be guaranteed
* that the asynchronous work has either been cancelled (if the driver
* supports it, which at least requires that the framebuffers get
* cleaned up with drm_atomic_helper_cleanup_planes()) or completed
* before the new state gets committed on the software side with
* drm_atomic_helper_swap_state().
*
* This scheme allows new atomic state updates to be prepared and
* checked in parallel to the asynchronous completion of the previous
 * update, which is important since compositors need to figure out the
 * composition of the next frame right after having submitted the
 * current layout.
*/
drm_atomic_state_get(state);
msm_atomic_commit_dispatch(dev, state, c);
SDE_ATRACE_END("atomic_commit");
return 0;
err_free:
kfree(c);
error:
drm_atomic_helper_cleanup_planes(dev, state);
SDE_ATRACE_END("atomic_commit");
return ret;
}
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev)
{
struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
kfree(state);
return NULL;
}
return &state->base;
}
void msm_atomic_state_clear(struct drm_atomic_state *s)
{
struct msm_kms_state *state = to_kms_state(s);
drm_atomic_state_default_clear(&state->base);
kfree(state->state);
state->state = NULL;
}
void msm_atomic_state_free(struct drm_atomic_state *state)
{
kfree(to_kms_state(state)->state);
drm_atomic_state_default_release(state);
kfree(state);
}
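
/*
 * Wiring sketch (an assumption for illustration; the real hookup lives in
 * msm_drv.c, whose diff is suppressed below): the helpers in this file are
 * meant to be installed as the device's atomic hooks in
 * drm_mode_config_funcs, so the DRM core routes commits and atomic-state
 * lifetime through them.
 */
#if 0	/* example, not compiled */
static const struct drm_mode_config_funcs example_mode_config_funcs = {
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = msm_atomic_commit,
	.atomic_state_alloc = msm_atomic_state_alloc,
	.atomic_state_clear = msm_atomic_state_clear,
	.atomic_state_free = msm_atomic_state_free,
};
#endif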
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
kms->funcs->prepare_commit(kms, state);
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state, 0);
drm_atomic_helper_commit_modeset_enables(dev, state);
msm_atomic_wait_for_commit_done(dev, state);
kms->funcs->complete_commit(kms, state);
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_cleanup_planes(dev, state);
}

2071
msm/msm_drv.c Normal file

File diff suppressed because it is too large

999
msm/msm_drv.h Normal file

@@ -0,0 +1,999 @@
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_DRV_H__
#define __MSM_DRV_H__
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/component.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <linux/sde_io_util.h>
#include <asm/sizes.h>
#include <linux/kthread.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
#include "sde_power_handle.h"
#define GET_MAJOR_REV(rev) ((rev) >> 28)
#define GET_MINOR_REV(rev) (((rev) >> 16) & 0xFFF)
#define GET_STEP_REV(rev) ((rev) & 0xFFFF)
struct msm_kms;
struct msm_gpu;
struct msm_mmu;
struct msm_mdss;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
struct msm_fence_cb;
struct msm_gem_address_space;
struct msm_gem_vma;
#define NUM_DOMAINS 4 /* one for KMS, then one per gpu core (?) */
#define MAX_CRTCS 8
#define MAX_PLANES 20
#define MAX_ENCODERS 8
#define MAX_BRIDGES 8
#define MAX_CONNECTORS 8
#define TEARDOWN_DEADLOCK_RETRY_MAX 5
struct msm_file_private {
rwlock_t queuelock;
struct list_head submitqueues;
int queueid;
/* update the refcount when user driver calls power_ctrl IOCTL */
unsigned short enable_refcnt;
/* protects enable_refcnt */
struct mutex power_lock;
};
enum msm_mdp_plane_property {
/* blob properties, always put these first */
PLANE_PROP_CSC_V1,
PLANE_PROP_CSC_DMA_V1,
PLANE_PROP_INFO,
PLANE_PROP_SCALER_LUT_ED,
PLANE_PROP_SCALER_LUT_CIR,
PLANE_PROP_SCALER_LUT_SEP,
PLANE_PROP_SKIN_COLOR,
PLANE_PROP_SKY_COLOR,
PLANE_PROP_FOLIAGE_COLOR,
PLANE_PROP_VIG_GAMUT,
PLANE_PROP_VIG_IGC,
PLANE_PROP_DMA_IGC,
PLANE_PROP_DMA_GC,
/* # of blob properties */
PLANE_PROP_BLOBCOUNT,
/* range properties */
PLANE_PROP_ZPOS = PLANE_PROP_BLOBCOUNT,
PLANE_PROP_ALPHA,
PLANE_PROP_COLOR_FILL,
PLANE_PROP_H_DECIMATE,
PLANE_PROP_V_DECIMATE,
PLANE_PROP_INPUT_FENCE,
PLANE_PROP_HUE_ADJUST,
PLANE_PROP_SATURATION_ADJUST,
PLANE_PROP_VALUE_ADJUST,
PLANE_PROP_CONTRAST_ADJUST,
PLANE_PROP_EXCL_RECT_V1,
PLANE_PROP_PREFILL_SIZE,
PLANE_PROP_PREFILL_TIME,
PLANE_PROP_SCALER_V1,
PLANE_PROP_SCALER_V2,
PLANE_PROP_INVERSE_PMA,
/* enum/bitmask properties */
PLANE_PROP_BLEND_OP,
PLANE_PROP_SRC_CONFIG,
PLANE_PROP_FB_TRANSLATION_MODE,
PLANE_PROP_MULTIRECT_MODE,
/* total # of properties */
PLANE_PROP_COUNT
};
enum msm_mdp_crtc_property {
CRTC_PROP_INFO,
CRTC_PROP_DEST_SCALER_LUT_ED,
CRTC_PROP_DEST_SCALER_LUT_CIR,
CRTC_PROP_DEST_SCALER_LUT_SEP,
/* # of blob properties */
CRTC_PROP_BLOBCOUNT,
/* range properties */
CRTC_PROP_INPUT_FENCE_TIMEOUT = CRTC_PROP_BLOBCOUNT,
CRTC_PROP_OUTPUT_FENCE,
CRTC_PROP_OUTPUT_FENCE_OFFSET,
CRTC_PROP_DIM_LAYER_V1,
CRTC_PROP_CORE_CLK,
CRTC_PROP_CORE_AB,
CRTC_PROP_CORE_IB,
CRTC_PROP_LLCC_AB,
CRTC_PROP_LLCC_IB,
CRTC_PROP_DRAM_AB,
CRTC_PROP_DRAM_IB,
CRTC_PROP_ROT_PREFILL_BW,
CRTC_PROP_ROT_CLK,
CRTC_PROP_ROI_V1,
CRTC_PROP_SECURITY_LEVEL,
CRTC_PROP_IDLE_TIMEOUT,
CRTC_PROP_DEST_SCALER,
CRTC_PROP_CAPTURE_OUTPUT,
CRTC_PROP_IDLE_PC_STATE,
/* total # of properties */
CRTC_PROP_COUNT
};
enum msm_mdp_conn_property {
/* blob properties, always put these first */
CONNECTOR_PROP_SDE_INFO,
CONNECTOR_PROP_MODE_INFO,
CONNECTOR_PROP_HDR_INFO,
CONNECTOR_PROP_EXT_HDR_INFO,
CONNECTOR_PROP_PP_DITHER,
CONNECTOR_PROP_HDR_METADATA,
/* # of blob properties */
CONNECTOR_PROP_BLOBCOUNT,
/* range properties */
CONNECTOR_PROP_OUT_FB = CONNECTOR_PROP_BLOBCOUNT,
CONNECTOR_PROP_RETIRE_FENCE,
CONNECTOR_PROP_DST_X,
CONNECTOR_PROP_DST_Y,
CONNECTOR_PROP_DST_W,
CONNECTOR_PROP_DST_H,
CONNECTOR_PROP_ROI_V1,
CONNECTOR_PROP_BL_SCALE,
CONNECTOR_PROP_SV_BL_SCALE,
/* enum/bitmask properties */
CONNECTOR_PROP_TOPOLOGY_NAME,
CONNECTOR_PROP_TOPOLOGY_CONTROL,
CONNECTOR_PROP_AUTOREFRESH,
CONNECTOR_PROP_LP,
CONNECTOR_PROP_FB_TRANSLATION_MODE,
CONNECTOR_PROP_QSYNC_MODE,
CONNECTOR_PROP_CMD_FRAME_TRIGGER_MODE,
/* total # of properties */
CONNECTOR_PROP_COUNT
};
#define MSM_GPU_MAX_RINGS 4
#define MAX_H_TILES_PER_DISPLAY 2
/**
* enum msm_display_compression_type - compression method used for pixel stream
* @MSM_DISPLAY_COMPRESSION_NONE: Pixel data is not compressed
 * @MSM_DISPLAY_COMPRESSION_DSC: DSC compression is used
*/
enum msm_display_compression_type {
MSM_DISPLAY_COMPRESSION_NONE,
MSM_DISPLAY_COMPRESSION_DSC,
};
/**
* enum msm_display_compression_ratio - compression ratio
 * @MSM_DISPLAY_COMPRESSION_RATIO_NONE: no compression
* @MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1: 2 to 1 compression
* @MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1: 3 to 1 compression
*/
enum msm_display_compression_ratio {
MSM_DISPLAY_COMPRESSION_RATIO_NONE,
MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1,
MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1,
MSM_DISPLAY_COMPRESSION_RATIO_MAX,
};
/**
* enum msm_display_caps - features/capabilities supported by displays
* @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
* @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
* @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
* @MSM_DISPLAY_CAP_EDID: EDID supported
* @MSM_DISPLAY_ESD_ENABLED: ESD feature enabled
* @MSM_DISPLAY_CAP_MST_MODE: Display with MST support
*/
enum msm_display_caps {
MSM_DISPLAY_CAP_VID_MODE = BIT(0),
MSM_DISPLAY_CAP_CMD_MODE = BIT(1),
MSM_DISPLAY_CAP_HOT_PLUG = BIT(2),
MSM_DISPLAY_CAP_EDID = BIT(3),
MSM_DISPLAY_ESD_ENABLED = BIT(4),
MSM_DISPLAY_CAP_MST_MODE = BIT(5),
};
/**
* enum msm_event_wait - type of HW events to wait for
* @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
* @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
* @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters)
* @MSM_ENC_ACTIVE_REGION - wait for the TG to be in active pixel region
*/
enum msm_event_wait {
MSM_ENC_COMMIT_DONE = 0,
MSM_ENC_TX_COMPLETE,
MSM_ENC_VBLANK,
MSM_ENC_ACTIVE_REGION,
};
/**
* struct msm_roi_alignment - region of interest alignment restrictions
* @xstart_pix_align: left x offset alignment restriction
* @width_pix_align: width alignment restriction
* @ystart_pix_align: top y offset alignment restriction
* @height_pix_align: height alignment restriction
* @min_width: minimum width restriction
* @min_height: minimum height restriction
*/
struct msm_roi_alignment {
uint32_t xstart_pix_align;
uint32_t width_pix_align;
uint32_t ystart_pix_align;
uint32_t height_pix_align;
uint32_t min_width;
uint32_t min_height;
};
/**
* struct msm_roi_caps - display's region of interest capabilities
* @enabled: true if some region of interest is supported
* @merge_rois: merge rois before sending to display
* @num_roi: maximum number of rois supported
* @align: roi alignment restrictions
*/
struct msm_roi_caps {
bool enabled;
bool merge_rois;
uint32_t num_roi;
struct msm_roi_alignment align;
};
/**
* struct msm_display_dsc_info - defines dsc configuration
* @version: DSC version.
* @scr_rev: DSC revision.
* @pic_height: Picture height in pixels.
* @pic_width: Picture width in pixels.
* @initial_lines: Number of initial lines stored in encoder.
* @pkt_per_line: Number of packets per line.
* @bytes_in_slice: Number of bytes in slice.
* @eol_byte_num: Valid bytes at the end of line.
* @pclk_per_line: Compressed width.
* @full_frame_slices: Number of slice per interface.
* @slice_height: Slice height in pixels.
* @slice_width: Slice width in pixels.
* @chunk_size: Chunk size in bytes for slice multiplexing.
* @slice_last_group_size: Size of last group in pixels.
* @bpp: Target bits per pixel.
* @bpc: Number of bits per component.
* @line_buf_depth: Line buffer bit depth.
* @block_pred_enable: Block prediction enabled/disabled.
* @vbr_enable: VBR mode.
* @enable_422: Indicates if input uses 4:2:2 sampling.
* @convert_rgb: DSC color space conversion.
* @input_10_bits: 10 bit per component input.
* @slice_per_pkt: Number of slices per packet.
* @initial_dec_delay: Initial decoding delay.
* @initial_xmit_delay: Initial transmission delay.
* @initial_scale_value: Scale factor value at the beginning of a slice.
* @scale_decrement_interval: Scale set up at the beginning of a slice.
* @scale_increment_interval: Scale set up at the end of a slice.
* @first_line_bpg_offset: Extra bits allocated on the first line of a slice.
* @nfl_bpg_offset: Slice specific settings.
* @slice_bpg_offset: Slice specific settings.
* @initial_offset: Initial offset at the start of a slice.
* @final_offset: Maximum end-of-slice value.
* @rc_model_size: Number of bits in RC model.
* @det_thresh_flatness: Flatness threshold.
* @max_qp_flatness: Maximum QP for flatness adjustment.
* @min_qp_flatness: Minimum QP for flatness adjustment.
* @edge_factor: Ratio to detect presence of edge.
* @quant_incr_limit0: QP threshold.
* @quant_incr_limit1: QP threshold.
* @tgt_offset_hi: Upper end of variability range.
* @tgt_offset_lo: Lower end of variability range.
* @buf_thresh: Thresholds in RC model
* @range_min_qp: Min QP allowed.
* @range_max_qp: Max QP allowed.
* @range_bpg_offset: Bits per group adjustment.
* @extra_width: Extra width required in timing calculations.
*/
struct msm_display_dsc_info {
u8 version;
u8 scr_rev;
int pic_height;
int pic_width;
int slice_height;
int slice_width;
int initial_lines;
int pkt_per_line;
int bytes_in_slice;
int bytes_per_pkt;
int eol_byte_num;
int pclk_per_line;
int full_frame_slices;
int slice_last_group_size;
int bpp;
int bpc;
int line_buf_depth;
int slice_per_pkt;
int chunk_size;
bool block_pred_enable;
int vbr_enable;
int enable_422;
int convert_rgb;
int input_10_bits;
int initial_dec_delay;
int initial_xmit_delay;
int initial_scale_value;
int scale_decrement_interval;
int scale_increment_interval;
int first_line_bpg_offset;
int nfl_bpg_offset;
int slice_bpg_offset;
int initial_offset;
int final_offset;
int rc_model_size;
int det_thresh_flatness;
int max_qp_flatness;
int min_qp_flatness;
int edge_factor;
int quant_incr_limit0;
int quant_incr_limit1;
int tgt_offset_hi;
int tgt_offset_lo;
u32 *buf_thresh;
char *range_min_qp;
char *range_max_qp;
char *range_bpg_offset;
u32 extra_width;
};
/**
* struct msm_compression_info - defined panel compression
* @comp_type: type of compression supported
* @comp_ratio: compression ratio
* @dsc_info: dsc configuration if the compression
* supported is DSC
*/
struct msm_compression_info {
enum msm_display_compression_type comp_type;
enum msm_display_compression_ratio comp_ratio;
union {
struct msm_display_dsc_info dsc_info;
};
};
/**
* struct msm_display_topology - defines a display topology pipeline
* @num_lm: number of layer mixers used
* @num_enc: number of compression encoder blocks used
* @num_intf: number of interfaces the panel is mounted on
*/
struct msm_display_topology {
u32 num_lm;
u32 num_enc;
u32 num_intf;
};
/**
* struct msm_mode_info - defines all msm custom mode info
* @frame_rate: frame_rate of the mode
* @vtotal: vtotal calculated for the mode
* @prefill_lines: prefill lines based on porches.
* @jitter_numer: display panel jitter numerator configuration
* @jitter_denom: display panel jitter denominator configuration
* @clk_rate: DSI bit clock per lane in HZ.
* @topology: supported topology for the mode
* @comp_info: compression info supported
* @roi_caps: panel roi capabilities
* @wide_bus_en: wide-bus mode cfg for interface module
 * @mdp_transfer_time_us: Specifies the mdp transfer time for command mode
* panels in microseconds.
*/
struct msm_mode_info {
uint32_t frame_rate;
uint32_t vtotal;
uint32_t prefill_lines;
uint32_t jitter_numer;
uint32_t jitter_denom;
uint64_t clk_rate;
struct msm_display_topology topology;
struct msm_compression_info comp_info;
struct msm_roi_caps roi_caps;
bool wide_bus_en;
u32 mdp_transfer_time_us;
};
/**
* struct msm_display_info - defines display properties
* @intf_type: DRM_MODE_CONNECTOR_ display type
* @capabilities: Bitmask of display flags
* @num_of_h_tiles: Number of horizontal tiles in case of split interface
* @h_tile_instance: Controller instance used per tile. Number of elements is
* based on num_of_h_tiles
* @is_connected: Set to true if display is connected
* @width_mm: Physical width
* @height_mm: Physical height
* @max_width: Max width of display. In case of hot pluggable display
* this is max width supported by controller
* @max_height: Max height of display. In case of hot pluggable display
* this is max height supported by controller
* @clk_rate: DSI bit clock per lane in HZ.
* @is_primary: Set to true if display is primary display
* @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
* used instead of panel TE in cmd mode panels
* @roi_caps: Region of interest capability info
 * @qsync_min_fps:	Minimum fps supported by Qsync feature
 * @te_source:		vsync source pin information
*/
struct msm_display_info {
int intf_type;
uint32_t capabilities;
uint32_t num_of_h_tiles;
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
bool is_connected;
unsigned int width_mm;
unsigned int height_mm;
uint32_t max_width;
uint32_t max_height;
uint64_t clk_rate;
bool is_primary;
bool is_te_using_watchdog_timer;
struct msm_roi_caps roi_caps;
uint32_t qsync_min_fps;
uint32_t te_source;
};
#define MSM_MAX_ROI 4
/**
* struct msm_roi_list - list of regions of interest for a drm object
* @num_rects: number of valid rectangles in the roi array
* @roi: list of roi rectangles
*/
struct msm_roi_list {
uint32_t num_rects;
struct drm_clip_rect roi[MSM_MAX_ROI];
};
/**
 * struct msm_display_kickoff_params - info for display features at kickoff
 * @rois: Regions of interest structure for mapping CRTC to Connector output
 * @hdr_meta: HDR metadata to be applied for this kickoff
 * @qsync_mode: Qsync mode, where 0: disabled 1: continuous mode
 * @qsync_update: Qsync settings were changed/updated
*/
struct msm_display_kickoff_params {
struct msm_roi_list *rois;
struct drm_msm_ext_hdr_metadata *hdr_meta;
uint32_t qsync_mode;
bool qsync_update;
};
/**
* struct msm_drm_event - defines custom event notification struct
* @base: base object required for event notification by DRM framework.
* @event: event object required for event notification by DRM framework.
 * @info: contains information of the DRM object for which events have
 * been requested.
* @data: memory location which contains response payload for event.
*/
struct msm_drm_event {
struct drm_pending_event base;
struct drm_event event;
struct drm_msm_event_req info;
u8 data[];
};
/* Commit/Event thread specific structure */
struct msm_drm_thread {
struct drm_device *dev;
struct task_struct *thread;
unsigned int crtc_id;
struct kthread_worker worker;
};
struct msm_drm_private {
struct drm_device *dev;
struct msm_kms *kms;
struct sde_power_handle phandle;
struct sde_power_client *pclient;
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
/* top level MDSS wrapper device (for MDP5 only) */
struct msm_mdss *mdss;
/* possibly this should be in the kms component, but it is
* shared by both mdp4 and mdp5..
*/
struct hdmi *hdmi;
/* eDP is for mdp5 only, but kms has not been created
* when edp_bind() and edp_init() are called. Here is the only
* place to keep the edp instance.
*/
struct msm_edp *edp;
/* DSI is shared by mdp4 and mdp5 */
struct msm_dsi *dsi[2];
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
struct msm_file_private *lastctx;
struct drm_fb_helper *fbdev;
struct msm_rd_state *rd; /* debugfs to dump all submits */
struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */
struct msm_perf_state *perf;
/* list of GEM objects: */
struct list_head inactive_list;
struct workqueue_struct *wq;
/* crtcs pending async atomic updates: */
uint32_t pending_crtcs;
wait_queue_head_t pending_crtcs_event;
unsigned int num_planes;
struct drm_plane *planes[MAX_PLANES];
unsigned int num_crtcs;
struct drm_crtc *crtcs[MAX_CRTCS];
struct msm_drm_thread disp_thread[MAX_CRTCS];
struct msm_drm_thread event_thread[MAX_CRTCS];
struct task_struct *pp_event_thread;
struct kthread_worker pp_event_worker;
unsigned int num_encoders;
struct drm_encoder *encoders[MAX_ENCODERS];
unsigned int num_bridges;
struct drm_bridge *bridges[MAX_BRIDGES];
unsigned int num_connectors;
struct drm_connector *connectors[MAX_CONNECTORS];
/* Properties */
struct drm_property *plane_property[PLANE_PROP_COUNT];
struct drm_property *crtc_property[CRTC_PROP_COUNT];
struct drm_property *conn_property[CONNECTOR_PROP_COUNT];
/* Color processing properties for the crtc */
struct drm_property **cp_property;
/* VRAM carveout, used when no IOMMU: */
struct {
unsigned long size;
dma_addr_t paddr;
/* NOTE: mm managed at the page level, size is in # of pages
* and position mm_node->start is in # of pages:
*/
struct drm_mm mm;
spinlock_t lock; /* Protects drm_mm node allocation/removal */
} vram;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
struct drm_atomic_state *pm_state;
/* task holding struct_mutex.. currently only used in submit path
* to detect and reject faults from copy_from_user() for submit
* ioctl.
*/
struct task_struct *struct_mutex_task;
/* list of clients waiting for events */
struct list_head client_event_list;
/* whether registered and drm_dev_unregister should be called */
bool registered;
/* msm drv debug root node */
struct dentry *debug_root;
/* update the flag when msm driver receives shutdown notification */
bool shutdown_in_progress;
};
/* get struct msm_kms * from drm_device * */
#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
struct msm_format {
uint32_t pixel_format;
};
int msm_atomic_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state);
void msm_atomic_commit_tail(struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock);
/* callback from wq once fence has passed: */
struct msm_fence_cb {
struct work_struct work;
uint32_t fence;
void (*func)(struct msm_fence_cb *cb);
};
void __msm_fence_worker(struct work_struct *work);
#define INIT_FENCE_CB(_cb, _func) do { \
INIT_WORK(&(_cb)->work, __msm_fence_worker); \
(_cb)->func = _func; \
} while (0)
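
/*
 * Usage sketch for the fence callback (illustrative; the handler name and
 * setup site are assumptions). A caller embeds struct msm_fence_cb in its
 * own state and initializes it with INIT_FENCE_CB(); __msm_fence_worker
 * then invokes ->func from the workqueue once the fence has passed.
 */
#if 0	/* example, not compiled */
static void example_fence_done(struct msm_fence_cb *cb)
{
	pr_debug("fence %u passed\n", cb->fence);
}

static struct msm_fence_cb example_cb;

/* during setup: INIT_FENCE_CB(&example_cb, example_fence_done); */
#endif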
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
unsigned int flags);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt, int npages,
unsigned int flags);
struct device *msm_gem_get_aspace_device(struct msm_gem_address_space *aspace);
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
/* For SDE display */
struct msm_gem_address_space *
msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
const char *name);
/**
* msm_gem_add_obj_to_aspace_active_list: adds obj to active obj list in aspace
*/
void msm_gem_add_obj_to_aspace_active_list(
struct msm_gem_address_space *aspace,
struct drm_gem_object *obj);
/**
* msm_gem_remove_obj_from_aspace_active_list: removes obj from active obj
* list in aspace
*/
void msm_gem_remove_obj_from_aspace_active_list(
struct msm_gem_address_space *aspace,
struct drm_gem_object *obj);
/**
* msm_gem_smmu_address_space_get: returns the aspace pointer for the requested
* domain
*/
struct msm_gem_address_space *
msm_gem_smmu_address_space_get(struct drm_device *dev,
unsigned int domain);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
/**
* msm_gem_aspace_domain_attach_detach_update: function to inform the
* attach/detach of the domain for this aspace
*/
void msm_gem_aspace_domain_attach_detach_update(
struct msm_gem_address_space *aspace,
bool is_detach);
/**
* msm_gem_address_space_register_cb: function to register callback for attach
* and detach of the domain
*/
int msm_gem_address_space_register_cb(
struct msm_gem_address_space *aspace,
void (*cb)(void *, bool),
void *cb_data);
/**
* msm_gem_address_space_unregister_cb: function to unregister callback
*/
int msm_gem_address_space_unregister_cb(
struct msm_gem_address_space *aspace,
void (*cb)(void *, bool),
void *cb_data);
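/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * register a client callback that fires on SMMU domain attach/detach
 * transitions for the aspace; 'priv' comes back as the first argument.
 * Pair with msm_gem_address_space_unregister_cb() on teardown.
 */
static inline int example_watch_aspace(struct msm_gem_address_space *aspace,
		void (*cb)(void *priv, bool state), void *priv)
{
	return msm_gem_address_space_register_cb(aspace, cb, priv);
}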
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
void msm_gem_shrinker_init(struct drm_device *dev);
void msm_gem_shrinker_cleanup(struct drm_device *dev);
void msm_gem_sync(struct drm_gem_object *obj);
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
void msm_gem_put_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
uint32_t size, uint32_t flags);
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova);
void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
int msm_gem_delayed_import(struct drm_gem_object *obj);
void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable);
void msm_framebuffer_set_keepattrs(struct drm_framebuffer *fb, bool enable);
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace);
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane);
uint32_t msm_framebuffer_phys(struct drm_framebuffer *fb, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **bos);
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *msm_alloc_stolen_fb(struct drm_device *dev,
int w, int h, int p, uint32_t format);
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
void msm_fbdev_free(struct drm_device *dev);
struct hdmi;
#ifdef CONFIG_DRM_MSM_HDMI
int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
struct drm_encoder *encoder);
void __init msm_hdmi_register(void);
void __exit msm_hdmi_unregister(void);
#else
static inline void __init msm_hdmi_register(void)
{
}
static inline void __exit msm_hdmi_unregister(void)
{
}
#endif
struct msm_edp;
#ifdef CONFIG_DRM_MSM_EDP
void __init msm_edp_register(void);
void __exit msm_edp_unregister(void);
int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
struct drm_encoder *encoder);
#else
static inline void __init msm_edp_register(void)
{
}
static inline void __exit msm_edp_unregister(void)
{
}
static inline int msm_edp_modeset_init(struct msm_edp *edp,
struct drm_device *dev, struct drm_encoder *encoder)
{
return -EINVAL;
}
#endif
struct msm_dsi;
/**
* msm_mode_object_event_notify - notify user-space clients of drm object
* events.
* @obj: mode object (crtc/connector) that is generating the event.
* @dev: drm device owning the mode object.
* @event: event that needs to be notified.
* @payload: payload for the event.
*/
void msm_mode_object_event_notify(struct drm_mode_object *obj,
struct drm_device *dev, struct drm_event *event, u8 *payload);
#ifdef CONFIG_DRM_MSM_DSI
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder);
#else
static inline void __init msm_dsi_register(void)
{
}
static inline void __exit msm_dsi_unregister(void)
{
}
static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
struct drm_device *dev,
struct drm_encoder *encoder)
{
return -EINVAL;
}
#endif
#ifdef CONFIG_DRM_MSM_MDP5
void __init msm_mdp_register(void);
void __exit msm_mdp_unregister(void);
#else
static inline void __init msm_mdp_register(void)
{
}
static inline void __exit msm_mdp_unregister(void)
{
}
#endif
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
int msm_debugfs_late_init(struct drm_device *dev);
int msm_rd_debugfs_init(struct drm_minor *minor);
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...);
int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...) {}
static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif
struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk);
struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
const char *name);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname);
unsigned long msm_iomap_size(struct platform_device *pdev, const char *name);
void msm_iounmap(struct platform_device *dev, void __iomem *addr);
void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
static inline int align_pitch(int width, int bpp)
{
int bytespp = (bpp + 7) / 8;
/* adreno needs pitch aligned to 32 pixels: */
return bytespp * ALIGN(width, 32);
}
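/* A quick worked example of the helper above: align_pitch(1080, 32)
 * gives bytespp = (32 + 7) / 8 = 4 and ALIGN(1080, 32) = 1088, so the
 * returned pitch is 4 * 1088 = 4352 bytes.
 */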
/* for the generated headers: */
#define INVALID_IDX(idx) ({BUG(); 0;})
#define fui(x) ({BUG(); 0;})
#define util_float_to_half(x) ({BUG(); 0;})
#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
{
ktime_t now = ktime_get();
unsigned long remaining_jiffies;
if (ktime_compare(*timeout, now) < 0) {
remaining_jiffies = 0;
} else {
ktime_t rem = ktime_sub(*timeout, now);
struct timespec ts = ktime_to_timespec(rem);
remaining_jiffies = timespec_to_jiffies(&ts);
}
return remaining_jiffies;
}
#endif /* __MSM_DRV_H__ */

msm/msm_fb.c Normal file
@@ -0,0 +1,456 @@
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
#define MSM_FRAMEBUFFER_FLAG_KMAP BIT(0)
struct msm_framebuffer {
struct drm_framebuffer base;
const struct msm_format *format;
void *vaddr[MAX_PLANE];
atomic_t kmap_count;
u32 flags;
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
.create_handle = drm_gem_fb_create_handle,
.destroy = drm_gem_fb_destroy,
};
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct msm_framebuffer *msm_fb;
int i, n;
if (!fb) {
DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
return;
}
msm_fb = to_msm_framebuffer(fb);
n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
fb->width, fb->height, (char *)&fb->format->format,
drm_framebuffer_read_refcount(fb), fb->base.id);
for (i = 0; i < n; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
msm_gem_describe(fb->obj[i], m);
}
}
#endif
void msm_framebuffer_set_keepattrs(struct drm_framebuffer *fb, bool enable)
{
struct msm_framebuffer *msm_fb;
int i, n;
struct drm_gem_object *bo;
struct msm_gem_object *msm_obj;
if (!fb) {
DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
return;
}
if (!fb->format) {
DRM_ERROR("from:%pS null fb->format\n",
__builtin_return_address(0));
return;
}
msm_fb = to_msm_framebuffer(fb);
n = fb->format->num_planes;
for (i = 0; i < n; i++) {
bo = msm_framebuffer_bo(fb, i);
if (bo) {
msm_obj = to_msm_bo(bo);
if (enable)
msm_obj->flags |= MSM_BO_KEEPATTRS;
else
msm_obj->flags &= ~MSM_BO_KEEPATTRS;
}
}
}
void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable)
{
struct msm_framebuffer *msm_fb;
if (!fb) {
DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
return;
}
msm_fb = to_msm_framebuffer(fb);
if (enable)
msm_fb->flags |= MSM_FRAMEBUFFER_FLAG_KMAP;
else
msm_fb->flags &= ~MSM_FRAMEBUFFER_FLAG_KMAP;
}
static int msm_framebuffer_kmap(struct drm_framebuffer *fb)
{
struct msm_framebuffer *msm_fb;
int i, n;
struct drm_gem_object *bo;
if (!fb) {
DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
return -EINVAL;
}
msm_fb = to_msm_framebuffer(fb);
n = fb->format->num_planes;
if (atomic_inc_return(&msm_fb->kmap_count) > 1)
return 0;
for (i = 0; i < n; i++) {
bo = msm_framebuffer_bo(fb, i);
if (!bo || !bo->dma_buf) {
msm_fb->vaddr[i] = NULL;
continue;
}
dma_buf_begin_cpu_access(bo->dma_buf, DMA_BIDIRECTIONAL);
msm_fb->vaddr[i] = dma_buf_kmap(bo->dma_buf, 0);
DRM_INFO("FB[%u]: vaddr[%d]:%ux%u:0x%llx\n", fb->base.id, i,
fb->width, fb->height, (u64) msm_fb->vaddr[i]);
}
return 0;
}
static void msm_framebuffer_kunmap(struct drm_framebuffer *fb)
{
struct msm_framebuffer *msm_fb;
int i, n;
struct drm_gem_object *bo;
if (!fb) {
DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
return;
}
msm_fb = to_msm_framebuffer(fb);
n = fb->format->num_planes;
if (atomic_dec_return(&msm_fb->kmap_count) > 0)
return;
for (i = 0; i < n; i++) {
bo = msm_framebuffer_bo(fb, i);
if (!bo || !msm_fb->vaddr[i])
continue;
if (bo->dma_buf) {
dma_buf_kunmap(bo->dma_buf, 0, msm_fb->vaddr[i]);
dma_buf_end_cpu_access(bo->dma_buf, DMA_BIDIRECTIONAL);
}
msm_fb->vaddr[i] = NULL;
}
}
/* prepare/pin all the fb's bo's for scanout. Note that it is not valid
* to prepare an fb with multiple different initiator 'id's. But that
* should be fine, since only the scanout (mdpN) side of things needs
* this, the gpu doesn't care about fb's.
*/
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb;
int ret, i, n;
uint64_t iova;
if (!fb) {
DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
return -EINVAL;
}
msm_fb = to_msm_framebuffer(fb);
n = fb->format->num_planes;
for (i = 0; i < n; i++) {
ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
}
if (msm_fb->flags & MSM_FRAMEBUFFER_FLAG_KMAP)
msm_framebuffer_kmap(fb);
return 0;
}
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb;
int i, n;
if (fb == NULL) {
DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
return;
}
msm_fb = to_msm_framebuffer(fb);
n = fb->format->num_planes;
if (msm_fb->flags & MSM_FRAMEBUFFER_FLAG_KMAP)
msm_framebuffer_kunmap(fb);
for (i = 0; i < n; i++)
msm_gem_put_iova(fb->obj[i], aspace);
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane)
{
if (!fb) {
DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
return -EINVAL;
}
if (!fb->obj[plane])
return 0;
return msm_gem_iova(fb->obj[plane], aspace) + fb->offsets[plane];
}
uint32_t msm_framebuffer_phys(struct drm_framebuffer *fb,
int plane)
{
struct msm_framebuffer *msm_fb;
dma_addr_t phys_addr;
if (!fb) {
DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
return -EINVAL;
}
msm_fb = to_msm_framebuffer(fb);
if (!msm_fb->base.obj[plane])
return 0;
phys_addr = msm_gem_get_dma_addr(msm_fb->base.obj[plane]);
if (!phys_addr)
return 0;
return phys_addr + fb->offsets[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
if (!fb) {
DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
return ERR_PTR(-EINVAL);
}
return drm_gem_fb_get_obj(fb, plane);
}
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
{
return fb ? (to_msm_framebuffer(fb))->format : NULL;
}
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *bos[4] = {0};
struct drm_framebuffer *fb;
int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
for (i = 0; i < n; i++) {
bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
if (!bos[i]) {
ret = -ENXIO;
goto out_unref;
}
}
fb = msm_framebuffer_init(dev, mode_cmd, bos);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto out_unref;
}
return fb;
out_unref:
for (i = 0; i < n; i++)
drm_gem_object_put_unlocked(bos[i]);
return ERR_PTR(ret);
}
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_framebuffer *msm_fb = NULL;
struct drm_framebuffer *fb;
const struct msm_format *format;
int ret, i, num_planes;
unsigned int hsub, vsub;
bool is_modified = false;
DBG("create framebuffer: dev=%pK, mode_cmd=%pK (%dx%d@%4.4s)",
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
num_planes = drm_format_num_planes(mode_cmd->pixel_format);
hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
if (!format) {
dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
ret = -EINVAL;
goto fail;
}
msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL);
if (!msm_fb) {
ret = -ENOMEM;
goto fail;
}
fb = &msm_fb->base;
msm_fb->format = format;
atomic_set(&msm_fb->kmap_count, 0);
if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
for (i = 0; i < ARRAY_SIZE(mode_cmd->modifier); i++) {
if (mode_cmd->modifier[i]) {
is_modified = true;
break;
}
}
}
if (num_planes > ARRAY_SIZE(fb->obj)) {
ret = -EINVAL;
goto fail;
}
if (is_modified) {
if (!kms->funcs->check_modified_format) {
dev_err(dev->dev, "can't check modified fb format\n");
ret = -EINVAL;
goto fail;
} else {
ret = kms->funcs->check_modified_format(
kms, msm_fb->format, mode_cmd, bos);
if (ret)
goto fail;
}
} else {
for (i = 0; i < num_planes; i++) {
unsigned int width = mode_cmd->width / (i ? hsub : 1);
unsigned int height = mode_cmd->height / (i ? vsub : 1);
unsigned int min_size;
unsigned int cpp;
cpp = drm_format_plane_cpp(mode_cmd->pixel_format, i);
min_size = (height - 1) * mode_cmd->pitches[i]
+ width * cpp
+ mode_cmd->offsets[i];
if (bos[i]->size < min_size) {
ret = -EINVAL;
goto fail;
}
}
}
for (i = 0; i < num_planes; i++)
msm_fb->base.obj[i] = bos[i];
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
goto fail;
}
DBG("create: FB ID: %d (%pK)", fb->base.id, fb);
return fb;
fail:
kfree(msm_fb);
return ERR_PTR(ret);
}
struct drm_framebuffer *
msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format)
{
struct drm_mode_fb_cmd2 mode_cmd = {
.pixel_format = format,
.width = w,
.height = h,
.pitches = { p },
};
struct drm_gem_object *bo;
struct drm_framebuffer *fb;
int size;
/* allocate backing bo */
size = mode_cmd.pitches[0] * mode_cmd.height;
DBG("allocating %d bytes for fb %d", size, dev->primary->index);
bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_STOLEN);
if (IS_ERR(bo)) {
dev_warn(dev->dev, "could not allocate stolen bo\n");
/* try regular bo: */
bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
}
if (IS_ERR(bo)) {
dev_err(dev->dev, "failed to allocate buffer object\n");
return ERR_CAST(bo);
}
fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
drm_gem_object_put_unlocked(bo);
return ERR_CAST(fb);
}
return fb;
}

msm/msm_gem.c Normal file (1234 lines)
File diff suppressed because it is too large

msm/msm_gem.h Normal file
@@ -0,0 +1,211 @@
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__
#include <linux/kref.h>
#include <linux/reservation.h>
#include "msm_drv.h"
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
#define MSM_BO_KEEPATTRS 0x20000000 /* keep h/w bus attributes */
#define MSM_BO_SKIPSYNC 0x40000000 /* skip dmabuf cpu sync */
#define MSM_BO_EXTBUF 0x80000000 /* indicate BO is an import buffer */
struct msm_gem_object;
struct msm_gem_aspace_ops {
int (*map)(struct msm_gem_address_space *space, struct msm_gem_vma *vma,
struct sg_table *sgt, int npages, unsigned int flags);
void (*unmap)(struct msm_gem_address_space *space,
struct msm_gem_vma *vma, struct sg_table *sgt,
unsigned int flags);
void (*destroy)(struct msm_gem_address_space *space);
void (*add_to_active)(struct msm_gem_address_space *space,
struct msm_gem_object *obj);
void (*remove_from_active)(struct msm_gem_address_space *space,
struct msm_gem_object *obj);
int (*register_cb)(struct msm_gem_address_space *space,
void (*cb)(void *cb, bool data),
void *cb_data);
int (*unregister_cb)(struct msm_gem_address_space *space,
void (*cb)(void *cb, bool data),
void *cb_data);
};
struct aspace_client {
void (*cb)(void *cb, bool data);
void *cb_data;
struct list_head list;
};
struct msm_gem_address_space {
const char *name;
/* NOTE: mm managed at the page level, size is in # of pages
* and position mm_node->start is in # of pages:
*/
struct drm_mm mm;
spinlock_t lock; /* Protects drm_mm node allocation/removal */
struct msm_mmu *mmu;
struct kref kref;
bool domain_attached;
const struct msm_gem_aspace_ops *ops;
struct drm_device *dev;
/* list of mapped objects */
struct list_head active_list;
/* list of clients */
struct list_head clients;
struct mutex list_lock; /* Protects active_list & clients */
};
struct msm_gem_vma {
struct drm_mm_node node;
uint64_t iova;
struct msm_gem_address_space *aspace;
struct list_head list; /* node in msm_gem_object::vmas */
};
struct msm_gem_object {
struct drm_gem_object base;
uint32_t flags;
/**
* Advice: are the backing pages purgeable?
*/
uint8_t madv;
/**
* count of active vmap'ing
*/
uint8_t vmap_count;
/* An object is either:
* inactive - on priv->inactive_list
* active - on one of the gpu's active lists.. well, at least for
* now we don't have hw sync between the 2d and 3d pipes on devices
* which have both, meaning we need to block on submit if a bo is
* already on another ring
*/
struct list_head mm_list;
struct msm_gpu *gpu; /* non-null if active */
/* Transiently in the process of submit ioctl, objects associated
* with the submit are on submit->bo_list.. this only lasts for
* the duration of the ioctl, so one bo can never be on multiple
* submit lists.
*/
struct list_head submit_entry;
struct page **pages;
struct sg_table *sgt;
void *vaddr;
struct list_head vmas; /* list of msm_gem_vma */
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
struct reservation_object _resv;
/* For physically contiguous buffers. Used when we don't have
* an IOMMU. Also used for stolen/splashscreen buffer.
*/
struct drm_mm_node *vram_node;
struct mutex lock; /* Protects resources associated with bo */
struct list_head iova_list;
struct msm_gem_address_space *aspace;
bool in_active_list;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
static inline bool is_active(struct msm_gem_object *msm_obj)
{
return msm_obj->gpu != NULL;
}
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
}
static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}
/* The shrinker can be triggered while we hold objA->lock, and need
* to grab objB->lock to purge it. Lockdep just sees these as a single
* class of lock, so we use subclasses to teach it the difference.
*
* OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
* OBJ_LOCK_SHRINKER is used by shrinker.
*
* It is *essential* that we never go down paths that could trigger the
* shrinker for a purgeable object. This is ensured by checking that
* msm_obj->madv == MSM_MADV_WILLNEED.
*/
enum msm_gem_lock {
OBJ_LOCK_NORMAL,
OBJ_LOCK_SHRINKER,
};
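/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * how a caller would hand the subclass to lockdep when taking an object
 * lock, e.g. from the shrinker path.
 */
static inline void example_obj_lock(struct msm_gem_object *msm_obj,
		enum msm_gem_lock subclass)
{
	mutex_lock_nested(&msm_obj->lock, subclass);
}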
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
* make it easier to unwind when things go wrong, etc). This only
* lasts for the duration of the submit-ioctl.
*/
struct msm_gem_submit {
struct drm_device *dev;
struct msm_gpu *gpu;
struct list_head node; /* node in ring submit list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
uint32_t seqno; /* Sequence number of the submit on the ring */
struct dma_fence *fence;
struct msm_gpu_submitqueue *queue;
struct pid *pid; /* submitting process */
bool valid; /* true if no cmdstream patching needed */
bool in_rb; /* "sudo" mode, copy cmds into RB */
struct msm_ringbuffer *ring;
unsigned int nr_cmds;
unsigned int nr_bos;
struct {
uint32_t type;
uint32_t size; /* in dwords */
uint64_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */
} *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
struct msm_gem_object *obj;
uint64_t iova;
} bos[0];
};
#endif /* __MSM_GEM_H__ */

msm/msm_gem_prime.c Normal file
@@ -0,0 +1,205 @@
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_kms.h"
#include <linux/dma-buf.h>
#include <linux/ion.h>
#include <linux/msm_ion.h>
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int npages = obj->size >> PAGE_SHIFT;
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
return NULL;
return drm_prime_pages_to_sg(msm_obj->pages, npages);
}
void *msm_gem_prime_vmap(struct drm_gem_object *obj)
{
return msm_gem_get_vaddr(obj);
}
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
msm_gem_put_vaddr(obj);
}
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
int ret;
ret = drm_gem_mmap_obj(obj, obj->size, vma);
if (ret < 0)
return ret;
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
return msm_gem_import(dev, attach->dmabuf, sg);
}
int msm_gem_prime_pin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
msm_gem_get_pages(obj);
return 0;
}
void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
msm_gem_put_pages(obj);
}
struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
return msm_obj->resv;
}
struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
struct sg_table *sgt = NULL;
struct drm_gem_object *obj;
struct device *attach_dev = NULL;
unsigned long flags = 0;
struct msm_drm_private *priv;
struct msm_kms *kms;
int ret;
if (!dma_buf || !dev->dev_private)
return ERR_PTR(-EINVAL);
priv = dev->dev_private;
kms = priv->kms;
if (dma_buf->priv && !dma_buf->ops->begin_cpu_access) {
obj = dma_buf->priv;
if (obj->dev == dev) {
/*
* Importing dmabuf exported from our own gem increases
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_get(obj);
return obj;
}
}
if (!dev->driver->gem_prime_import_sg_table) {
DRM_ERROR("NULL gem_prime_import_sg_table\n");
return ERR_PTR(-EINVAL);
}
get_dma_buf(dma_buf);
ret = dma_buf_get_flags(dma_buf, &flags);
if (ret) {
DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
goto fail_put;
}
if (!kms || !kms->funcs->get_address_space_device) {
DRM_ERROR("invalid kms ops\n");
ret = -EINVAL;
goto fail_put;
}
if (flags & ION_FLAG_SECURE) {
if (flags & ION_FLAG_CP_PIXEL)
attach_dev = kms->funcs->get_address_space_device(kms,
MSM_SMMU_DOMAIN_SECURE);
else if ((flags & ION_FLAG_CP_SEC_DISPLAY)
|| (flags & ION_FLAG_CP_CAMERA_PREVIEW))
attach_dev = dev->dev;
else
DRM_ERROR("invalid ion secure flag: 0x%x\n", flags);
} else {
attach_dev = kms->funcs->get_address_space_device(kms,
MSM_SMMU_DOMAIN_UNSECURE);
}
if (!attach_dev) {
DRM_ERROR("aspace device not found for domain:%d\n", domain);
ret = -EINVAL;
goto fail_put;
}
attach = dma_buf_attach(dma_buf, attach_dev);
if (IS_ERR(attach)) {
DRM_ERROR("dma_buf_attach failure, err=%ld\n", PTR_ERR(attach));
ret = PTR_ERR(attach);
goto fail_put;
}
/*
* For cached buffers where CPU access is required, dma_map_attachment
* must be called now to allow user-space to perform cpu sync begin/end
* otherwise do delayed mapping during the commit.
*/
if (flags & ION_FLAG_CACHED) {
attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
sgt = dma_buf_map_attachment(
attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
DRM_ERROR(
"dma_buf_map_attachment failure, err=%d\n",
ret);
goto fail_detach;
}
}
/*
* If importing a NULL sg table (i.e. for uncached buffers),
* create a drm gem object with only the dma buf attachment.
*/
obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
DRM_ERROR("gem_prime_import_sg_table failure, err=%d\n", ret);
goto fail_unmap;
}
obj->import_attach = attach;
return obj;
fail_unmap:
if (sgt)
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
fail_put:
dma_buf_put(dma_buf);
return ERR_PTR(ret);
}

msm/msm_gem_vma.c Normal file
@@ -0,0 +1,373 @@
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
/* SDE address space operations */
static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
unsigned int flags)
{
if (!vma->iova)
return;
if (aspace) {
aspace->mmu->funcs->unmap_dma_buf(aspace->mmu, sgt,
DMA_BIDIRECTIONAL, flags);
}
vma->iova = 0;
msm_gem_address_space_put(aspace);
}
static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
int npages, unsigned int flags)
{
int ret = -EINVAL;
if (!aspace || !aspace->domain_attached)
return ret;
ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt,
DMA_BIDIRECTIONAL, flags);
if (!ret)
vma->iova = sg_dma_address(sgt->sgl);
/* Get a reference to the aspace to keep it around */
kref_get(&aspace->kref);
return ret;
}
static void smmu_aspace_destroy(struct msm_gem_address_space *aspace)
{
if (aspace->mmu)
aspace->mmu->funcs->destroy(aspace->mmu);
}
static void smmu_aspace_add_to_active(
struct msm_gem_address_space *aspace,
struct msm_gem_object *msm_obj)
{
WARN_ON(!mutex_is_locked(&aspace->list_lock));
list_move_tail(&msm_obj->iova_list, &aspace->active_list);
}
static void smmu_aspace_remove_from_active(
struct msm_gem_address_space *aspace,
struct msm_gem_object *obj)
{
struct msm_gem_object *msm_obj, *next;
WARN_ON(!mutex_is_locked(&aspace->list_lock));
list_for_each_entry_safe(msm_obj, next, &aspace->active_list,
iova_list) {
if (msm_obj == obj) {
list_del(&msm_obj->iova_list);
break;
}
}
}
static int smmu_aspace_register_cb(
struct msm_gem_address_space *aspace,
void (*cb)(void *, bool),
void *cb_data)
{
struct aspace_client *aclient = NULL;
struct aspace_client *temp;
if (!aspace)
return -EINVAL;
if (!aspace->domain_attached)
return -EACCES;
aclient = kzalloc(sizeof(*aclient), GFP_KERNEL);
if (!aclient)
return -ENOMEM;
aclient->cb = cb;
aclient->cb_data = cb_data;
INIT_LIST_HEAD(&aclient->list);
/* check if callback is already registered */
mutex_lock(&aspace->list_lock);
list_for_each_entry(temp, &aspace->clients, list) {
if ((temp->cb == aclient->cb) &&
(temp->cb_data == aclient->cb_data)) {
kfree(aclient);
mutex_unlock(&aspace->list_lock);
return -EEXIST;
}
}
list_move_tail(&aclient->list, &aspace->clients);
mutex_unlock(&aspace->list_lock);
return 0;
}
static int smmu_aspace_unregister_cb(
struct msm_gem_address_space *aspace,
void (*cb)(void *, bool),
void *cb_data)
{
struct aspace_client *aclient = NULL;
int rc = -ENOENT;
if (!aspace || !cb)
return -EINVAL;
mutex_lock(&aspace->list_lock);
list_for_each_entry(aclient, &aspace->clients, list) {
if ((aclient->cb == cb) &&
(aclient->cb_data == cb_data)) {
list_del(&aclient->list);
kfree(aclient);
rc = 0;
break;
}
}
mutex_unlock(&aspace->list_lock);
return rc;
}
static const struct msm_gem_aspace_ops smmu_aspace_ops = {
.map = smmu_aspace_map_vma,
.unmap = smmu_aspace_unmap_vma,
.destroy = smmu_aspace_destroy,
.add_to_active = smmu_aspace_add_to_active,
.remove_from_active = smmu_aspace_remove_from_active,
.register_cb = smmu_aspace_register_cb,
.unregister_cb = smmu_aspace_unregister_cb,
};
struct msm_gem_address_space *
msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
const char *name)
{
struct msm_gem_address_space *aspace;
if (!mmu)
return ERR_PTR(-EINVAL);
aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
if (!aspace)
return ERR_PTR(-ENOMEM);
spin_lock_init(&aspace->lock);
aspace->dev = dev;
aspace->name = name;
aspace->mmu = mmu;
aspace->ops = &smmu_aspace_ops;
INIT_LIST_HEAD(&aspace->active_list);
INIT_LIST_HEAD(&aspace->clients);
kref_init(&aspace->kref);
mutex_init(&aspace->list_lock);
return aspace;
}
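/*
 * Usage sketch (hypothetical caller; the helper name and "unsecure"
 * label are assumptions): back a display address space with the
 * unsecure SMMU context bank created via msm_smmu_new() from msm_mmu.h.
 */
static inline struct msm_gem_address_space *
example_create_display_aspace(struct drm_device *dev)
{
	struct msm_mmu *mmu = msm_smmu_new(dev->dev, MSM_SMMU_DOMAIN_UNSECURE);

	if (IS_ERR(mmu))
		return ERR_CAST(mmu);
	return msm_gem_smmu_address_space_create(dev, mmu, "unsecure");
}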
static void
msm_gem_address_space_destroy(struct kref *kref)
{
struct msm_gem_address_space *aspace = container_of(kref,
struct msm_gem_address_space, kref);
if (aspace && aspace->ops->destroy)
aspace->ops->destroy(aspace);
kfree(aspace);
}
void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
if (aspace)
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
/* GPU address space operations */
static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
unsigned int flags)
{
if (!aspace || !vma->iova)
return;
if (aspace->mmu) {
unsigned size = vma->node.size << PAGE_SHIFT;
aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
}
spin_lock(&aspace->lock);
drm_mm_remove_node(&vma->node);
spin_unlock(&aspace->lock);
vma->iova = 0;
msm_gem_address_space_put(aspace);
}
void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
unsigned int flags)
{
if (aspace && aspace->ops->unmap)
aspace->ops->unmap(aspace, vma, sgt, flags);
}
static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
int npages, unsigned int flags)
{
int ret;
spin_lock(&aspace->lock);
if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
spin_unlock(&aspace->lock);
return 0;
}
ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
spin_unlock(&aspace->lock);
if (ret)
return ret;
vma->iova = vma->node.start << PAGE_SHIFT;
if (aspace->mmu) {
unsigned size = npages << PAGE_SHIFT;
ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
size, IOMMU_READ | IOMMU_WRITE);
}
/* Get a reference to the aspace to keep it around */
kref_get(&aspace->kref);
return ret;
}
static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
{
drm_mm_takedown(&aspace->mm);
if (aspace->mmu)
aspace->mmu->funcs->destroy(aspace->mmu);
}
static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
.map = iommu_aspace_map_vma,
.unmap = iommu_aspace_unmap_vma,
.destroy = iommu_aspace_destroy,
};
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name)
{
struct msm_gem_address_space *aspace;
u64 size = domain->geometry.aperture_end -
domain->geometry.aperture_start;
aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
if (!aspace)
return ERR_PTR(-ENOMEM);
spin_lock_init(&aspace->lock);
aspace->name = name;
aspace->mmu = msm_iommu_new(dev, domain);
aspace->ops = &msm_iommu_aspace_ops;
drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
size >> PAGE_SHIFT);
kref_init(&aspace->kref);
return aspace;
}
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt, int npages,
unsigned int flags)
{
if (aspace && aspace->ops->map)
return aspace->ops->map(aspace, vma, sgt, npages, flags);
return -EINVAL;
}
struct device *msm_gem_get_aspace_device(struct msm_gem_address_space *aspace)
{
struct device *client_dev = NULL;
if (aspace && aspace->mmu && aspace->mmu->funcs->get_dev)
client_dev = aspace->mmu->funcs->get_dev(aspace->mmu);
return client_dev;
}
void msm_gem_add_obj_to_aspace_active_list(
struct msm_gem_address_space *aspace,
struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
if (aspace && aspace->ops && aspace->ops->add_to_active)
aspace->ops->add_to_active(aspace, msm_obj);
}
void msm_gem_remove_obj_from_aspace_active_list(
struct msm_gem_address_space *aspace,
struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
if (aspace && aspace->ops && aspace->ops->remove_from_active)
aspace->ops->remove_from_active(aspace, msm_obj);
}
int msm_gem_address_space_register_cb(struct msm_gem_address_space *aspace,
void (*cb)(void *, bool),
void *cb_data)
{
if (aspace && aspace->ops && aspace->ops->register_cb)
return aspace->ops->register_cb(aspace, cb, cb_data);
return -EINVAL;
}
int msm_gem_address_space_unregister_cb(struct msm_gem_address_space *aspace,
void (*cb)(void *, bool),
void *cb_data)
{
if (aspace && aspace->ops && aspace->ops->unregister_cb)
return aspace->ops->unregister_cb(aspace, cb, cb_data);
return -EINVAL;
}

msm/msm_iommu.c Normal file
@@ -0,0 +1,114 @@
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
#include "msm_mmu.h"
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg)
{
struct msm_iommu *iommu = arg;
if (iommu->base.handler)
return iommu->base.handler(iommu->base.arg, iova, flags);
pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
return 0;
}
static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
int ret;
pm_runtime_get_sync(mmu->dev);
ret = iommu_attach_device(iommu->domain, mmu->dev);
pm_runtime_put_sync(mmu->dev);
return ret;
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
pm_runtime_get_sync(mmu->dev);
iommu_detach_device(iommu->domain, mmu->dev);
pm_runtime_put_sync(mmu->dev);
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
// pm_runtime_get_sync(mmu->dev);
ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
// pm_runtime_put_sync(mmu->dev);
WARN_ON(!ret);
return (ret == len) ? 0 : -EINVAL;
}
static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
pm_runtime_get_sync(mmu->dev);
iommu_unmap(iommu->domain, iova, len);
pm_runtime_put_sync(mmu->dev);
return 0;
}
static void msm_iommu_destroy(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
iommu_domain_free(iommu->domain);
kfree(iommu);
}
static const struct msm_mmu_funcs funcs = {
.attach = msm_iommu_attach,
.detach = msm_iommu_detach,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.destroy = msm_iommu_destroy,
};
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
struct msm_iommu *iommu;
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return ERR_PTR(-ENOMEM);
iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs);
iommu_set_fault_handler(domain, msm_fault_handler, iommu);
return &iommu->base;
}
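/*
 * Usage sketch (hypothetical GPU-side caller; the handler and helper
 * names are assumptions): wrap an iommu domain and route SMMU faults
 * into a driver-provided handler via msm_mmu_set_fault_handler().
 */
static inline int example_gpu_fault(void *arg, unsigned long iova, int flags)
{
	pr_err("example gpu fault at %08lx (flags=%d)\n", iova, flags);
	return -EFAULT;
}

static inline struct msm_mmu *example_iommu_create(struct device *dev,
		struct iommu_domain *domain, void *priv)
{
	struct msm_mmu *mmu = msm_iommu_new(dev, domain);

	if (!IS_ERR(mmu))
		msm_mmu_set_fault_handler(mmu, priv, example_gpu_fault);
	return mmu;
}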

msm/msm_kms.h Normal file
@@ -0,0 +1,220 @@
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_KMS_H__
#define __MSM_KMS_H__
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
#define MAX_PLANE 4
/**
* Device Private DRM Mode Flags
* drm_mode->private_flags
*/
/* Connector has interpreted seamless transition request as dynamic fps */
#define MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS (1<<0)
/* Transition to new mode requires a wait-for-vblank before the modeset */
#define MSM_MODE_FLAG_VBLANK_PRE_MODESET (1<<1)
/* Request to switch the connector mode */
#define MSM_MODE_FLAG_SEAMLESS_DMS (1<<2)
/* Request to switch the fps */
#define MSM_MODE_FLAG_SEAMLESS_VRR (1<<3)
/* As there are different display controller blocks depending on the
* snapdragon version, the kms support is split out and the appropriate
* implementation is loaded at runtime. The kms module is responsible
* for constructing the appropriate planes/crtcs/encoders/connectors.
*/
struct msm_kms_funcs {
/* hw initialization: */
int (*hw_init)(struct msm_kms *kms);
int (*postinit)(struct msm_kms *kms);
/* irq handling: */
void (*irq_preinstall)(struct msm_kms *kms);
int (*irq_postinstall)(struct msm_kms *kms);
void (*irq_uninstall)(struct msm_kms *kms);
irqreturn_t (*irq)(struct msm_kms *kms);
int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
/* modeset, bracketing atomic_commit(): */
void (*prepare_fence)(struct msm_kms *kms,
struct drm_atomic_state *state);
void (*prepare_commit)(struct msm_kms *kms,
struct drm_atomic_state *state);
void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
void (*complete_commit)(struct msm_kms *kms,
struct drm_atomic_state *state);
/* functions to wait for atomic commit completed on each CRTC */
void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
struct drm_crtc *crtc);
/* function pointer to wait for pixel transfer to panel to complete */
void (*wait_for_tx_complete)(struct msm_kms *kms,
struct drm_crtc *crtc);
/* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
const struct msm_format *(*get_format)(struct msm_kms *kms,
const uint32_t format,
const uint64_t modifier);
/* do format checking on format modified through fb_cmd2 modifiers */
int (*check_modified_format)(const struct msm_kms *kms,
const struct msm_format *msm_fmt,
const struct drm_mode_fb_cmd2 *cmd,
struct drm_gem_object **bos);
/* perform complete atomic check of given atomic state */
int (*atomic_check)(struct msm_kms *kms,
struct drm_atomic_state *state);
/* misc: */
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
int (*set_split_display)(struct msm_kms *kms,
struct drm_encoder *encoder,
struct drm_encoder *slave_encoder,
bool is_cmd_mode);
void (*postopen)(struct msm_kms *kms, struct drm_file *file);
void (*preclose)(struct msm_kms *kms, struct drm_file *file);
void (*postclose)(struct msm_kms *kms, struct drm_file *file);
void (*lastclose)(struct msm_kms *kms,
struct drm_modeset_acquire_ctx *ctx);
int (*register_events)(struct msm_kms *kms,
struct drm_mode_object *obj, u32 event, bool en);
void (*set_encoder_mode)(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode);
/* pm suspend/resume hooks */
int (*pm_suspend)(struct device *dev);
int (*pm_resume)(struct device *dev);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
/* get address space */
struct msm_gem_address_space *(*get_address_space)(
struct msm_kms *kms,
unsigned int domain);
struct device *(*get_address_space_device)(
struct msm_kms *kms,
unsigned int domain);
#ifdef CONFIG_DEBUG_FS
/* debugfs: */
int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
#endif
/* handle continuous splash */
int (*cont_splash_config)(struct msm_kms *kms);
/* check for continuous splash status */
bool (*check_for_splash)(struct msm_kms *kms);
};
struct msm_kms {
const struct msm_kms_funcs *funcs;
/* irq number to be passed on to drm_irq_install */
int irq;
/* mapper-id used to request GEM buffer mapped for scanout: */
struct msm_gem_address_space *aspace;
};
/**
* Subclass of drm_atomic_state, to allow kms backend to have driver
* private global state. The kms backend can do whatever it wants
* with the ->state ptr. On ->atomic_state_clear() the ->state ptr
* is kfree'd and set back to NULL.
*/
struct msm_kms_state {
struct drm_atomic_state base;
void *state;
};
#define to_kms_state(x) container_of(x, struct msm_kms_state, base)
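/*
 * Sketch (hypothetical backend helper, not part of the original header):
 * recovering the backend-private pointer from a generic atomic state via
 * the wrapper above.
 */
static inline void *example_kms_private_state(struct drm_atomic_state *s)
{
	return to_kms_state(s)->state;
}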
static inline void msm_kms_init(struct msm_kms *kms,
const struct msm_kms_funcs *funcs)
{
kms->funcs = funcs;
}
#ifdef CONFIG_DRM_MSM_MDP4
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
#else
static inline
struct msm_kms *mdp4_kms_init(struct drm_device *dev) { return NULL; }
#endif
#ifdef CONFIG_DRM_MSM_MDP5
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
int msm_mdss_init(struct drm_device *dev);
void msm_mdss_destroy(struct drm_device *dev);
int msm_mdss_enable(struct msm_mdss *mdss);
int msm_mdss_disable(struct msm_mdss *mdss);
#else
static inline int msm_mdss_init(struct drm_device *dev)
{
return 0;
}
static inline void msm_mdss_destroy(struct drm_device *dev)
{
}
static inline struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
return NULL;
}
static inline int msm_mdss_enable(struct msm_mdss *mdss)
{
return 0;
}
static inline int msm_mdss_disable(struct msm_mdss *mdss)
{
return 0;
}
#endif
struct msm_kms *sde_kms_init(struct drm_device *dev);
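/*
 * Illustrative sketch only: the comment above struct msm_kms_funcs notes
 * that the kms implementation is chosen at runtime. The real selection
 * lives in the driver probe path; this hypothetical helper just shows
 * the shape of that dispatch over the init hooks declared above.
 */
static inline struct msm_kms *example_kms_load(struct drm_device *dev,
		unsigned int mdp_rev)
{
	switch (mdp_rev) {
	case 4:
		return mdp4_kms_init(dev);
	case 5:
		return mdp5_kms_init(dev);
	default:
		return sde_kms_init(dev);
	}
}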
/**
* Mode Set Utility Functions
*/
static inline bool msm_is_mode_seamless(const struct drm_display_mode *mode)
{
return (mode->flags & DRM_MODE_FLAG_SEAMLESS);
}
static inline bool msm_is_mode_seamless_dms(const struct drm_display_mode *mode)
{
return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DMS)
: false;
}
static inline bool msm_is_mode_dynamic_fps(const struct drm_display_mode *mode)
{
return ((mode->flags & DRM_MODE_FLAG_SEAMLESS) &&
(mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS));
}
static inline bool msm_is_mode_seamless_vrr(const struct drm_display_mode *mode)
{
return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_VRR)
: false;
}
static inline bool msm_needs_vblank_pre_modeset(
const struct drm_display_mode *mode)
{
return (mode->private_flags & MSM_MODE_FLAG_VBLANK_PRE_MODESET);
}
#endif /* __MSM_KMS_H__ */

msm/msm_mmu.h Normal file
@@ -0,0 +1,88 @@
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_MMU_H__
#define __MSM_MMU_H__
#include <linux/iommu.h>
struct msm_mmu;
enum msm_mmu_domain_type {
MSM_SMMU_DOMAIN_UNSECURE,
MSM_SMMU_DOMAIN_NRT_UNSECURE,
MSM_SMMU_DOMAIN_SECURE,
MSM_SMMU_DOMAIN_NRT_SECURE,
MSM_SMMU_DOMAIN_MAX,
};
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned int len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned int len);
int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
enum dma_data_direction dir);
void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
enum dma_data_direction dir);
int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
int dir, u32 flags);
void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
int dir, u32 flags);
void (*destroy)(struct msm_mmu *mmu);
bool (*is_domain_secure)(struct msm_mmu *mmu);
int (*set_attribute)(struct msm_mmu *mmu,
enum iommu_attr attr, void *data);
int (*one_to_one_map)(struct msm_mmu *mmu, uint32_t iova,
uint32_t dest_address, uint32_t size, int prot);
int (*one_to_one_unmap)(struct msm_mmu *mmu, uint32_t dest_address,
uint32_t size);
struct device *(*get_dev)(struct msm_mmu *mmu);
};
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
struct device *dev;
int (*handler)(void *arg, unsigned long iova, int flags);
void *arg;
};
static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
const struct msm_mmu_funcs *funcs)
{
mmu->dev = dev;
mmu->funcs = funcs;
}
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
struct msm_mmu *msm_smmu_new(struct device *dev,
enum msm_mmu_domain_type domain);
static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
int (*handler)(void *arg, unsigned long iova, int flags))
{
mmu->arg = arg;
mmu->handler = handler;
}
/* SDE smmu driver initialize and cleanup functions */
int __init msm_smmu_driver_init(void);
void __exit msm_smmu_driver_cleanup(void);
#endif /* __MSM_MMU_H__ */

msm/msm_prop.c Normal file
@@ -0,0 +1,674 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include "msm_prop.h"
void msm_property_init(struct msm_property_info *info,
struct drm_mode_object *base,
struct drm_device *dev,
struct drm_property **property_array,
struct msm_property_data *property_data,
uint32_t property_count,
uint32_t blob_count,
uint32_t state_size)
{
/* prevent access if any of these are NULL */
if (!base || !dev || !property_array || !property_data) {
property_count = 0;
blob_count = 0;
DRM_ERROR("invalid arguments, forcing zero properties\n");
return;
}
/* can't have more blob properties than total properties */
if (blob_count > property_count) {
blob_count = property_count;
DBG("Capping number of blob properties to %d", blob_count);
}
if (!info) {
DRM_ERROR("info pointer is NULL\n");
} else {
info->base = base;
info->dev = dev;
info->property_array = property_array;
info->property_data = property_data;
info->property_count = property_count;
info->blob_count = blob_count;
info->install_request = 0;
info->install_count = 0;
info->recent_idx = 0;
info->is_active = false;
info->state_size = state_size;
info->state_cache_size = 0;
mutex_init(&info->property_lock);
memset(property_data,
0,
sizeof(struct msm_property_data) *
property_count);
}
}
void msm_property_destroy(struct msm_property_info *info)
{
if (!info)
return;
/* free state cache */
while (info->state_cache_size > 0)
kfree(info->state_cache[--(info->state_cache_size)]);
mutex_destroy(&info->property_lock);
}
int msm_property_pop_dirty(struct msm_property_info *info,
struct msm_property_state *property_state)
{
struct list_head *item;
int rc = 0;
if (!info || !property_state || !property_state->values) {
DRM_ERROR("invalid argument(s)\n");
return -EINVAL;
}
mutex_lock(&info->property_lock);
if (list_empty(&property_state->dirty_list)) {
rc = -EAGAIN;
} else {
item = property_state->dirty_list.next;
list_del_init(item);
rc = container_of(item, struct msm_property_value, dirty_node)
- property_state->values;
DRM_DEBUG_KMS("property %d dirty\n", rc);
}
mutex_unlock(&info->property_lock);
return rc;
}
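/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * drain every dirty property in a commit path. The loop ends when the
 * dirty list is empty (-EAGAIN) or on bad arguments (-EINVAL).
 */
static inline void example_drain_dirty(struct msm_property_info *info,
		struct msm_property_state *property_state)
{
	int idx;

	while ((idx = msm_property_pop_dirty(info, property_state)) >= 0) {
		/* program hardware for property index 'idx' here */
	}
}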
/**
* _msm_property_set_dirty_no_lock - flag given property as being dirty
* This function doesn't mutex protect the
* dirty linked list.
* @info: Pointer to property info container struct
* @property_state: Pointer to property state container struct
* @property_idx: Property index
*/
static void _msm_property_set_dirty_no_lock(
struct msm_property_info *info,
struct msm_property_state *property_state,
uint32_t property_idx)
{
if (!info || !property_state || !property_state->values ||
property_idx >= info->property_count) {
DRM_ERROR("invalid argument(s), idx %u\n", property_idx);
return;
}
/* avoid re-inserting if already dirty */
if (!list_empty(&property_state->values[property_idx].dirty_node)) {
DRM_DEBUG_KMS("property %u already dirty\n", property_idx);
return;
}
list_add_tail(&property_state->values[property_idx].dirty_node,
&property_state->dirty_list);
}
bool msm_property_is_dirty(
struct msm_property_info *info,
struct msm_property_state *property_state,
uint32_t property_idx)
{
if (!info || !property_state || !property_state->values ||
property_idx >= info->property_count) {
DRM_ERROR("invalid argument(s), idx %u\n", property_idx);
return false;
}
return !list_empty(&property_state->values[property_idx].dirty_node);
}
/**
* _msm_property_install_integer - install standard drm range property
* @info: Pointer to property info container struct
* @name: Property name
* @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
* @min: Min property value
* @max: Max property value
 * @init: Default property value
 * @property_idx: Property index
 * @force_dirty: If true, flag the property as dirty on every set instead of
 *               only when the value changes
*/
static void _msm_property_install_integer(struct msm_property_info *info,
const char *name, int flags, uint64_t min, uint64_t max,
uint64_t init, uint32_t property_idx, bool force_dirty)
{
struct drm_property **prop;
if (!info)
return;
++info->install_request;
if (!name || (property_idx >= info->property_count)) {
DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
} else {
prop = &info->property_array[property_idx];
/*
* Properties need to be attached to each drm object that
* uses them, but only need to be created once
*/
if (!*prop) {
*prop = drm_property_create_range(info->dev,
flags, name, min, max);
if (!*prop)
DRM_ERROR("create %s property failed\n", name);
}
/* save init value for later */
info->property_data[property_idx].default_value = init;
info->property_data[property_idx].force_dirty = force_dirty;
/* always attach property, if created */
if (*prop) {
drm_object_attach_property(info->base, *prop, init);
++info->install_count;
}
}
}
void msm_property_install_range(struct msm_property_info *info,
const char *name, int flags, uint64_t min, uint64_t max,
uint64_t init, uint32_t property_idx)
{
_msm_property_install_integer(info, name, flags,
min, max, init, property_idx, false);
}
void msm_property_install_volatile_range(struct msm_property_info *info,
const char *name, int flags, uint64_t min, uint64_t max,
uint64_t init, uint32_t property_idx)
{
_msm_property_install_integer(info, name, flags,
min, max, init, property_idx, true);
}
void msm_property_install_enum(struct msm_property_info *info,
const char *name, int flags, int is_bitmask,
const struct drm_prop_enum_list *values, int num_values,
uint32_t property_idx)
{
struct drm_property **prop;
if (!info)
return;
++info->install_request;
if (!name || !values || !num_values ||
(property_idx >= info->property_count)) {
DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
} else {
prop = &info->property_array[property_idx];
/*
* Properties need to be attached to each drm object that
* uses them, but only need to be created once
*/
if (!*prop) {
/* 'bitmask' is a special type of 'enum' */
if (is_bitmask)
*prop = drm_property_create_bitmask(info->dev,
DRM_MODE_PROP_BITMASK | flags,
name, values, num_values, -1);
else
*prop = drm_property_create_enum(info->dev,
DRM_MODE_PROP_ENUM | flags,
name, values, num_values);
if (!*prop)
DRM_ERROR("create %s property failed\n", name);
}
/* save init value for later */
info->property_data[property_idx].default_value = 0;
info->property_data[property_idx].force_dirty = false;
/* select first defined value for enums */
if (!is_bitmask)
info->property_data[property_idx].default_value =
values->type;
/* always attach property, if created */
if (*prop) {
drm_object_attach_property(info->base, *prop,
info->property_data
[property_idx].default_value);
++info->install_count;
}
}
}
void msm_property_install_blob(struct msm_property_info *info,
const char *name, int flags, uint32_t property_idx)
{
struct drm_property **prop;
if (!info)
return;
++info->install_request;
if (!name || (property_idx >= info->blob_count)) {
DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
} else {
prop = &info->property_array[property_idx];
/*
* Properties need to be attached to each drm object that
* uses them, but only need to be created once
*/
if (!*prop) {
/* use 'create' for blob property place holder */
*prop = drm_property_create(info->dev,
DRM_MODE_PROP_BLOB | flags, name, 0);
if (!*prop)
DRM_ERROR("create %s property failed\n", name);
}
/* save init value for later */
info->property_data[property_idx].default_value = 0;
info->property_data[property_idx].force_dirty = true;
/* always attach property, if created */
if (*prop) {
drm_object_attach_property(info->base, *prop, -1);
++info->install_count;
}
}
}
int msm_property_install_get_status(struct msm_property_info *info)
{
int rc = -ENOMEM;
if (info && (info->install_request == info->install_count))
rc = 0;
return rc;
}
int msm_property_index(struct msm_property_info *info,
struct drm_property *property)
{
uint32_t count;
int32_t idx;
int rc = -EINVAL;
if (!info || !property) {
DRM_ERROR("invalid argument(s)\n");
} else {
/*
* Linear search, but start from last found index. This will
* help if any single property is accessed multiple times in a
* row. Ideally, we could keep a list of properties sorted in
* the order of most recent access, but that may be overkill
* for now.
*/
mutex_lock(&info->property_lock);
idx = info->recent_idx;
count = info->property_count;
while (count) {
--count;
/* stop searching on match */
if (info->property_array[idx] == property) {
info->recent_idx = idx;
rc = idx;
break;
}
/* move to next valid index */
if (--idx < 0)
idx = info->property_count - 1;
}
mutex_unlock(&info->property_lock);
}
return rc;
}
int msm_property_set_dirty(struct msm_property_info *info,
struct msm_property_state *property_state,
int property_idx)
{
if (!info || !property_state || !property_state->values) {
DRM_ERROR("invalid argument(s)\n");
return -EINVAL;
}
mutex_lock(&info->property_lock);
_msm_property_set_dirty_no_lock(info, property_state, property_idx);
mutex_unlock(&info->property_lock);
return 0;
}
int msm_property_atomic_set(struct msm_property_info *info,
struct msm_property_state *property_state,
struct drm_property *property, uint64_t val)
{
struct drm_property_blob *blob;
int property_idx, rc = -EINVAL;
if (!info || !property_state) {
DRM_ERROR("invalid argument(s)\n");
return -EINVAL;
}
property_idx = msm_property_index(info, property);
if ((property_idx == -EINVAL) || !property_state->values) {
DRM_ERROR("invalid argument(s)\n");
} else {
/* extra handling for incoming properties */
mutex_lock(&info->property_lock);
if ((property->flags & DRM_MODE_PROP_BLOB) &&
(property_idx < info->blob_count)) {
/* need to clear previous ref */
if (property_state->values[property_idx].blob)
drm_property_blob_put(
property_state->values[
property_idx].blob);
/* DRM lookup also takes a reference */
blob = drm_property_lookup_blob(info->dev,
(uint32_t)val);
if (val && !blob) {
DRM_ERROR("prop %d blob id 0x%llx not found\n",
property_idx, val);
val = 0;
} else {
if (blob) {
DBG("Blob %u saved", blob->base.id);
val = blob->base.id;
}
/* save the new blob */
property_state->values[property_idx].blob =
blob;
}
}
/* update value and flag as dirty */
if (property_state->values[property_idx].value != val ||
info->property_data[property_idx].force_dirty) {
property_state->values[property_idx].value = val;
_msm_property_set_dirty_no_lock(info, property_state,
property_idx);
DBG("%s - %lld", property->name, val);
}
mutex_unlock(&info->property_lock);
rc = 0;
}
return rc;
}
int msm_property_atomic_get(struct msm_property_info *info,
struct msm_property_state *property_state,
struct drm_property *property, uint64_t *val)
{
int property_idx, rc = -EINVAL;
property_idx = msm_property_index(info, property);
if (!info || !property_state || (property_idx == -EINVAL) ||
!property_state->values || !val) {
DRM_DEBUG("Invalid argument(s)\n");
} else {
mutex_lock(&info->property_lock);
*val = property_state->values[property_idx].value;
mutex_unlock(&info->property_lock);
rc = 0;
}
return rc;
}
void *msm_property_alloc_state(struct msm_property_info *info)
{
void *state = NULL;
if (!info) {
DRM_ERROR("invalid property info\n");
return NULL;
}
mutex_lock(&info->property_lock);
if (info->state_cache_size)
state = info->state_cache[--(info->state_cache_size)];
mutex_unlock(&info->property_lock);
if (!state && info->state_size)
state = kmalloc(info->state_size, GFP_KERNEL);
if (!state)
DRM_ERROR("failed to allocate state\n");
return state;
}
/**
* _msm_property_free_state - helper function for freeing local state objects
* @info: Pointer to property info container struct
* @st: Pointer to state object
*/
static void _msm_property_free_state(struct msm_property_info *info, void *st)
{
if (!info || !st)
return;
mutex_lock(&info->property_lock);
if (info->state_cache_size < MSM_PROP_STATE_CACHE_SIZE)
info->state_cache[(info->state_cache_size)++] = st;
else
kfree(st);
mutex_unlock(&info->property_lock);
}
void msm_property_reset_state(struct msm_property_info *info, void *state,
struct msm_property_state *property_state,
struct msm_property_value *property_values)
{
uint32_t i;
if (!info) {
DRM_ERROR("invalid property info\n");
return;
}
if (state)
memset(state, 0, info->state_size);
if (property_state) {
property_state->property_count = info->property_count;
property_state->values = property_values;
INIT_LIST_HEAD(&property_state->dirty_list);
}
/*
* Assign default property values. This helper is mostly used
* to initialize newly created state objects.
*/
if (property_values)
for (i = 0; i < info->property_count; ++i) {
property_values[i].value =
info->property_data[i].default_value;
property_values[i].blob = NULL;
INIT_LIST_HEAD(&property_values[i].dirty_node);
}
}
void msm_property_duplicate_state(struct msm_property_info *info,
void *old_state, void *state,
struct msm_property_state *property_state,
struct msm_property_value *property_values)
{
uint32_t i;
if (!info || !old_state || !state) {
DRM_ERROR("invalid argument(s)\n");
return;
}
memcpy(state, old_state, info->state_size);
if (!property_state)
return;
INIT_LIST_HEAD(&property_state->dirty_list);
property_state->values = property_values;
if (property_state->values)
/* add ref count for blobs and initialize dirty nodes */
for (i = 0; i < info->property_count; ++i) {
if (property_state->values[i].blob)
drm_property_blob_get(
property_state->values[i].blob);
INIT_LIST_HEAD(&property_state->values[i].dirty_node);
}
}
void msm_property_destroy_state(struct msm_property_info *info, void *state,
struct msm_property_state *property_state)
{
uint32_t i;
if (!info || !state) {
DRM_ERROR("invalid argument(s)\n");
return;
}
if (property_state && property_state->values) {
/* remove ref count for blobs */
for (i = 0; i < info->property_count; ++i)
if (property_state->values[i].blob) {
drm_property_blob_put(
property_state->values[i].blob);
property_state->values[i].blob = NULL;
}
}
_msm_property_free_state(info, state);
}
void *msm_property_get_blob(struct msm_property_info *info,
struct msm_property_state *property_state,
size_t *byte_len,
uint32_t property_idx)
{
struct drm_property_blob *blob;
size_t len = 0;
void *rc = NULL;
if (!info || !property_state || !property_state->values ||
(property_idx >= info->blob_count)) {
DRM_ERROR("invalid argument(s)\n");
} else {
blob = property_state->values[property_idx].blob;
if (blob) {
len = blob->length;
rc = blob->data;
}
}
if (byte_len)
*byte_len = len;
return rc;
}
int msm_property_set_blob(struct msm_property_info *info,
struct drm_property_blob **blob_reference,
void *blob_data,
size_t byte_len,
uint32_t property_idx)
{
struct drm_property_blob *blob = NULL;
int rc = -EINVAL;
if (!info || !blob_reference || (property_idx >= info->blob_count)) {
DRM_ERROR("invalid argument(s)\n");
} else {
/* create blob */
if (blob_data && byte_len) {
blob = drm_property_create_blob(info->dev,
byte_len,
blob_data);
if (IS_ERR_OR_NULL(blob)) {
rc = PTR_ERR(blob);
DRM_ERROR("failed to create blob, %d\n", rc);
goto exit;
}
}
/* update drm object */
rc = drm_object_property_set_value(info->base,
info->property_array[property_idx],
blob ? blob->base.id : 0);
if (rc) {
DRM_ERROR("failed to set blob to property\n");
if (blob)
drm_property_blob_put(blob);
goto exit;
}
/* update local reference */
if (*blob_reference)
drm_property_blob_put(*blob_reference);
*blob_reference = blob;
}
exit:
return rc;
}
int msm_property_set_property(struct msm_property_info *info,
struct msm_property_state *property_state,
uint32_t property_idx,
uint64_t val)
{
int rc = -EINVAL;
if (!info || (property_idx >= info->property_count) ||
property_idx < info->blob_count ||
!property_state || !property_state->values) {
DRM_ERROR("invalid argument(s)\n");
} else {
struct drm_property *drm_prop;
mutex_lock(&info->property_lock);
/* update cached value */
property_state->values[property_idx].value = val;
/* update the new default value for immutables */
drm_prop = info->property_array[property_idx];
if (drm_prop->flags & DRM_MODE_PROP_IMMUTABLE)
info->property_data[property_idx].default_value = val;
mutex_unlock(&info->property_lock);
/* update drm object */
rc = drm_object_property_set_value(info->base, drm_prop, val);
if (rc)
DRM_ERROR("failed set property value, idx %d rc %d\n",
property_idx, rc);
}
return rc;
}
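/*
 * Usage sketch with hypothetical my_ prefixed names: a consumer drains
 * the dirty list built up by msm_property_atomic_set() by calling
 * msm_property_pop_dirty() until -EAGAIN signals an empty list; the
 * returned indices match those from msm_property_index().
 */
static void my_apply_dirty_props(struct msm_property_info *info,
struct msm_property_state *property_state)
{
int idx;
while ((idx = msm_property_pop_dirty(info, property_state)) >= 0) {
/* switch (idx) { case MY_PROP_X: program hardware; } */
}
}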

429
msm/msm_prop.h Normal file
View File

@@ -0,0 +1,429 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _MSM_PROP_H_
#define _MSM_PROP_H_
#include <linux/list.h>
#include "msm_drv.h"
#define MSM_PROP_STATE_CACHE_SIZE 2
/**
* struct msm_property_data - opaque structure for tracking per
* drm-object, per-property private data
* @default_value: Default property value for this drm object
* @force_dirty: Always dirty property on incoming sets, rather than checking
* for modified values
*/
struct msm_property_data {
uint64_t default_value;
bool force_dirty;
};
/**
* struct msm_property_value - opaque structure for tracking per
* drm-object, per-property state values
* @value: Current property value for this drm object
* @blob: Pointer to associated blob data, if available
* @dirty_node: Linked list node to track if property is dirty or not
*/
struct msm_property_value {
uint64_t value;
struct drm_property_blob *blob;
struct list_head dirty_node;
};
/**
* struct msm_property_info: Structure for property/state helper functions
* @base: Pointer to base drm object (plane/crtc/etc.)
* @dev: Pointer to drm device object
* @property_array: Pointer to array for storing created property objects
* @property_data: Pointer to array for storing private property data
* @property_count: Total number of properties
* @blob_count: Total number of blob properties, should be <= count
* @install_request: Total number of property 'install' requests
* @install_count: Total number of successful 'install' requests
* @recent_idx: Index of property most recently accessed by set/get
* @is_active: Whether or not drm component properties are 'active'
* @state_cache: Cache of local states, to prevent alloc/free thrashing
* @state_size: Size of local state structures
* @state_cache_size: Number of state structures currently stored in state_cache
* @property_lock: Mutex to protect local variables
*/
struct msm_property_info {
struct drm_mode_object *base;
struct drm_device *dev;
struct drm_property **property_array;
struct msm_property_data *property_data;
uint32_t property_count;
uint32_t blob_count;
uint32_t install_request;
uint32_t install_count;
int32_t recent_idx;
bool is_active;
void *state_cache[MSM_PROP_STATE_CACHE_SIZE];
uint32_t state_size;
int32_t state_cache_size;
struct mutex property_lock;
};
/**
* struct msm_property_state - Structure for local property state information
* @property_count: Total number of properties
* @values: Pointer to array of msm_property_value objects
* @dirty_list: List of all properties that have been 'atomic_set' but not
* yet cleared with 'msm_property_pop_dirty'
*/
struct msm_property_state {
uint32_t property_count;
struct msm_property_value *values;
struct list_head dirty_list;
};
/**
* msm_property_index_to_drm_property - get drm property struct from prop index
* @info: Pointer to property info container struct
* @property_idx: Property index
* Returns: drm_property pointer associated with property index
*/
static inline
struct drm_property *msm_property_index_to_drm_property(
struct msm_property_info *info, uint32_t property_idx)
{
if (!info || property_idx >= info->property_count)
return NULL;
return info->property_array[property_idx];
}
/**
* msm_property_get_default - query default value of a property
* @info: Pointer to property info container struct
* @property_idx: Property index
* Returns: Default value for specified property
*/
static inline
uint64_t msm_property_get_default(struct msm_property_info *info,
uint32_t property_idx)
{
uint64_t rc = 0;
if (!info)
return 0;
mutex_lock(&info->property_lock);
if (property_idx < info->property_count)
rc = info->property_data[property_idx].default_value;
mutex_unlock(&info->property_lock);
return rc;
}
/**
* msm_property_set_is_active - set overall 'active' status for all properties
* @info: Pointer to property info container struct
* @is_active: New 'is active' status
*/
static inline
void msm_property_set_is_active(struct msm_property_info *info, bool is_active)
{
if (info) {
mutex_lock(&info->property_lock);
info->is_active = is_active;
mutex_unlock(&info->property_lock);
}
}
/**
* msm_property_get_is_active - query property 'is active' status
* @info: Pointer to property info container struct
* Returns: Current 'is active' status
*/
static inline
bool msm_property_get_is_active(struct msm_property_info *info)
{
bool rc = false;
if (info) {
mutex_lock(&info->property_lock);
rc = info->is_active;
mutex_unlock(&info->property_lock);
}
return rc;
}
/**
* msm_property_pop_dirty - determine next dirty property and clear
* its dirty flag
* @info: Pointer to property info container struct
* @property_state: Pointer to property state container struct
* Returns: Valid msm property index on success,
* -EAGAIN if no dirty properties are available
* Property indices returned from this function are similar
* to those returned by the msm_property_index function.
*/
int msm_property_pop_dirty(struct msm_property_info *info,
struct msm_property_state *property_state);
/**
* msm_property_init - initialize property info structure
* @info: Pointer to property info container struct
* @base: Pointer to base drm object (plane/crtc/etc.)
* @dev: Pointer to drm device object
* @property_array: Pointer to array for storing created property objects
* @property_data: Pointer to array for storing private property data
* @property_count: Total number of properties
* @blob_count: Total number of blob properties, should be <= count
* @state_size: Size of local state object
*/
void msm_property_init(struct msm_property_info *info,
struct drm_mode_object *base,
struct drm_device *dev,
struct drm_property **property_array,
struct msm_property_data *property_data,
uint32_t property_count,
uint32_t blob_count,
uint32_t state_size);
/**
* msm_property_destroy - destroy helper info structure
*
* @info: Pointer to property info container struct
*/
void msm_property_destroy(struct msm_property_info *info);
/**
* msm_property_install_range - install standard drm range property
* @info: Pointer to property info container struct
* @name: Property name
* @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
* @min: Min property value
* @max: Max property value
* @init: Default property value
* @property_idx: Property index
*/
void msm_property_install_range(struct msm_property_info *info,
const char *name,
int flags,
uint64_t min,
uint64_t max,
uint64_t init,
uint32_t property_idx);
/**
* msm_property_install_volatile_range - install drm range property
* This function is similar to msm_property_install_range, but assumes the
* property holds a user pointer or descriptor whose referenced data may
* change without the property value itself changing, so every incoming
* set is treated as dirty.
* @info: Pointer to property info container struct
* @name: Property name
* @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
* @min: Min property value
* @max: Max property value
* @init: Default property value
* @property_idx: Property index
*/
void msm_property_install_volatile_range(struct msm_property_info *info,
const char *name,
int flags,
uint64_t min,
uint64_t max,
uint64_t init,
uint32_t property_idx);
/**
* msm_property_install_enum - install standard drm enum/bitmask property
* @info: Pointer to property info container struct
* @name: Property name
* @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
* @is_bitmask: Set to non-zero to create a bitmask property, rather than an
* enumeration one
* @values: Array of allowable enumeration/bitmask values
* @num_values: Size of values array
* @property_idx: Property index
*/
void msm_property_install_enum(struct msm_property_info *info,
const char *name,
int flags,
int is_bitmask,
const struct drm_prop_enum_list *values,
int num_values,
uint32_t property_idx);
/**
* msm_property_install_blob - install standard drm blob property
* @info: Pointer to property info container struct
* @name: Property name
* @flags: Extra flags for property creation
* @property_idx: Property index
*/
void msm_property_install_blob(struct msm_property_info *info,
const char *name,
int flags,
uint32_t property_idx);
/**
* msm_property_install_get_status - query overall status of property additions
* @info: Pointer to property info container struct
* Returns: Zero if previous property install calls were all successful
*/
int msm_property_install_get_status(struct msm_property_info *info);
/**
* msm_property_index - determine property index from drm_property ptr
* @info: Pointer to property info container struct
* @property: Incoming property pointer
* Returns: Valid property index, or -EINVAL on error
*/
int msm_property_index(struct msm_property_info *info,
struct drm_property *property);
/**
* msm_property_set_dirty - forcibly flag a property as dirty
* @info: Pointer to property info container struct
* @property_state: Pointer to property state container struct
* @property_idx: Property index
* Returns: Zero on success
*/
int msm_property_set_dirty(struct msm_property_info *info,
struct msm_property_state *property_state,
int property_idx);
/**
* msm_property_is_dirty - check whether a property is dirty
* Note: Intended for use during atomic_check before pop_dirty usage
* @info: Pointer to property info container struct
* @property_state: Pointer to property state container struct
* @property_idx: Property index
* Returns: true if dirty, false otherwise
*/
bool msm_property_is_dirty(
struct msm_property_info *info,
struct msm_property_state *property_state,
uint32_t property_idx);
/**
* msm_property_atomic_set - helper function for atomic property set callback
* @info: Pointer to property info container struct
* @property_state: Pointer to local state structure
* @property: Incoming property pointer
* @val: Incoming property value
* Returns: Zero on success
*/
int msm_property_atomic_set(struct msm_property_info *info,
struct msm_property_state *property_state,
struct drm_property *property,
uint64_t val);
/**
* msm_property_atomic_get - helper function for atomic property get callback
* @info: Pointer to property info container struct
* @property_state: Pointer to local state structure
* @property: Incoming property pointer
* @val: Pointer to variable for receiving property value
* Returns: Zero on success
*/
int msm_property_atomic_get(struct msm_property_info *info,
struct msm_property_state *property_state,
struct drm_property *property,
uint64_t *val);
/**
* msm_property_alloc_state - helper function for allocating local state objects
* @info: Pointer to property info container struct
*/
void *msm_property_alloc_state(struct msm_property_info *info);
/**
* msm_property_reset_state - helper function for state reset callback
* @info: Pointer to property info container struct
* @state: Pointer to local state structure
* @property_state: Pointer to property state container struct
* @property_values: Pointer to property values cache array
*/
void msm_property_reset_state(struct msm_property_info *info, void *state,
struct msm_property_state *property_state,
struct msm_property_value *property_values);
/**
* msm_property_duplicate_state - helper function for duplicate state cb
* @info: Pointer to property info container struct
* @old_state: Pointer to original state structure
* @state: Pointer to newly created state structure
* @property_state: Pointer to destination property state container struct
* @property_values: Pointer to property values cache array
*/
void msm_property_duplicate_state(struct msm_property_info *info,
void *old_state,
void *state,
struct msm_property_state *property_state,
struct msm_property_value *property_values);
/**
* msm_property_destroy_state - helper function for destroy state cb
* @info: Pointer to property info container struct
* @state: Pointer to local state structure
* @property_state: Pointer to property state container struct
*/
void msm_property_destroy_state(struct msm_property_info *info,
void *state,
struct msm_property_state *property_state);
/**
* msm_property_get_blob - obtain cached data pointer for drm blob property
* @info: Pointer to property info container struct
* @property_state: Pointer to property state container struct
* @byte_len: Optional pointer to variable for accepting blob size
* @property_idx: Property index
* Returns: Pointer to blob data
*/
void *msm_property_get_blob(struct msm_property_info *info,
struct msm_property_state *property_state,
size_t *byte_len,
uint32_t property_idx);
/**
* msm_property_set_blob - update blob property on a drm object
* This function updates the blob property value of the given drm object. Its
* intended use is to update blob properties that have been created with the
* DRM_MODE_PROP_IMMUTABLE flag set.
* @info: Pointer to property info container struct
* @blob_reference: Reference to a pointer that holds the created data blob
* @blob_data: Pointer to blob data
* @byte_len: Length of blob data, in bytes
* @property_idx: Property index
* Returns: Zero on success
*/
int msm_property_set_blob(struct msm_property_info *info,
struct drm_property_blob **blob_reference,
void *blob_data,
size_t byte_len,
uint32_t property_idx);
/**
* msm_property_set_property - update property on a drm object
* This function updates the property value of the given drm object. Its
* intended use is to update properties that have been created with the
* DRM_MODE_PROP_IMMUTABLE flag set.
* Note: This function cannot be called on a blob.
* @info: Pointer to property info container struct
* @property_state: Pointer to property state container struct
* @property_idx: Property index
* @val: value of the property to set
* Returns: Zero on success
*/
int msm_property_set_property(struct msm_property_info *info,
struct msm_property_state *property_state,
uint32_t property_idx,
uint64_t val);
#endif /* _MSM_PROP_H_ */
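/*
 * Usage sketch with hypothetical MY_PROP_ and my_ prefixed names: the
 * expected call sequence when creating a drm object is
 * msm_property_init(), one install call per property (blob indices
 * must stay below blob_count, so blobs occupy the lowest indices),
 * then msm_property_install_get_status() to confirm every install
 * succeeded.
 */
enum { MY_PROP_BLOB0, MY_PROP_LEVEL, MY_PROP_COUNT };
static struct drm_property *my_prop_array[MY_PROP_COUNT];
static struct msm_property_data my_prop_data[MY_PROP_COUNT];
static int my_obj_install_properties(struct msm_property_info *info,
struct drm_mode_object *base, struct drm_device *dev,
uint32_t state_size)
{
msm_property_init(info, base, dev, my_prop_array, my_prop_data,
MY_PROP_COUNT, 1, state_size);
msm_property_install_blob(info, "my_blob", 0, MY_PROP_BLOB0);
msm_property_install_range(info, "my_level", 0, 0, 255, 0, MY_PROP_LEVEL);
return msm_property_install_get_status(info);
}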

502
msm/msm_smmu.c Normal file
View File

@@ -0,0 +1,502 @@
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>
#include <soc/qcom/secure_buffer.h>
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "sde_dbg.h"
struct msm_smmu_client {
struct device *dev;
struct iommu_domain *domain;
bool domain_attached;
bool secure;
};
struct msm_smmu {
struct msm_mmu base;
struct device *client_dev;
struct msm_smmu_client *client;
};
struct msm_smmu_domain {
const char *label;
bool secure;
};
#define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
#define msm_smmu_to_client(smmu) (smmu->client)
static int msm_smmu_attach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
int rc = 0;
if (!client) {
pr_err("undefined smmu client\n");
return -EINVAL;
}
/* domain attach only once */
if (client->domain_attached)
return 0;
rc = iommu_attach_device(client->domain, client->dev);
if (rc) {
dev_err(client->dev, "iommu attach dev failed (%d)\n", rc);
return rc;
}
client->domain_attached = true;
dev_dbg(client->dev, "iommu domain attached\n");
return 0;
}
static void msm_smmu_detach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
if (!client) {
pr_err("undefined smmu client\n");
return;
}
if (!client->domain_attached)
return;
pm_runtime_get_sync(mmu->dev);
iommu_detach_device(client->domain, client->dev);
pm_runtime_put_sync(mmu->dev);
client->domain_attached = false;
dev_dbg(client->dev, "iommu domain detached\n");
}
static int msm_smmu_set_attribute(struct msm_mmu *mmu,
enum iommu_attr attr, void *data)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
int ret = 0;
if (!client || !client->domain)
return -ENODEV;
ret = iommu_domain_set_attr(client->domain, attr, data);
if (ret)
DRM_ERROR("set domain attribute failed:%d\n", ret);
return ret;
}
static int msm_smmu_one_to_one_unmap(struct msm_mmu *mmu,
uint32_t dest_address, uint32_t size)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
int ret = 0;
if (!client || !client->domain)
return -ENODEV;
ret = iommu_unmap(client->domain, dest_address, size);
if (ret != size)
pr_err("smmu unmap failed\n");
return 0;
}
static int msm_smmu_one_to_one_map(struct msm_mmu *mmu, uint32_t iova,
uint32_t dest_address, uint32_t size, int prot)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
int ret = 0;
if (!client || !client->domain)
return -ENODEV;
ret = iommu_map(client->domain, dest_address, dest_address,
size, prot);
if (ret)
pr_err("smmu map failed\n");
return ret;
}
static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned int len, int prot)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
size_t ret = 0;
if (sgt && sgt->sgl) {
ret = iommu_map_sg(client->domain, iova, sgt->sgl,
sgt->nents, prot);
WARN_ON((int)ret < 0);
DRM_DEBUG("%pad/0x%x/0x%x/\n", &sgt->sgl->dma_address,
sgt->sgl->dma_length, prot);
SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length, prot);
}
return (ret == len) ? 0 : -EINVAL;
}
static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned int len)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
pm_runtime_get_sync(mmu->dev);
iommu_unmap(client->domain, iova, len);
pm_runtime_put_sync(mmu->dev);
return 0;
}
static void msm_smmu_destroy(struct msm_mmu *mmu)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct platform_device *pdev = to_platform_device(smmu->client_dev);
if (smmu->client_dev)
platform_device_unregister(pdev);
kfree(smmu);
}
struct device *msm_smmu_get_dev(struct msm_mmu *mmu)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
return smmu->client_dev;
}
static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
int dir, u32 flags)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
unsigned long attrs = 0x0;
int ret;
if (!sgt || !client) {
DRM_ERROR("sg table is invalid\n");
return -ENOMEM;
}
/*
* For import buffer type, dma_map_sg_attrs is called during
* dma_buf_map_attachment and is not required to call again
*/
if (!(flags & MSM_BO_EXTBUF)) {
ret = dma_map_sg_attrs(client->dev, sgt->sgl, sgt->nents, dir,
attrs);
if (!ret) {
DRM_ERROR("dma map sg failed\n");
return -ENOMEM;
}
}
if (sgt && sgt->sgl) {
DRM_DEBUG("%pad/0x%x/0x%x/0x%lx\n",
&sgt->sgl->dma_address, sgt->sgl->dma_length,
dir, attrs);
SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length,
dir, attrs, client->secure);
}
return 0;
}
static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
int dir, u32 flags)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
if (!sgt || !client) {
DRM_ERROR("sg table is invalid\n");
return;
}
if (sgt->sgl) {
DRM_DEBUG("%pad/0x%x/0x%x\n",
&sgt->sgl->dma_address, sgt->sgl->dma_length,
dir);
SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length,
dir, client->secure);
}
if (!(flags & MSM_BO_EXTBUF))
dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
}
static bool msm_smmu_is_domain_secure(struct msm_mmu *mmu)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
return client->secure;
}
static const struct msm_mmu_funcs funcs = {
.attach = msm_smmu_attach,
.detach = msm_smmu_detach,
.map = msm_smmu_map,
.unmap = msm_smmu_unmap,
.map_dma_buf = msm_smmu_map_dma_buf,
.unmap_dma_buf = msm_smmu_unmap_dma_buf,
.destroy = msm_smmu_destroy,
.is_domain_secure = msm_smmu_is_domain_secure,
.set_attribute = msm_smmu_set_attribute,
.one_to_one_map = msm_smmu_one_to_one_map,
.one_to_one_unmap = msm_smmu_one_to_one_unmap,
.get_dev = msm_smmu_get_dev,
};
static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
[MSM_SMMU_DOMAIN_UNSECURE] = {
.label = "mdp_ns",
.secure = false,
},
[MSM_SMMU_DOMAIN_SECURE] = {
.label = "mdp_s",
.secure = true,
},
[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
.label = "rot_ns",
.secure = false,
},
[MSM_SMMU_DOMAIN_NRT_SECURE] = {
.label = "rot_s",
.secure = true,
},
};
static const struct of_device_id msm_smmu_dt_match[] = {
{ .compatible = "qcom,smmu_sde_unsec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
{ .compatible = "qcom,smmu_sde_sec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_SECURE] },
{ .compatible = "qcom,smmu_sde_nrt_unsec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_UNSECURE] },
{ .compatible = "qcom,smmu_sde_nrt_sec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_SECURE] },
{}
};
MODULE_DEVICE_TABLE(of, msm_smmu_dt_match);
static struct device *msm_smmu_device_create(struct device *dev,
enum msm_mmu_domain_type domain,
struct msm_smmu *smmu)
{
struct device_node *child;
struct platform_device *pdev;
int i;
const char *compat = NULL;
for (i = 0; i < ARRAY_SIZE(msm_smmu_dt_match); i++) {
if (msm_smmu_dt_match[i].data == &msm_smmu_domains[domain]) {
compat = msm_smmu_dt_match[i].compatible;
break;
}
}
if (!compat) {
DRM_DEBUG("unable to find matching domain for %d\n", domain);
return ERR_PTR(-ENOENT);
}
DRM_DEBUG("found domain %d compat: %s\n", domain, compat);
child = of_find_compatible_node(dev->of_node, NULL, compat);
if (!child) {
DRM_DEBUG("unable to find compatible node for %s\n", compat);
return ERR_PTR(-ENODEV);
}
pdev = of_platform_device_create(child, NULL, dev);
if (!pdev) {
DRM_ERROR("unable to create smmu platform dev for domain %d\n",
domain);
return ERR_PTR(-ENODEV);
}
smmu->client = platform_get_drvdata(pdev);
return &pdev->dev;
}
struct msm_mmu *msm_smmu_new(struct device *dev,
enum msm_mmu_domain_type domain)
{
struct msm_smmu *smmu;
struct device *client_dev;
smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
if (!smmu)
return ERR_PTR(-ENOMEM);
client_dev = msm_smmu_device_create(dev, domain, smmu);
if (IS_ERR(client_dev)) {
kfree(smmu);
return (void *)client_dev ? : ERR_PTR(-ENODEV);
}
smmu->client_dev = client_dev;
msm_mmu_init(&smmu->base, dev, &funcs);
return &smmu->base;
}
static int msm_smmu_fault_handler(struct iommu_domain *domain,
struct device *dev, unsigned long iova,
int flags, void *token)
{
struct msm_smmu_client *client;
int rc = -EINVAL;
if (!token) {
DRM_ERROR("Error: token is NULL\n");
return -EINVAL;
}
client = (struct msm_smmu_client *)token;
/* see iommu.h for fault flags definition */
SDE_EVT32(iova, flags);
DRM_ERROR("trigger dump, iova=0x%08lx, flags=0x%x\n", iova, flags);
DRM_ERROR("SMMU device:%s", client->dev ? client->dev->kobj.name : "");
/*
* return a non-zero error code to allow the smmu driver to dump out
* useful debug info.
*/
return rc;
}
/**
* msm_smmu_probe()
* @pdev: platform device
*
* Each smmu context acts as a separate device and the context banks are
* configured with a VA range.
* Registers the clks, as each context bank has its own clks, for which voting
* has to be done every time before using that context bank.
*/
static int msm_smmu_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct msm_smmu_client *client;
const struct msm_smmu_domain *domain;
match = of_match_device(msm_smmu_dt_match, &pdev->dev);
if (!match || !match->data) {
dev_err(&pdev->dev, "probe failed as match data is invalid\n");
return -EINVAL;
}
domain = match->data;
if (!domain) {
dev_err(&pdev->dev, "no matching device found\n");
return -EINVAL;
}
DRM_INFO("probing device %s\n", match->compatible);
client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
if (!client)
return -ENOMEM;
client->dev = &pdev->dev;
client->domain = iommu_get_domain_for_dev(client->dev);
if (!client->domain) {
dev_err(&pdev->dev, "iommu get domain for dev failed\n");
return -EINVAL;
}
if (!client->dev->dma_parms)
client->dev->dma_parms = devm_kzalloc(client->dev,
sizeof(*client->dev->dma_parms), GFP_KERNEL);
dma_set_max_seg_size(client->dev, DMA_BIT_MASK(32));
dma_set_seg_boundary(client->dev, DMA_BIT_MASK(64));
iommu_set_fault_handler(client->domain,
msm_smmu_fault_handler, (void *)client);
DRM_INFO("Created domain %s, secure=%d\n",
domain->label, domain->secure);
platform_set_drvdata(pdev, client);
return 0;
}
static int msm_smmu_remove(struct platform_device *pdev)
{
struct msm_smmu_client *client;
client = platform_get_drvdata(pdev);
client->domain_attached = false;
return 0;
}
static struct platform_driver msm_smmu_driver = {
.probe = msm_smmu_probe,
.remove = msm_smmu_remove,
.driver = {
.name = "msmdrm_smmu",
.of_match_table = msm_smmu_dt_match,
.suppress_bind_attrs = true,
},
};
int __init msm_smmu_driver_init(void)
{
int ret;
ret = platform_driver_register(&msm_smmu_driver);
if (ret)
pr_err("mdss_smmu_register_driver() failed!\n");
return ret;
}
void __exit msm_smmu_driver_cleanup(void)
{
platform_driver_unregister(&msm_smmu_driver);
}
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU driver");
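/*
 * Usage sketch with a hypothetical my_smmu_setup() caller: register
 * the platform driver once via msm_smmu_driver_init() at module init,
 * then create and attach a per-domain MMU instance for the display.
 */
static struct msm_mmu *my_smmu_setup(struct device *dev)
{
struct msm_mmu *mmu;
int rc;
mmu = msm_smmu_new(dev, MSM_SMMU_DOMAIN_UNSECURE);
if (IS_ERR(mmu))
return mmu;
rc = mmu->funcs->attach(mmu, NULL, 0);
if (rc) {
mmu->funcs->destroy(mmu);
return ERR_PTR(rc);
}
return mmu;
}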

96
msm/sde/sde_ad4.h Normal file
View File

@@ -0,0 +1,96 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _SDE_AD4_H_
#define _SDE_AD4_H_
#include <drm/drm_mode.h>
#include <drm/drm_property.h>
#include "sde_hw_dspp.h"
/**
* enum ad4_modes - ad4 modes supported by driver
*/
enum ad4_modes {
AD4_OFF,
AD4_AUTO_STRENGTH,
AD4_CALIBRATION,
AD4_MANUAL,
};
/**
* ad4_modes - drm enum property list for the supported ad4 modes
*/
static const struct drm_prop_enum_list ad4_modes[] = {
{AD4_OFF, "off"},
{AD4_AUTO_STRENGTH, "auto_strength_mode"},
{AD4_CALIBRATION, "calibration_mode"},
{AD4_MANUAL, "manual_mode"},
};
/**
* enum ad_property - properties that can be set for ad
*/
enum ad_property {
AD_MODE,
AD_INIT,
AD_CFG,
AD_INPUT,
AD_SUSPEND,
AD_ASSERTIVE,
AD_BACKLIGHT,
AD_STRENGTH,
AD_ROI,
AD_IPC_SUSPEND,
AD_IPC_RESUME,
AD_IPC_RESET,
AD_PROPMAX,
};
/**
* enum ad_intr_resp_property - ad4 interrupt response enum
*/
enum ad_intr_resp_property {
AD4_IN_OUT_BACKLIGHT,
AD4_RESPMAX,
};
/**
* struct sde_ad_hw_cfg - structure for setting the ad properties
* @prop: enum of ad property
* @hw_cfg: payload for the prop being set.
*/
struct sde_ad_hw_cfg {
enum ad_property prop;
struct sde_hw_cp_cfg *hw_cfg;
};
/**
* sde_validate_dspp_ad4() - API to validate whether an ad property is allowed
* for the display with the allocated dspp/mixers.
* @dspp: pointer to dspp info structure.
* @prop: pointer to u32 holding the ad property
*/
int sde_validate_dspp_ad4(struct sde_hw_dspp *dspp, u32 *prop);
/**
* sde_setup_dspp_ad4 - API to apply the ad property; sde_validate_dspp_ad4
* should be called before calling this function
* @dspp: pointer to dspp info structure.
* @cfg: pointer to struct sde_ad_hw_cfg
*/
void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *cfg);
/**
* sde_read_intr_resp_ad4 - API to get the ad4 interrupt status for an event
* @dspp: pointer to dspp object
* @event: event for which the response is needed
* @resp_in: pointer for returning the ad4 input value of the requested event
* @resp_out: pointer for returning the ad4 output value of the requested event
*/
void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event,
u32 *resp_in, u32 *resp_out);
#endif /* _SDE_AD4_H_ */
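/*
 * Usage sketch: ad4_modes above is the kind of list handed to
 * msm_property_install_enum() from msm_prop.h. The property name
 * "SDE_AD_MODE" and the my_ prefixed helper are hypothetical.
 */
static void my_install_ad_mode(struct msm_property_info *info,
uint32_t prop_idx)
{
msm_property_install_enum(info, "SDE_AD_MODE", 0, 0,
ad4_modes, ARRAY_SIZE(ad4_modes), prop_idx);
}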

File diff suppressed because it is too large

185
msm/sde/sde_color_processing.h Normal file
View File

@@ -0,0 +1,185 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _SDE_COLOR_PROCESSING_H
#define _SDE_COLOR_PROCESSING_H
#include <drm/drm_crtc.h>
struct sde_irq_callback;
/*
* PA MEMORY COLOR types
* @MEMCOLOR_SKIN Skin memory color type
* @MEMCOLOR_SKY Sky memory color type
* @MEMCOLOR_FOLIAGE Foliage memory color type
*/
enum sde_memcolor_type {
MEMCOLOR_SKIN = 0,
MEMCOLOR_SKY,
MEMCOLOR_FOLIAGE,
MEMCOLOR_MAX
};
/*
* PA HISTOGRAM modes
* @HIST_DISABLED Histogram disabled
* @HIST_ENABLED Histogram enabled
*/
enum sde_hist_modes {
HIST_DISABLED,
HIST_ENABLED
};
/**
* sde_hist_modes - drm enum property list for the PA histogram modes
*/
static const struct drm_prop_enum_list sde_hist_modes[] = {
{HIST_DISABLED, "hist_off"},
{HIST_ENABLED, "hist_on"},
};
/*
* LTM HISTOGRAM modes
* @LTM_HIST_DISABLED Histogram disabled
* @LTM_HIST_ENABLED Histogram enabled
*/
enum ltm_hist_modes {
LTM_HIST_DISABLED,
LTM_HIST_ENABLED
};
/**
* sde_ltm_hist_modes - drm enum property list for the LTM histogram modes
*/
static const struct drm_prop_enum_list sde_ltm_hist_modes[] = {
{LTM_HIST_DISABLED, "ltm_hist_off"},
{LTM_HIST_ENABLED, "ltm_hist_on"},
};
/**
* sde_cp_crtc_init(): Initialize color processing lists for a crtc.
* Should be called during crtc initialization.
* @crtc: Pointer to sde_crtc.
*/
void sde_cp_crtc_init(struct drm_crtc *crtc);
/**
* sde_cp_crtc_install_properties(): Installs the color processing
* properties for a crtc.
* Should be called during crtc initialization.
* @crtc: Pointer to crtc.
*/
void sde_cp_crtc_install_properties(struct drm_crtc *crtc);
/**
* sde_cp_crtc_destroy_properties: Destroys color processing
* properties for a crtc.
* should be called during crtc de-initialization.
* @crtc: Pointer to crtc.
*/
void sde_cp_crtc_destroy_properties(struct drm_crtc *crtc);
/**
* sde_cp_crtc_set_property: Set a color processing property
* for a crtc.
* Should be called during atomic set property.
* @crtc: Pointer to crtc.
* @property: Property that needs to be enabled/disabled.
* @val: Value of property.
*/
int sde_cp_crtc_set_property(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val);
/**
* sde_cp_crtc_apply_properties: Enable/disable properties
* for a crtc.
* Should be called during atomic commit call.
* @crtc: Pointer to crtc.
*/
void sde_cp_crtc_apply_properties(struct drm_crtc *crtc);
/**
* sde_cp_crtc_get_property: Get value of color processing property
* for a crtc.
* Should be called during atomic get property.
* @crtc: Pointer to crtc.
* @property: Property that needs to be enabled/disabled.
* @val: Value of property.
*
*/
int sde_cp_crtc_get_property(struct drm_crtc *crtc,
struct drm_property *property, uint64_t *val);
/**
* sde_cp_crtc_suspend: Suspend the crtc features
* @crtc: Pointer to crtc.
*/
void sde_cp_crtc_suspend(struct drm_crtc *crtc);
/**
* sde_cp_crtc_resume: Resume the crtc features
* @crtc: Pointer to crtc.
*/
void sde_cp_crtc_resume(struct drm_crtc *crtc);
/**
* sde_cp_crtc_clear: Clear the active list and dirty list of crtc features
* @crtc: Pointer to crtc.
*/
void sde_cp_crtc_clear(struct drm_crtc *crtc);
/**
* sde_cp_ad_interrupt: API to enable/disable ad interrupt
* @crtc: Pointer to crtc.
* @en: Variable to enable/disable interrupt.
* @irq: Pointer to irq callback
*/
int sde_cp_ad_interrupt(struct drm_crtc *crtc, bool en,
struct sde_irq_callback *irq);
/**
* sde_cp_crtc_pre_ipc: Handle color processing features
* before entering IPC
* @crtc: Pointer to crtc.
*/
void sde_cp_crtc_pre_ipc(struct drm_crtc *crtc);
/**
* sde_cp_crtc_post_ipc: Handle color processing features
* after exiting IPC
* @crtc: Pointer to crtc.
*/
void sde_cp_crtc_post_ipc(struct drm_crtc *crtc);
/**
* sde_cp_hist_interrupt: API to enable/disable histogram interrupt
* @crtc: Pointer to crtc.
* @en: Variable to enable/disable interrupt.
* @irq: Pointer to irq callback
*/
int sde_cp_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
struct sde_irq_callback *hist_irq);
/**
* sde_cp_ltm_hist_interrupt: API to enable/disable LTM hist interrupt
* @crtc: Pointer to crtc.
* @en: Variable to enable/disable interrupt.
* @irq: Pointer to irq callback
*/
int sde_cp_ltm_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
struct sde_irq_callback *hist_irq);
/**
* sde_cp_ltm_wb_pb_interrupt: API to enable/disable LTM wb_pb interrupt
* @crtc: Pointer to crtc.
* @en: Variable to enable/disable interrupt.
* @irq: Pointer to irq callback
*/
int sde_cp_ltm_wb_pb_interrupt(struct drm_crtc *crtc_drm, bool en,
struct sde_irq_callback *hist_irq);
#endif /*_SDE_COLOR_PROCESSING_H */
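/*
 * Usage sketch with hypothetical my_ prefixed wrappers, following the
 * kerneldoc above: init and install at crtc creation, set_property
 * from the atomic set path, apply_properties at atomic commit.
 */
static void my_crtc_cp_init(struct drm_crtc *crtc)
{
sde_cp_crtc_init(crtc);
sde_cp_crtc_install_properties(crtc);
}
static int my_crtc_cp_set(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val)
{
return sde_cp_crtc_set_property(crtc, property, val);
}
static void my_crtc_cp_commit(struct drm_crtc *crtc)
{
sde_cp_crtc_apply_properties(crtc);
}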

2558
msm/sde/sde_connector.c Normal file

File diff suppressed because it is too large

908
msm/sde/sde_connector.h Normal file
View File

@@ -0,0 +1,908 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _SDE_CONNECTOR_H_
#define _SDE_CONNECTOR_H_
#include <uapi/drm/msm_drm_pp.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_panel.h>
#include "msm_drv.h"
#include "msm_prop.h"
#include "sde_kms.h"
#include "sde_fence.h"
#define SDE_CONNECTOR_NAME_SIZE 16
#define SDE_CONNECTOR_DHDR_MEMPOOL_MAX_SIZE SZ_32
struct sde_connector;
struct sde_connector_state;
/**
* struct sde_connector_ops - callback functions for generic sde connector
* Individual callbacks documented below.
*/
struct sde_connector_ops {
/**
* post_init - perform additional initialization steps
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
* Returns: Zero on success
*/
int (*post_init)(struct drm_connector *connector,
void *display);
/**
* set_info_blob - initialize given info blob
* @connector: Pointer to drm connector structure
* @info: Pointer to sde connector info structure
* @display: Pointer to private display handle
* @mode_info: Pointer to mode info structure
* Returns: Zero on success
*/
int (*set_info_blob)(struct drm_connector *connector,
void *info,
void *display,
struct msm_mode_info *mode_info);
/**
* detect - determine if connector is connected
* @connector: Pointer to drm connector structure
* @force: Force detect setting from drm framework
* @display: Pointer to private display handle
* Returns: Connector 'is connected' status
*/
enum drm_connector_status (*detect)(struct drm_connector *connector,
bool force,
void *display);
/**
* get_modes - add drm modes via drm_mode_probed_add()
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
* Returns: Number of modes added
*/
int (*get_modes)(struct drm_connector *connector,
void *display);
/**
* update_pps - update pps command for the display panel
* @connector: Pointer to drm connector structure
* @pps_cmd: Pointer to pps command
* @display: Pointer to private display handle
* Returns: Zero on success
*/
int (*update_pps)(struct drm_connector *connector,
char *pps_cmd, void *display);
/**
* mode_valid - determine if specified mode is valid
* @connector: Pointer to drm connector structure
* @mode: Pointer to drm mode structure
* @display: Pointer to private display handle
* Returns: Validity status for specified mode
*/
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode,
void *display);
/**
* set_property - set property value
* @connector: Pointer to drm connector structure
* @state: Pointer to drm connector state structure
* @property_index: DRM property index
* @value: Incoming property value
* @display: Pointer to private display structure
* Returns: Zero on success
*/
int (*set_property)(struct drm_connector *connector,
struct drm_connector_state *state,
int property_index,
uint64_t value,
void *display);
/**
* get_property - get property value
* @connector: Pointer to drm connector structure
* @state: Pointer to drm connector state structure
* @property_index: DRM property index
* @value: Pointer to variable for accepting property value
* @display: Pointer to private display structure
* Returns: Zero on success
*/
int (*get_property)(struct drm_connector *connector,
struct drm_connector_state *state,
int property_index,
uint64_t *value,
void *display);
/**
* get_info - get display information
* @connector: Pointer to drm connector structure
* @info: Pointer to msm display info structure
* @display: Pointer to private display structure
* Returns: Zero on success
*/
int (*get_info)(struct drm_connector *connector,
struct msm_display_info *info, void *display);
/**
* get_mode_info - retrieve mode information
* @connector: Pointer to drm connector structure
* @drm_mode: Display mode set for the display
* @mode_info: Out parameter: information of the display mode
* @max_mixer_width: max width supported by HW layer mixer
* @display: Pointer to private display structure
* Returns: Zero on success
*/
int (*get_mode_info)(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
struct msm_mode_info *mode_info,
u32 max_mixer_width, void *display);
/**
* enable_event - notify display of event registration/unregistration
* @connector: Pointer to drm connector structure
* @event_idx: SDE connector event index
* @enable: Whether the event is being enabled/disabled
* @display: Pointer to private display structure
*/
void (*enable_event)(struct drm_connector *connector,
uint32_t event_idx, bool enable, void *display);
/**
* set_backlight - set backlight level
* @connector: Pointer to drm connector structure
* @display: Pointer to private display structure
* @bl_lvl: Backlight level
*/
int (*set_backlight)(struct drm_connector *connector,
void *display, u32 bl_lvl);
/**
* soft_reset - perform a soft reset on the connector
* @display: Pointer to private display structure
* Return: Zero on success, -ERROR otherwise
*/
int (*soft_reset)(void *display);
/**
* pre_kickoff - trigger display to program kickoff-time features
* @connector: Pointer to drm connector structure
* @display: Pointer to private display structure
* @params: Parameter bundle of connector-stored information for
* kickoff-time programming into the display
* Returns: Zero on success
*/
int (*pre_kickoff)(struct drm_connector *connector,
void *display,
struct msm_display_kickoff_params *params);
/**
* clk_ctrl - perform clk enable/disable on the connector
* @handle: Pointer to clk handle
* @type: Type of clks
* @state: State of clks (enable/disable)
*/
int (*clk_ctrl)(void *handle, u32 type, u32 state);
/**
* set_power - update dpms setting
* @connector: Pointer to drm connector structure
* @power_mode: One of the following,
* SDE_MODE_DPMS_ON
* SDE_MODE_DPMS_LP1
* SDE_MODE_DPMS_LP2
* SDE_MODE_DPMS_OFF
* @display: Pointer to private display structure
* Returns: Zero on success
*/
int (*set_power)(struct drm_connector *connector,
int power_mode, void *display);
/**
* get_dst_format - get dst_format from display
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
* Returns: dst_format of display
*/
enum dsi_pixel_format (*get_dst_format)(struct drm_connector *connector,
void *display);
/**
* post_kickoff - display to program post kickoff-time features
* @connector: Pointer to drm connector structure
* Returns: Zero on success
*/
int (*post_kickoff)(struct drm_connector *connector);
/**
* post_open - calls connector to process post open functionalities
* @display: Pointer to private display structure
*/
void (*post_open)(struct drm_connector *connector, void *display);
/**
* check_status - check status of connected display panel
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
* @te_check_override: Whether check TE from panel or default check
* Returns: positive value for success, negative or zero for failure
*/
int (*check_status)(struct drm_connector *connector, void *display,
bool te_check_override);
/**
* cmd_transfer - Transfer command to the connected display panel
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
* @cmd_buf: Command buffer
* @cmd_buf_len: Command buffer length in bytes
* Returns: Zero for success, negative for failure
*/
int (*cmd_transfer)(struct drm_connector *connector,
void *display, const char *cmd_buf,
u32 cmd_buf_len);
/**
* config_hdr - configure HDR
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
* @c_state: Pointer to connector state
* Returns: Zero on success, negative error code for failures
*/
int (*config_hdr)(struct drm_connector *connector, void *display,
struct sde_connector_state *c_state);
/**
* atomic_best_encoder - atomic best encoder selection for connector
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
* @c_state: Pointer to connector state
* Returns: valid drm_encoder for success
*/
struct drm_encoder *(*atomic_best_encoder)(
struct drm_connector *connector,
void *display,
struct drm_connector_state *c_state);
/**
* atomic_check - atomic check handling for connector
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
* @c_state: Pointer to connector state
* Returns: Zero on success, negative error code for failures
*/
int (*atomic_check)(struct drm_connector *connector,
void *display,
struct drm_connector_state *c_state);
/**
* pre_destroy - handle pre destroy operations for the connector
* @connector: Pointer to drm connector structure
* @display: Pointer to private display handle
* Returns: Zero on success, negative error code for failures
*/
void (*pre_destroy)(struct drm_connector *connector, void *display);
/**
* cont_splash_config - initialize splash resources
* @display: Pointer to private display handle
* Returns: zero for success, negative for failure
*/
int (*cont_splash_config)(void *display);
/**
* get_panel_vfp - returns original panel vfp
* @display: Pointer to private display handle
* @h_active: width
* @v_active: height
* Returns: v_front_porch on success, error code on failure
*/
int (*get_panel_vfp)(void *display, int h_active, int v_active);
/**
* get_default_lms - returns default number of lms
* @display: Pointer to private display handle
* @num_lm: Pointer to number of lms to be populated
* Returns: zero for success, negative for failure
*/
int (*get_default_lms)(void *display, u32 *num_lm);
};
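/*
 * Usage sketch with hypothetical my_conn_ names (not the actual DSI or
 * DP implementations): a display backend fills in only the callbacks
 * it supports and leaves the rest NULL.
 */
static enum drm_connector_status my_conn_detect(
struct drm_connector *connector, bool force, void *display)
{
/* a real backend would query its private display handle here */
return connector_status_connected;
}
static const struct sde_connector_ops my_conn_ops = {
.detect = my_conn_detect,
/* .get_modes, .set_property, etc. filled in by a real backend */
};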
/**
* enum sde_connector_events - list of recognized connector events
*/
enum sde_connector_events {
SDE_CONN_EVENT_VID_DONE, /* video mode frame done */
SDE_CONN_EVENT_CMD_DONE, /* command mode frame done */
SDE_CONN_EVENT_VID_FIFO_OVERFLOW, /* dsi fifo overflow error */
SDE_CONN_EVENT_CMD_FIFO_UNDERFLOW, /* dsi fifo underflow error */
SDE_CONN_EVENT_COUNT,
};
/**
* struct sde_connector_evt - local event registration entry structure
* @cb_func: Pointer to desired callback function
* @usr: User pointer to pass to callback on event trigger
* Returns: Zero on success, negative for failure
*/
struct sde_connector_evt {
int (*cb_func)(uint32_t event_idx,
uint32_t instance_idx, void *usr,
uint32_t data0, uint32_t data1,
uint32_t data2, uint32_t data3);
void *usr;
};
struct sde_connector_dyn_hdr_metadata {
u8 dynamic_hdr_payload[SDE_CONNECTOR_DHDR_MEMPOOL_MAX_SIZE];
int dynamic_hdr_payload_size;
bool dynamic_hdr_update;
};
/**
* struct sde_connector - local sde connector structure
* @base: Base drm connector structure
* @connector_type: Set to one of DRM_MODE_CONNECTOR_ types
* @encoder: Pointer to preferred drm encoder
* @panel: Pointer to drm panel, if present
* @display: Pointer to private display data structure
* @drv_panel: Pointer to interface driver's panel module, if present
* @mst_port: Pointer to mst port, if present
 * @aspace: GEM address space per SDE IOMMU domain (secure/unsecure buffers)
* @name: ASCII name of connector
* @lock: Mutex lock object for this structure
* @retire_fence: Retire fence context reference
* @ops: Local callback function pointer table
* @dpms_mode: DPMS property setting from user space
* @lp_mode: LP property setting from user space
* @last_panel_power_mode: Last consolidated dpms/lp mode setting
* @property_info: Private structure for generic property handling
* @property_data: Array of private data for generic property handling
* @blob_caps: Pointer to blob structure for 'capabilities' property
* @blob_hdr: Pointer to blob structure for 'hdr_properties' property
* @blob_ext_hdr: Pointer to blob structure for 'ext_hdr_properties' property
* @blob_dither: Pointer to blob structure for default dither config
* @blob_mode_info: Pointer to blob structure for mode info
* @fb_kmap: true if kernel mapping of framebuffer is requested
* @event_table: Array of registered events
* @event_lock: Lock object for event_table
* @bl_device: backlight device node
* @status_work: work object to perform status checks
 * @esd_status_interval: variable to change ESD check interval in milliseconds
* @panel_dead: Flag to indicate if panel has gone bad
* @esd_status_check: Flag to indicate if ESD thread is scheduled or not
* @bl_scale_dirty: Flag to indicate PP BL scale value(s) is changed
* @bl_scale: BL scale value for ABA feature
* @bl_scale_sv: BL scale value for sunlight visibility feature
* @unset_bl_level: BL level that needs to be set later
* @allow_bl_update: Flag to indicate if BL update is allowed currently or not
* @qsync_mode: Cached Qsync mode, 0=disabled, 1=continuous mode
* @qsync_updated: Qsync settings were updated
 * @last_cmd_tx_sts: status of the last command transfer
* @hdr_capable: external hdr support present
*/
struct sde_connector {
struct drm_connector base;
int connector_type;
struct drm_encoder *encoder;
struct drm_panel *panel;
void *display;
void *drv_panel;
void *mst_port;
struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
char name[SDE_CONNECTOR_NAME_SIZE];
struct mutex lock;
struct sde_fence_context *retire_fence;
struct sde_connector_ops ops;
int dpms_mode;
int lp_mode;
int last_panel_power_mode;
struct msm_property_info property_info;
struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
struct drm_property_blob *blob_caps;
struct drm_property_blob *blob_hdr;
struct drm_property_blob *blob_ext_hdr;
struct drm_property_blob *blob_dither;
struct drm_property_blob *blob_mode_info;
bool fb_kmap;
struct sde_connector_evt event_table[SDE_CONN_EVENT_COUNT];
spinlock_t event_lock;
struct backlight_device *bl_device;
struct delayed_work status_work;
u32 esd_status_interval;
bool panel_dead;
bool esd_status_check;
bool bl_scale_dirty;
u32 bl_scale;
u32 bl_scale_sv;
u32 unset_bl_level;
bool allow_bl_update;
u32 qsync_mode;
bool qsync_updated;
bool last_cmd_tx_sts;
bool hdr_capable;
};
/**
* to_sde_connector - convert drm_connector pointer to sde connector pointer
 * @x: Pointer to drm_connector structure
* Returns: Pointer to sde_connector structure
*/
#define to_sde_connector(x) container_of((x), struct sde_connector, base)
/**
* sde_connector_get_display - get sde connector's private display pointer
* @C: Pointer to drm connector structure
* Returns: Pointer to associated private display structure
*/
#define sde_connector_get_display(C) \
((C) ? to_sde_connector((C))->display : NULL)
/**
* sde_connector_get_panel - get sde connector's private panel pointer
* @C: Pointer to drm connector structure
* Returns: Pointer to associated private display structure
*/
#define sde_connector_get_panel(C) \
((C) ? to_sde_connector((C))->panel : NULL)
/**
* sde_connector_get_encoder - get sde connector's private encoder pointer
* @C: Pointer to drm connector structure
* Returns: Pointer to associated private encoder structure
*/
#define sde_connector_get_encoder(C) \
((C) ? to_sde_connector((C))->encoder : NULL)
/**
 * sde_connector_is_qsync_updated - indicates if connector updated qsync
* @C: Pointer to drm connector structure
* Returns: True if qsync is updated; false otherwise
*/
#define sde_connector_is_qsync_updated(C) \
((C) ? to_sde_connector((C))->qsync_updated : 0)
/**
* sde_connector_get_qsync_mode - get sde connector's qsync_mode
* @C: Pointer to drm connector structure
* Returns: Current cached qsync_mode for given connector
*/
#define sde_connector_get_qsync_mode(C) \
((C) ? to_sde_connector((C))->qsync_mode : 0)
/**
* sde_connector_get_propinfo - get sde connector's property info pointer
* @C: Pointer to drm connector structure
* Returns: Pointer to associated private property info structure
*/
#define sde_connector_get_propinfo(C) \
((C) ? &to_sde_connector((C))->property_info : NULL)
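/*
 * Example (illustrative, not part of the original file): typical use of the
 * accessor macros above from code holding only a drm_connector pointer. The
 * sde_connector_get_*() variants NULL-check the connector themselves;
 * to_sde_connector() does not.
 *
 *	struct sde_connector *c_conn = to_sde_connector(connector);
 *	void *display = sde_connector_get_display(connector);
 *	u32 mode = sde_connector_get_qsync_mode(connector);
 */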
/**
* struct sde_connector_state - private connector status structure
 * @base: Base drm connector state structure
* @out_fb: Pointer to output frame buffer, if applicable
* @property_state: Local storage for msm_prop properties
* @property_values: Local cache of current connector property values
* @rois: Regions of interest structure for mapping CRTC to Connector output
* @property_blobs: blob properties
* @mode_info: local copy of msm_mode_info struct
* @hdr_meta: HDR metadata info passed from userspace
* @dyn_hdr_meta: Dynamic HDR metadata payload and state tracking
* @old_topology_name: topology of previous atomic state. remove this in later
* kernel versions which provide drm_atomic_state old_state pointers
*/
struct sde_connector_state {
struct drm_connector_state base;
struct drm_framebuffer *out_fb;
struct msm_property_state property_state;
struct msm_property_value property_values[CONNECTOR_PROP_COUNT];
struct msm_roi_list rois;
struct drm_property_blob *property_blobs[CONNECTOR_PROP_BLOBCOUNT];
struct msm_mode_info mode_info;
struct drm_msm_ext_hdr_metadata hdr_meta;
struct sde_connector_dyn_hdr_metadata dyn_hdr_meta;
enum sde_rm_topology_name old_topology_name;
};
/**
* to_sde_connector_state - convert drm_connector_state pointer to
* sde connector state pointer
 * @x: Pointer to drm_connector_state structure
* Returns: Pointer to sde_connector_state structure
*/
#define to_sde_connector_state(x) \
container_of((x), struct sde_connector_state, base)
/**
* sde_connector_get_property - query integer value of connector property
* @S: Pointer to drm connector state
* @X: Property index, from enum msm_mdp_connector_property
* Returns: Integer value of requested property
*/
#define sde_connector_get_property(S, X) \
((S) && ((X) < CONNECTOR_PROP_COUNT) ? \
(to_sde_connector_state((S))->property_values[(X)].value) : 0)
/**
* sde_connector_get_property_state - retrieve property state cache
* @S: Pointer to drm connector state
* Returns: Pointer to local property state structure
*/
#define sde_connector_get_property_state(S) \
((S) ? (&to_sde_connector_state((S))->property_state) : NULL)
/**
* sde_connector_get_out_fb - query out_fb value from sde connector state
* @S: Pointer to drm connector state
* Returns: Output fb associated with specified connector state
*/
#define sde_connector_get_out_fb(S) \
	((S) ? to_sde_connector_state((S))->out_fb : NULL)
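/*
 * Example (illustrative): reading cached values out of an atomic connector
 * state with the macros above; CONNECTOR_PROP_TOPOLOGY_NAME is used the same
 * way by sde_connector_get_topology_name() below.
 *
 *	uint64_t top = sde_connector_get_property(state,
 *			CONNECTOR_PROP_TOPOLOGY_NAME);
 *	struct drm_framebuffer *fb = sde_connector_get_out_fb(state);
 */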
/**
* sde_connector_get_topology_name - helper accessor to retrieve topology_name
* @connector: pointer to drm connector
* Returns: value of the CONNECTOR_PROP_TOPOLOGY_NAME property or 0
*/
static inline uint64_t sde_connector_get_topology_name(
struct drm_connector *connector)
{
if (!connector || !connector->state)
return 0;
return sde_connector_get_property(connector->state,
CONNECTOR_PROP_TOPOLOGY_NAME);
}
/**
* sde_connector_get_old_topology_name - helper accessor to retrieve
* topology_name for the previous mode
 * @state: pointer to drm connector state
* Returns: cached value of the previous topology, or SDE_RM_TOPOLOGY_NONE
*/
static inline enum sde_rm_topology_name sde_connector_get_old_topology_name(
struct drm_connector_state *state)
{
	struct sde_connector_state *c_state;

	if (!state)
		return SDE_RM_TOPOLOGY_NONE;

	c_state = to_sde_connector_state(state);
	return c_state->old_topology_name;
}
/**
* sde_connector_set_old_topology_name - helper to cache value of previous
* mode's topology
 * @state: pointer to drm connector state
 * @top: topology name to be cached
* Returns: 0 on success, negative errno on failure
*/
static inline int sde_connector_set_old_topology_name(
struct drm_connector_state *state,
enum sde_rm_topology_name top)
{
	struct sde_connector_state *c_state;

	if (!state)
		return -EINVAL;

	c_state = to_sde_connector_state(state);
c_state->old_topology_name = top;
return 0;
}
/**
* sde_connector_get_lp - helper accessor to retrieve LP state
* @connector: pointer to drm connector
* Returns: value of the CONNECTOR_PROP_LP property or 0
*/
static inline uint64_t sde_connector_get_lp(
struct drm_connector *connector)
{
if (!connector || !connector->state)
return 0;
return sde_connector_get_property(connector->state,
CONNECTOR_PROP_LP);
}
/**
* sde_connector_set_property_for_commit - add property set to atomic state
* Add a connector state property update for the specified property index
* to the atomic state in preparation for a drm_atomic_commit.
* @connector: Pointer to drm connector
* @atomic_state: Pointer to DRM atomic state structure for commit
* @property_idx: Connector property index
* @value: Updated property value
* Returns: Zero on success
*/
int sde_connector_set_property_for_commit(struct drm_connector *connector,
struct drm_atomic_state *atomic_state,
uint32_t property_idx, uint64_t value);
/**
* sde_connector_init - create drm connector object for a given display
* @dev: Pointer to drm device struct
* @encoder: Pointer to associated encoder
* @panel: Pointer to associated panel, can be NULL
* @display: Pointer to associated display object
* @ops: Pointer to callback operations function table
* @connector_poll: Set to appropriate DRM_CONNECTOR_POLL_ setting
* @connector_type: Set to appropriate DRM_MODE_CONNECTOR_ type
* Returns: Pointer to newly created drm connector struct
*/
struct drm_connector *sde_connector_init(struct drm_device *dev,
struct drm_encoder *encoder,
struct drm_panel *panel,
void *display,
const struct sde_connector_ops *ops,
int connector_poll,
int connector_type);
/**
* sde_connector_prepare_fence - prepare fence support for current commit
* @connector: Pointer to drm connector object
*/
void sde_connector_prepare_fence(struct drm_connector *connector);
/**
* sde_connector_complete_commit - signal completion of current commit
* @connector: Pointer to drm connector object
* @ts: timestamp to be updated in the fence signalling
* @fence_event: enum value to indicate nature of fence event
*/
void sde_connector_complete_commit(struct drm_connector *connector,
ktime_t ts, enum sde_fence_event fence_event);
/**
* sde_connector_commit_reset - reset the completion signal
* @connector: Pointer to drm connector object
* @ts: timestamp to be updated in the fence signalling
*/
void sde_connector_commit_reset(struct drm_connector *connector, ktime_t ts);
/**
* sde_connector_get_info - query display specific information
* @connector: Pointer to drm connector object
* @info: Pointer to msm display information structure
* Returns: Zero on success
*/
int sde_connector_get_info(struct drm_connector *connector,
struct msm_display_info *info);
/**
* sde_connector_clk_ctrl - enables/disables the connector clks
* @connector: Pointer to drm connector object
* @enable: true/false to enable/disable
* Returns: Zero on success
*/
int sde_connector_clk_ctrl(struct drm_connector *connector, bool enable);
/**
* sde_connector_get_dpms - query dpms setting
* @connector: Pointer to drm connector structure
* Returns: Current DPMS setting for connector
*/
int sde_connector_get_dpms(struct drm_connector *connector);
/**
* sde_connector_set_qsync_params - set status of qsync_updated for current
* frame and update the cached qsync_mode
* @connector: pointer to drm connector
*
* This must be called after the connector set_property values are applied,
* and before sde_connector's qsync_updated or qsync_mode fields are accessed.
* It must only be called once per frame update for the given connector.
*/
void sde_connector_set_qsync_params(struct drm_connector *connector);
/**
* sde_connector_get_dyn_hdr_meta - returns pointer to connector state's dynamic
* HDR metadata info
* @connector: pointer to drm connector
*/
struct sde_connector_dyn_hdr_metadata *sde_connector_get_dyn_hdr_meta(
struct drm_connector *connector);
/**
* sde_connector_trigger_event - indicate that an event has occurred
* Any callbacks that have been registered against this event will
* be called from the same thread context.
 * @drm_connector: Pointer to drm connector structure
* @event_idx: Index of event to trigger
* @instance_idx: Event-specific "instance index" to pass to callback
* @data0: Event-specific "data" to pass to callback
* @data1: Event-specific "data" to pass to callback
* @data2: Event-specific "data" to pass to callback
* @data3: Event-specific "data" to pass to callback
* Returns: Zero on success
*/
int sde_connector_trigger_event(void *drm_connector,
uint32_t event_idx, uint32_t instance_idx,
uint32_t data0, uint32_t data1,
uint32_t data2, uint32_t data3);
/**
* sde_connector_register_event - register a callback function for an event
* @connector: Pointer to drm connector structure
* @event_idx: Index of event to register
* @cb_func: Pointer to desired callback function
* @usr: User pointer to pass to callback on event trigger
* Returns: Zero on success
*/
int sde_connector_register_event(struct drm_connector *connector,
uint32_t event_idx,
int (*cb_func)(uint32_t event_idx,
uint32_t instance_idx, void *usr,
uint32_t data0, uint32_t data1,
uint32_t data2, uint32_t data3),
void *usr);
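/*
 * Example (illustrative): registering for command-mode frame-done events.
 * The callback runs in the context of the thread that calls
 * sde_connector_trigger_event(); my_frame_done_cb and my_ctx are
 * hypothetical names.
 *
 *	static int my_frame_done_cb(uint32_t event_idx, uint32_t instance_idx,
 *			void *usr, uint32_t d0, uint32_t d1,
 *			uint32_t d2, uint32_t d3)
 *	{
 *		struct my_ctx *ctx = usr;
 *
 *		return 0;
 *	}
 *
 *	sde_connector_register_event(connector, SDE_CONN_EVENT_CMD_DONE,
 *			my_frame_done_cb, &my_ctx_instance);
 */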
/**
* sde_connector_unregister_event - unregister all callbacks for an event
* @connector: Pointer to drm connector structure
 * @event_idx: Index of event to unregister
*/
void sde_connector_unregister_event(struct drm_connector *connector,
uint32_t event_idx);
/**
* sde_connector_register_custom_event - register for async events
* @kms: Pointer to sde_kms
* @conn_drm: Pointer to drm connector object
* @event: Event for which request is being sent
* @en: Flag to enable/disable the event
* Returns: Zero on success
*/
int sde_connector_register_custom_event(struct sde_kms *kms,
struct drm_connector *conn_drm, u32 event, bool en);
/**
* sde_connector_pre_kickoff - trigger kickoff time feature programming
* @connector: Pointer to drm connector object
* Returns: Zero on success
*/
int sde_connector_pre_kickoff(struct drm_connector *connector);
/**
 * sde_connector_needs_offset - query whether the output fence offset must be
 * adjusted based on display type
* @connector: Pointer to drm connector object
* Returns: true if offset is required, false for all other cases.
*/
static inline bool sde_connector_needs_offset(struct drm_connector *connector)
{
struct sde_connector *c_conn;
if (!connector)
return false;
c_conn = to_sde_connector(connector);
return (c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
}
/**
* sde_connector_get_dither_cfg - get dither property data
* @conn: Pointer to drm_connector struct
* @state: Pointer to drm_connector_state struct
* @cfg: Pointer to pointer to dither cfg
* @len: length of the dither data
* Returns: Zero on success
*/
int sde_connector_get_dither_cfg(struct drm_connector *conn,
struct drm_connector_state *state, void **cfg, size_t *len);
/**
* sde_connector_set_blob_data - set connector blob property data
* @conn: Pointer to drm_connector struct
* @state: Pointer to the drm_connector_state struct
* @prop_id: property id to be populated
* Returns: Zero on success
*/
int sde_connector_set_blob_data(struct drm_connector *conn,
struct drm_connector_state *state,
enum msm_mdp_conn_property prop_id);
/**
* sde_connector_roi_v1_check_roi - validate connector ROI
* @conn_state: Pointer to drm_connector_state struct
* Returns: Zero on success
*/
int sde_connector_roi_v1_check_roi(struct drm_connector_state *conn_state);
/**
* sde_connector_schedule_status_work - manage ESD thread
 * @conn: Pointer to drm_connector struct
* @en: flag to start/stop ESD thread
*/
void sde_connector_schedule_status_work(struct drm_connector *conn, bool en);
/**
 * sde_connector_helper_reset_custom_properties - reset custom properties to
 * default values in the given DRM connector state object
* @connector: Pointer to DRM connector object
* @connector_state: Pointer to DRM connector state object
* Returns: 0 on success, negative errno on failure
*/
int sde_connector_helper_reset_custom_properties(
struct drm_connector *connector,
struct drm_connector_state *connector_state);
/**
* sde_connector_get_mode_info - get information of the current mode in the
* given connector state.
 * @conn_state: Pointer to the DRM connector state object
 * @mode_info: Pointer to the mode info structure
*/
int sde_connector_get_mode_info(struct drm_connector_state *conn_state,
struct msm_mode_info *mode_info);
/**
* sde_conn_timeline_status - current buffer timeline status
 * @conn: Pointer to drm_connector struct
*/
void sde_conn_timeline_status(struct drm_connector *conn);
/**
* sde_connector_helper_bridge_disable - helper function for drm bridge disable
* @connector: Pointer to DRM connector object
*/
void sde_connector_helper_bridge_disable(struct drm_connector *connector);
/**
* sde_connector_destroy - destroy drm connector object
* @connector: Pointer to DRM connector object
*/
void sde_connector_destroy(struct drm_connector *connector);
/**
* sde_connector_event_notify - signal hw recovery event to client
* @connector: pointer to connector
* @type: event type
* @len: length of the value of the event
* @val: value
*/
int sde_connector_event_notify(struct drm_connector *connector, uint32_t type,
uint32_t len, uint32_t val);
/**
* sde_connector_helper_bridge_enable - helper function for drm bridge enable
* @connector: Pointer to DRM connector object
*/
void sde_connector_helper_bridge_enable(struct drm_connector *connector);
/**
* sde_connector_get_panel_vfp - helper to get panel vfp
 * @connector: pointer to drm connector
 * @mode: pointer to drm display mode carrying the panel width and height
 * Returns: v_front_porch on success, error code on failure
*/
int sde_connector_get_panel_vfp(struct drm_connector *connector,
struct drm_display_mode *mode);
/**
 * sde_connector_esd_status - helper function to check panel TE status
* @connector: Pointer to DRM connector object
*/
int sde_connector_esd_status(struct drm_connector *connector);
#endif /* _SDE_CONNECTOR_H_ */

663
msm/sde/sde_core_irq.c Normal file
View File

@@ -0,0 +1,663 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include "sde_core_irq.h"
#include "sde_power_handle.h"
/**
* sde_core_irq_callback_handler - dispatch core interrupts
* @arg: private data of callback handler
* @irq_idx: interrupt index
*/
static void sde_core_irq_callback_handler(void *arg, int irq_idx)
{
struct sde_kms *sde_kms = arg;
struct sde_irq *irq_obj = &sde_kms->irq_obj;
struct sde_irq_callback *cb;
unsigned long irq_flags;
bool cb_tbl_error = false;
int enable_counts = 0;
pr_debug("irq_idx=%d\n", irq_idx);
spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
/* print error outside lock */
cb_tbl_error = true;
enable_counts = atomic_read(
&sde_kms->irq_obj.enable_counts[irq_idx]);
}
atomic_inc(&irq_obj->irq_counts[irq_idx]);
/*
* Perform registered function callback
*/
list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
if (cb->func)
cb->func(cb->arg, irq_idx);
spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
if (cb_tbl_error) {
/*
* If enable count is zero and callback list is empty, then it's
* not a fatal issue. Log this case as debug. If the enable
* count is nonzero and callback list is empty, then its a real
* issue. Log this case as error to ensure we don't have silent
* IRQs running.
*/
if (!enable_counts) {
SDE_DEBUG("irq has no callback, idx %d enables %d\n",
irq_idx, enable_counts);
SDE_EVT32_IRQ(irq_idx, enable_counts);
} else {
SDE_ERROR("irq has no callback, idx %d enables %d\n",
irq_idx, enable_counts);
SDE_EVT32_IRQ(irq_idx, enable_counts, SDE_EVTLOG_ERROR);
}
}
/*
* Clear pending interrupt status in HW.
* NOTE: sde_core_irq_callback_handler is protected by top-level
* spinlock, so it is safe to clear any interrupt status here.
*/
sde_kms->hw_intr->ops.clear_intr_status_nolock(
sde_kms->hw_intr,
irq_idx);
}
int sde_core_irq_idx_lookup(struct sde_kms *sde_kms,
enum sde_intr_type intr_type, u32 instance_idx)
{
if (!sde_kms || !sde_kms->hw_intr ||
!sde_kms->hw_intr->ops.irq_idx_lookup)
return -EINVAL;
return sde_kms->hw_intr->ops.irq_idx_lookup(
sde_kms->hw_intr, intr_type,
instance_idx);
}
/**
* _sde_core_irq_enable - enable core interrupt given by the index
* @sde_kms: Pointer to sde kms context
* @irq_idx: interrupt index
*/
static int _sde_core_irq_enable(struct sde_kms *sde_kms, int irq_idx)
{
unsigned long irq_flags;
int ret = 0;
if (!sde_kms || !sde_kms->hw_intr ||
!sde_kms->irq_obj.enable_counts ||
!sde_kms->irq_obj.irq_counts) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
SDE_EVT32(irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
spin_lock_irqsave(&sde_kms->hw_intr->irq_lock, irq_flags);
if (atomic_inc_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 1)
ret = sde_kms->hw_intr->ops.enable_irq_nolock(
sde_kms->hw_intr, irq_idx);
spin_unlock_irqrestore(&sde_kms->hw_intr->irq_lock, irq_flags);
if (ret)
SDE_ERROR("Fail to enable IRQ for irq_idx:%d\n", irq_idx);
SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
if (atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]) == 1) {
spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
/* empty callback list but interrupt is enabled */
if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]))
SDE_ERROR("irq_idx=%d enabled with no callback\n",
irq_idx);
spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
}
return ret;
}
int sde_core_irq_enable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
{
int i, ret = 0;
if (!sde_kms || !irq_idxs || !irq_count) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
for (i = 0; (i < irq_count) && !ret; i++)
ret = _sde_core_irq_enable(sde_kms, irq_idxs[i]);
return ret;
}
/**
* _sde_core_irq_disable - disable core interrupt given by the index
* @sde_kms: Pointer to sde kms context
* @irq_idx: interrupt index
*/
static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
{
int ret = 0;
unsigned long irq_flags;
if (!sde_kms || !sde_kms->hw_intr || !sde_kms->irq_obj.enable_counts) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
SDE_EVT32(irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
spin_lock_irqsave(&sde_kms->hw_intr->irq_lock, irq_flags);
if (atomic_add_unless(&sde_kms->irq_obj.enable_counts[irq_idx], -1, 0)
&& atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0)
ret = sde_kms->hw_intr->ops.disable_irq_nolock(
sde_kms->hw_intr, irq_idx);
spin_unlock_irqrestore(&sde_kms->hw_intr->irq_lock, irq_flags);
if (ret)
SDE_ERROR("Fail to disable IRQ for irq_idx:%d\n", irq_idx);
SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
return ret;
}
int sde_core_irq_disable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
{
int i, ret = 0;
if (!sde_kms || !irq_idxs || !irq_count) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
for (i = 0; (i < irq_count) && !ret; i++)
ret = _sde_core_irq_disable(sde_kms, irq_idxs[i]);
return ret;
}
/**
* sde_core_irq_disable_nolock - disable core interrupt given by the index
* without lock
* @sde_kms: Pointer to sde kms context
* @irq_idx: interrupt index
*/
int sde_core_irq_disable_nolock(struct sde_kms *sde_kms, int irq_idx)
{
int ret = 0;
if (!sde_kms || !sde_kms->hw_intr || !sde_kms->irq_obj.enable_counts) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
SDE_EVT32(irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
if (atomic_dec_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0) {
ret = sde_kms->hw_intr->ops.disable_irq_nolock(
sde_kms->hw_intr,
irq_idx);
if (ret)
SDE_ERROR("Fail to disable IRQ for irq_idx:%d\n",
irq_idx);
SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
}
return ret;
}
u32 sde_core_irq_read_nolock(struct sde_kms *sde_kms, int irq_idx, bool clear)
{
if (!sde_kms || !sde_kms->hw_intr ||
			!sde_kms->hw_intr->ops.get_intr_status_nolock)
return 0;
if (irq_idx < 0) {
SDE_ERROR("[%pS] invalid irq_idx=%d\n",
__builtin_return_address(0), irq_idx);
return 0;
}
return sde_kms->hw_intr->ops.get_intr_status_nolock(sde_kms->hw_intr,
irq_idx, clear);
}
u32 sde_core_irq_read(struct sde_kms *sde_kms, int irq_idx, bool clear)
{
if (!sde_kms || !sde_kms->hw_intr ||
!sde_kms->hw_intr->ops.get_interrupt_status)
return 0;
if (irq_idx < 0) {
SDE_ERROR("[%pS] invalid irq_idx=%d\n",
__builtin_return_address(0), irq_idx);
return 0;
}
return sde_kms->hw_intr->ops.get_interrupt_status(sde_kms->hw_intr,
irq_idx, clear);
}
int sde_core_irq_register_callback(struct sde_kms *sde_kms, int irq_idx,
struct sde_irq_callback *register_irq_cb)
{
unsigned long irq_flags;
if (!sde_kms || !sde_kms->irq_obj.irq_cb_tbl) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
if (!register_irq_cb || !register_irq_cb->func) {
SDE_ERROR("invalid irq_cb:%d func:%d\n",
register_irq_cb != NULL,
register_irq_cb ?
register_irq_cb->func != NULL : -1);
return -EINVAL;
}
if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
SDE_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
SDE_EVT32(irq_idx, register_irq_cb);
list_del_init(&register_irq_cb->list);
list_add_tail(&register_irq_cb->list,
&sde_kms->irq_obj.irq_cb_tbl[irq_idx]);
spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
return 0;
}
int sde_core_irq_unregister_callback(struct sde_kms *sde_kms, int irq_idx,
struct sde_irq_callback *register_irq_cb)
{
unsigned long irq_flags;
if (!sde_kms || !sde_kms->irq_obj.irq_cb_tbl) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
if (!register_irq_cb || !register_irq_cb->func) {
SDE_ERROR("invalid irq_cb:%d func:%d\n",
register_irq_cb != NULL,
register_irq_cb ?
register_irq_cb->func != NULL : -1);
return -EINVAL;
}
if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
}
SDE_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
SDE_EVT32(irq_idx, register_irq_cb);
list_del_init(&register_irq_cb->list);
/* empty callback list but interrupt is still enabled */
if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]))
SDE_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
return 0;
}
static void sde_clear_all_irqs(struct sde_kms *sde_kms)
{
if (!sde_kms || !sde_kms->hw_intr ||
!sde_kms->hw_intr->ops.clear_all_irqs)
return;
sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);
}
static void sde_disable_all_irqs(struct sde_kms *sde_kms)
{
if (!sde_kms || !sde_kms->hw_intr ||
!sde_kms->hw_intr->ops.disable_all_irqs)
return;
sde_kms->hw_intr->ops.disable_all_irqs(sde_kms->hw_intr);
}
#ifdef CONFIG_DEBUG_FS
#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, __prefix ## _show, inode->i_private); \
} \
static const struct file_operations __prefix ## _fops = { \
.owner = THIS_MODULE, \
.open = __prefix ## _open, \
.release = single_release, \
.read = seq_read, \
.llseek = seq_lseek, \
}
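/*
 * For reference: DEFINE_SDE_DEBUGFS_SEQ_FOPS(prefix) expands into a
 * prefix##_open() helper plus a prefix##_fops file_operations table that
 * wires prefix##_show() into the seq_file single_open()/single_release()
 * interface; it is applied to sde_debugfs_core_irq_show() below.
 */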
static int sde_debugfs_core_irq_show(struct seq_file *s, void *v)
{
struct sde_irq *irq_obj = s->private;
struct sde_irq_callback *cb;
unsigned long irq_flags;
int i, irq_count, enable_count, cb_count;
if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
SDE_ERROR("invalid parameters\n");
return 0;
}
for (i = 0; i < irq_obj->total_irqs; i++) {
spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
cb_count = 0;
irq_count = atomic_read(&irq_obj->irq_counts[i]);
enable_count = atomic_read(&irq_obj->enable_counts[i]);
list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
cb_count++;
spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
if (irq_count || enable_count || cb_count)
seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
i, irq_count, enable_count, cb_count);
}
return 0;
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_core_irq);
int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
struct dentry *parent)
{
sde_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0400,
parent, &sde_kms->irq_obj,
&sde_debugfs_core_irq_fops);
return 0;
}
void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
{
debugfs_remove(sde_kms->irq_obj.debugfs_file);
sde_kms->irq_obj.debugfs_file = NULL;
}
#else
int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
struct dentry *parent)
{
return 0;
}
void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
{
}
#endif
void sde_core_irq_preinstall(struct sde_kms *sde_kms)
{
struct msm_drm_private *priv;
int i;
int rc;
if (!sde_kms) {
SDE_ERROR("invalid sde_kms\n");
return;
} else if (!sde_kms->dev) {
SDE_ERROR("invalid drm device\n");
return;
} else if (!sde_kms->dev->dev_private) {
SDE_ERROR("invalid device private\n");
return;
}
priv = sde_kms->dev->dev_private;
rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
true);
if (rc) {
SDE_ERROR("failed to enable power resource %d\n", rc);
SDE_EVT32(rc, SDE_EVTLOG_ERROR);
return;
}
sde_clear_all_irqs(sde_kms);
sde_disable_all_irqs(sde_kms);
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
spin_lock_init(&sde_kms->irq_obj.cb_lock);
/* Create irq callbacks for all possible irq_idx */
sde_kms->irq_obj.total_irqs = sde_kms->hw_intr->sde_irq_map_size;
sde_kms->irq_obj.irq_cb_tbl = kcalloc(sde_kms->irq_obj.total_irqs,
sizeof(struct list_head), GFP_KERNEL);
sde_kms->irq_obj.enable_counts = kcalloc(sde_kms->irq_obj.total_irqs,
sizeof(atomic_t), GFP_KERNEL);
sde_kms->irq_obj.irq_counts = kcalloc(sde_kms->irq_obj.total_irqs,
sizeof(atomic_t), GFP_KERNEL);
for (i = 0; i < sde_kms->irq_obj.total_irqs; i++) {
INIT_LIST_HEAD(&sde_kms->irq_obj.irq_cb_tbl[i]);
atomic_set(&sde_kms->irq_obj.enable_counts[i], 0);
atomic_set(&sde_kms->irq_obj.irq_counts[i], 0);
}
}
int sde_core_irq_postinstall(struct sde_kms *sde_kms)
{
return 0;
}
void sde_core_irq_uninstall(struct sde_kms *sde_kms)
{
struct msm_drm_private *priv;
int i;
int rc;
unsigned long irq_flags;
if (!sde_kms) {
SDE_ERROR("invalid sde_kms\n");
return;
} else if (!sde_kms->dev) {
SDE_ERROR("invalid drm device\n");
return;
} else if (!sde_kms->dev->dev_private) {
SDE_ERROR("invalid device private\n");
return;
}
priv = sde_kms->dev->dev_private;
rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
true);
if (rc) {
SDE_ERROR("failed to enable power resource %d\n", rc);
SDE_EVT32(rc, SDE_EVTLOG_ERROR);
return;
}
for (i = 0; i < sde_kms->irq_obj.total_irqs; i++)
if (atomic_read(&sde_kms->irq_obj.enable_counts[i]) ||
!list_empty(&sde_kms->irq_obj.irq_cb_tbl[i]))
SDE_ERROR("irq_idx=%d still enabled/registered\n", i);
sde_clear_all_irqs(sde_kms);
sde_disable_all_irqs(sde_kms);
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
kfree(sde_kms->irq_obj.irq_cb_tbl);
kfree(sde_kms->irq_obj.enable_counts);
kfree(sde_kms->irq_obj.irq_counts);
sde_kms->irq_obj.irq_cb_tbl = NULL;
sde_kms->irq_obj.enable_counts = NULL;
sde_kms->irq_obj.irq_counts = NULL;
sde_kms->irq_obj.total_irqs = 0;
spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
}
static void sde_core_irq_mask(struct irq_data *irqd)
{
struct sde_kms *sde_kms;
if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
SDE_ERROR("invalid parameters irqd %d\n", irqd != NULL);
return;
}
sde_kms = irq_data_get_irq_chip_data(irqd);
/* memory barrier */
smp_mb__before_atomic();
clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
/* memory barrier */
smp_mb__after_atomic();
}
static void sde_core_irq_unmask(struct irq_data *irqd)
{
struct sde_kms *sde_kms;
if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
SDE_ERROR("invalid parameters irqd %d\n", irqd != NULL);
return;
}
sde_kms = irq_data_get_irq_chip_data(irqd);
/* memory barrier */
smp_mb__before_atomic();
set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
/* memory barrier */
smp_mb__after_atomic();
}
static struct irq_chip sde_core_irq_chip = {
.name = "sde",
.irq_mask = sde_core_irq_mask,
.irq_unmask = sde_core_irq_unmask,
};
static int sde_core_irqdomain_map(struct irq_domain *domain,
unsigned int irq, irq_hw_number_t hwirq)
{
struct sde_kms *sde_kms;
int rc;
if (!domain || !domain->host_data) {
SDE_ERROR("invalid parameters domain %d\n", domain != NULL);
return -EINVAL;
}
sde_kms = domain->host_data;
irq_set_chip_and_handler(irq, &sde_core_irq_chip, handle_level_irq);
rc = irq_set_chip_data(irq, sde_kms);
return rc;
}
static const struct irq_domain_ops sde_core_irqdomain_ops = {
.map = sde_core_irqdomain_map,
.xlate = irq_domain_xlate_onecell,
};
int sde_core_irq_domain_add(struct sde_kms *sde_kms)
{
struct device *dev;
struct irq_domain *domain;
if (!sde_kms->dev || !sde_kms->dev->dev) {
pr_err("invalid device handles\n");
return -EINVAL;
}
dev = sde_kms->dev->dev;
domain = irq_domain_add_linear(dev->of_node, 32,
&sde_core_irqdomain_ops, sde_kms);
if (!domain) {
pr_err("failed to add irq_domain\n");
return -EINVAL;
}
sde_kms->irq_controller.enabled_mask = 0;
sde_kms->irq_controller.domain = domain;
return 0;
}
int sde_core_irq_domain_fini(struct sde_kms *sde_kms)
{
if (sde_kms->irq_controller.domain) {
irq_domain_remove(sde_kms->irq_controller.domain);
sde_kms->irq_controller.domain = NULL;
}
return 0;
}
irqreturn_t sde_core_irq(struct sde_kms *sde_kms)
{
/*
* Read interrupt status from all sources. Interrupt status are
* stored within hw_intr.
* Function will also clear the interrupt status after reading.
* Individual interrupt status bit will only get stored if it
* is enabled.
*/
sde_kms->hw_intr->ops.get_interrupt_statuses(sde_kms->hw_intr);
/*
* Dispatch to HW driver to handle interrupt lookup that is being
* fired. When matching interrupt is located, HW driver will call to
* sde_core_irq_callback_handler with the irq_idx from the lookup table.
* sde_core_irq_callback_handler will perform the registered function
* callback, and do the interrupt status clearing once the registered
* callback is finished.
*/
sde_kms->hw_intr->ops.dispatch_irqs(
sde_kms->hw_intr,
sde_core_irq_callback_handler,
sde_kms);
return IRQ_HANDLED;
}

185
msm/sde/sde_core_irq.h Normal file
View File

@@ -0,0 +1,185 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_CORE_IRQ_H__
#define __SDE_CORE_IRQ_H__
#include "sde_kms.h"
#include "sde_hw_interrupts.h"
/**
* sde_core_irq_preinstall - perform pre-installation of core IRQ handler
* @sde_kms: SDE handle
* @return: none
*/
void sde_core_irq_preinstall(struct sde_kms *sde_kms);
/**
* sde_core_irq_postinstall - perform post-installation of core IRQ handler
* @sde_kms: SDE handle
* @return: 0 if success; error code otherwise
*/
int sde_core_irq_postinstall(struct sde_kms *sde_kms);
/**
* sde_core_irq_uninstall - uninstall core IRQ handler
* @sde_kms: SDE handle
* @return: none
*/
void sde_core_irq_uninstall(struct sde_kms *sde_kms);
/**
* sde_core_irq_domain_add - Add core IRQ domain for SDE
* @sde_kms: SDE handle
 * @return: 0 if success; error code otherwise
*/
int sde_core_irq_domain_add(struct sde_kms *sde_kms);
/**
* sde_core_irq_domain_fini - uninstall core IRQ domain
* @sde_kms: SDE handle
* @return: 0 if success; error code otherwise
*/
int sde_core_irq_domain_fini(struct sde_kms *sde_kms);
/**
* sde_core_irq - core IRQ handler
* @sde_kms: SDE handle
* @return: interrupt handling status
*/
irqreturn_t sde_core_irq(struct sde_kms *sde_kms);
/**
 * sde_core_irq_idx_lookup - IRQ helper function for looking up irq_idx from
 * the HW interrupt mapping table.
* @sde_kms: SDE handle
* @intr_type: SDE HW interrupt type for lookup
* @instance_idx: SDE HW block instance defined in sde_hw_mdss.h
 * @return: irq_idx, or -EINVAL when the lookup fails
*/
int sde_core_irq_idx_lookup(
struct sde_kms *sde_kms,
enum sde_intr_type intr_type,
uint32_t instance_idx);
/**
* sde_core_irq_enable - IRQ helper function for enabling one or more IRQs
* @sde_kms: SDE handle
* @irq_idxs: Array of irq index
* @irq_count: Number of irq_idx provided in the array
* @return: 0 for success enabling IRQ, otherwise failure
*
* This function increments count on each enable and decrements on each
 * disable. The interrupt is enabled when the count increments from 0 to 1.
*/
int sde_core_irq_enable(
struct sde_kms *sde_kms,
int *irq_idxs,
uint32_t irq_count);
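/*
 * Example (illustrative): enable/disable calls are reference counted and
 * must be balanced; irq_idx would come from sde_core_irq_idx_lookup().
 *
 *	int irq_idx = sde_core_irq_idx_lookup(sde_kms, intr_type, instance);
 *
 *	if (irq_idx >= 0) {
 *		sde_core_irq_enable(sde_kms, &irq_idx, 1);
 *		...
 *		sde_core_irq_disable(sde_kms, &irq_idx, 1);
 *	}
 */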
/**
 * sde_core_irq_disable - IRQ helper function for disabling one or more IRQs
* @sde_kms: SDE handle
* @irq_idxs: Array of irq index
* @irq_count: Number of irq_idx provided in the array
* @return: 0 for success disabling IRQ, otherwise failure
*
* This function increments count on each enable and decrements on each
 * disable. The interrupt is disabled when the count drops back to 0.
*/
int sde_core_irq_disable(
struct sde_kms *sde_kms,
int *irq_idxs,
uint32_t irq_count);
/**
* sde_core_irq_disable_nolock - no lock version of sde_core_irq_disable
* @sde_kms: SDE handle
* @irq_idx: Irq index
* @return: 0 for success disabling IRQ, otherwise failure
*
* This function increments count on each enable and decrements on each
 * disable. The interrupt is disabled when the count drops back to 0.
*/
int sde_core_irq_disable_nolock(
struct sde_kms *sde_kms,
int irq_idx);
/**
* sde_core_irq_read - IRQ helper function for reading IRQ status
* @sde_kms: SDE handle
* @irq_idx: irq index
* @clear: True to clear the irq after read
 * @return: non-zero if an irq was detected; zero otherwise
*/
u32 sde_core_irq_read(
struct sde_kms *sde_kms,
int irq_idx,
bool clear);
/**
 * sde_core_irq_read_nolock - no lock version of sde_core_irq_read
* @sde_kms: SDE handle
* @irq_idx: irq index
* @clear: True to clear the irq after read
 * @return: non-zero if an irq was detected; zero otherwise
*/
u32 sde_core_irq_read_nolock(
struct sde_kms *sde_kms,
int irq_idx,
bool clear);
/**
* sde_core_irq_register_callback - For registering callback function on IRQ
* interrupt
* @sde_kms: SDE handle
* @irq_idx: irq index
* @irq_cb: IRQ callback structure, containing callback function
* and argument. Passing NULL for irq_cb will unregister
* the callback for the given irq_idx
* This must exist until un-registration.
* @return: 0 for success registering callback, otherwise failure
*
* This function supports registration of multiple callbacks for each interrupt.
*/
int sde_core_irq_register_callback(
struct sde_kms *sde_kms,
int irq_idx,
struct sde_irq_callback *irq_cb);
/**
* sde_core_irq_unregister_callback - For unregistering callback function on IRQ
* interrupt
* @sde_kms: SDE handle
* @irq_idx: irq index
* @irq_cb: IRQ callback structure, containing callback function
* and argument. Passing NULL for irq_cb will unregister
* the callback for the given irq_idx
* This must match with registration.
* @return: 0 for success registering callback, otherwise failure
*
* This function supports registration of multiple callbacks for each interrupt.
*/
int sde_core_irq_unregister_callback(
struct sde_kms *sde_kms,
int irq_idx,
struct sde_irq_callback *irq_cb);
/**
* sde_debugfs_core_irq_init - register core irq debugfs
* @sde_kms: pointer to kms
* @parent: debugfs directory root
 * @return: 0 on success
*/
int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
struct dentry *parent);
/**
* sde_debugfs_core_irq_destroy - deregister core irq debugfs
* @sde_kms: pointer to kms
*/
void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms);
#endif /* __SDE_CORE_IRQ_H__ */

1252
msm/sde/sde_core_perf.c Normal file

File diff suppressed because it is too large

162
msm/sde/sde_core_perf.h Normal file
View File

@@ -0,0 +1,162 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _SDE_CORE_PERF_H_
#define _SDE_CORE_PERF_H_
#include <linux/types.h>
#include <linux/dcache.h>
#include <linux/mutex.h>
#include <drm/drm_crtc.h>
#include "sde_hw_catalog.h"
#include "sde_power_handle.h"
#define SDE_PERF_DEFAULT_MAX_CORE_CLK_RATE 320000000
/**
* uidle performance counters mode
* @SDE_PERF_UIDLE_DISABLE: Disable logging (default)
* @SDE_PERF_UIDLE_CNT: Enable logging of uidle performance counters
* @SDE_PERF_UIDLE_STATUS: Enable logging of uidle status
* @SDE_PERF_UIDLE_MAX: Max available mode
*/
#define SDE_PERF_UIDLE_DISABLE 0x0
#define SDE_PERF_UIDLE_CNT BIT(0)
#define SDE_PERF_UIDLE_STATUS BIT(1)
#define SDE_PERF_UIDLE_MAX BIT(2)
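/*
 * Example (illustrative): the uidle modes above are bit flags, so counter
 * and status logging can be requested together:
 *
 *	u32 uidle_mode = SDE_PERF_UIDLE_CNT | SDE_PERF_UIDLE_STATUS;
 *
 *	if (uidle_mode & SDE_PERF_UIDLE_STATUS)
 *		log_uidle_status();	(log_uidle_status is hypothetical)
 */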
/**
* struct sde_core_perf_params - definition of performance parameters
* @max_per_pipe_ib: maximum instantaneous bandwidth request
* @bw_ctl: arbitrated bandwidth request
* @core_clk_rate: core clock rate request
* @llcc_active: request to activate/deactivate the llcc
*/
struct sde_core_perf_params {
u64 max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MAX];
u64 bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MAX];
u64 core_clk_rate;
bool llcc_active;
};
/**
* struct sde_core_perf_tune - definition of performance tuning control
* @mode: performance mode
* @min_core_clk: minimum core clock
* @min_bus_vote: minimum bus vote
*/
struct sde_core_perf_tune {
u32 mode;
u64 min_core_clk;
u64 min_bus_vote;
};
/**
* struct sde_core_perf - definition of core performance context
* @dev: Pointer to drm device
* @debugfs_root: top level debug folder
* @catalog: Pointer to catalog configuration
* @phandle: Pointer to power handler
* @pclient: Pointer to power client
* @clk_name: core clock name
* @core_clk: Pointer to core clock structure
* @core_clk_rate: current core clock rate
* @max_core_clk_rate: maximum allowable core clock rate
* @perf_tune: debug control for performance tuning
* @enable_bw_release: debug control for bandwidth release
* @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
* @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
* @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
* @bw_vote_mode: apps rsc vs display rsc bandwidth vote mode
* @sde_rsc_available: is display rsc available
* @bw_vote_mode_updated: bandwidth vote mode update
* @llcc_active: status of the llcc, true if active.
* @uidle_enabled: indicates if uidle is already enabled
*/
struct sde_core_perf {
struct drm_device *dev;
struct dentry *debugfs_root;
struct sde_mdss_cfg *catalog;
struct sde_power_handle *phandle;
struct sde_power_client *pclient;
char *clk_name;
struct clk *core_clk;
u64 core_clk_rate;
u64 max_core_clk_rate;
struct sde_core_perf_tune perf_tune;
u32 enable_bw_release;
u64 fix_core_clk_rate;
u64 fix_core_ib_vote;
u64 fix_core_ab_vote;
u32 bw_vote_mode;
bool sde_rsc_available;
bool bw_vote_mode_updated;
bool llcc_active;
bool uidle_enabled;
};
/**
* sde_core_perf_crtc_check - validate performance of the given crtc state
* @crtc: Pointer to crtc
* @state: Pointer to new crtc state
* return: zero if success, or error code otherwise
*/
int sde_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state);
/**
* sde_core_perf_crtc_update - update performance of the given crtc
* @crtc: Pointer to crtc
* @params_changed: true if crtc parameters are modified
* @stop_req: true if this is a stop request
*/
void sde_core_perf_crtc_update(struct drm_crtc *crtc,
int params_changed, bool stop_req);
/**
* sde_core_perf_crtc_release_bw - release bandwidth of the given crtc
* @crtc: Pointer to crtc
*/
void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc);
/**
* sde_core_perf_crtc_update_uidle - attempts to enable uidle of the given crtc
* @crtc: Pointer to crtc
* @enable: enable/disable uidle
*/
void sde_core_perf_crtc_update_uidle(struct drm_crtc *crtc, bool enable);
/**
* sde_core_perf_destroy - destroy the given core performance context
* @perf: Pointer to core performance context
*/
void sde_core_perf_destroy(struct sde_core_perf *perf);
/**
* sde_core_perf_init - initialize the given core performance context
* @perf: Pointer to core performance context
* @dev: Pointer to drm device
* @catalog: Pointer to catalog
* @phandle: Pointer to power handle
* @pclient: Pointer to power client
* @clk_name: core clock name
*/
int sde_core_perf_init(struct sde_core_perf *perf,
struct drm_device *dev,
struct sde_mdss_cfg *catalog,
struct sde_power_handle *phandle,
struct sde_power_client *pclient,
char *clk_name);
/**
* sde_core_perf_debugfs_init - initialize debugfs for core performance context
* @perf: Pointer to core performance context
 * @parent: Pointer to parent debugfs directory
*/
int sde_core_perf_debugfs_init(struct sde_core_perf *perf,
struct dentry *parent);
#endif /* _SDE_CORE_PERF_H_ */

6315
msm/sde/sde_crtc.c Normal file

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff