Merge "disp: msm: dsi: add const qualifier for device node in get_named_gpio"

Committed by Gerrit - the friendly Code Review server
commit 5f25adc693

config/gki_kalamadisp.conf (new file, 11 lines)
@@ -0,0 +1,11 @@
export CONFIG_DRM_MSM=y
export CONFIG_DRM_MSM_SDE=y
export CONFIG_SYNC_FILE=y
export CONFIG_DRM_MSM_DSI=y
export CONFIG_DSI_PARSER=y
export CONFIG_QCOM_MDSS_PLL=y
export CONFIG_DRM_SDE_RSC=y
export CONFIG_DRM_SDE_WB=y
export CONFIG_DRM_MSM_REGISTER_LOGGING=y
export CONFIG_DISPLAY_BUILD=m
export CONFIG_HDCP_QSEECOM=y

config/gki_kalamadispconf.h (new file, 18 lines)
@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 */

#define CONFIG_DRM_MSM 1
#define CONFIG_DRM_MSM_SDE 1
#define CONFIG_SYNC_FILE 1
#define CONFIG_DRM_MSM_DSI 1
#define CONFIG_DSI_PARSER 1
#define CONFIG_DRM_SDE_WB 1
#define CONFIG_DRM_SDE_RSC 1
#define CONFIG_DRM_MSM_REGISTER_LOGGING 1
#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1
#define CONFIG_QCOM_MDSS_PLL 1
#define CONFIG_GKI_DISPLAY 1
#define CONFIG_MSM_EXT_DISPLAY 1
#define CONFIG_HDCP_QSEECOM 1
@@ -1,5 +1,5 @@
#SPDX-License-Identifier: GPL-2.0-only
ifneq ($(TARGET_USES_QMAA),true)

ifneq ($(TARGET_BOARD_AUTO),true)
ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_drm.ko
@@ -7,4 +7,3 @@ ifneq ($(TARGET_BOARD_AUTO),true)
BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_drm.ko
endif
endif
endif
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only

KDIR := $(TOP)/kernel_platform/common
KDIR := $(TOP)/kernel_platform/msm-kernel

ifeq ($(CONFIG_ARCH_WAIPIO), y)
ifeq ($(CONFIG_ARCH_QTI_VM), y)
@@ -12,6 +12,11 @@ else
endif
endif

#ifeq ($(CONFIG_ARCH_KALAMA), y)
include $(DISPLAY_ROOT)/config/gki_kalamadisp.conf
LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_kalamadispconf.h
#endif

LINUX_INC += -Iinclude/linux \
-Iinclude/linux/drm
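For context on the two new config files: gki_kalamadisp.conf exports the display Kconfig symbols to the Android build, while gki_kalamadispconf.h is force-included through LINUX_INC so the driver sources still see CONFIG_* macros that a GKI kernel's .config does not provide. A minimal illustrative sketch (the helper name is hypothetical and not part of this change) of how such a force-included define is consumed in C:

	#include <linux/types.h>

	/* With -include config/gki_kalamadispconf.h on the compile line, these
	 * macros are visible even though they are absent from the GKI .config. */
	static inline bool example_evtlog_debug_enabled(void)
	{
	#if defined(CONFIG_DRM_SDE_EVTLOG_DEBUG)
		return true;
	#else
		return false;
	#endif
	}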
msm/dp/dp_ctrl.c (6 changes, Normal file → Executable file)
@@ -372,7 +372,7 @@ static int dp_ctrl_link_training_1(struct dp_ctrl_private *ctrl)
if (ret)
break;

drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
drm_dp_link_train_clock_recovery_delay(ctrl->aux->drm_aux, ctrl->panel->dpcd);

ret = dp_ctrl_read_link_status(ctrl, link_status);
if (ret)
@@ -457,7 +457,7 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
{
dp_ctrl_update_sink_pattern(ctrl, 0);
drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
drm_dp_link_train_channel_eq_delay(ctrl->aux->drm_aux, ctrl->panel->dpcd);
}

static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
@@ -503,7 +503,7 @@ static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
if (ret)
break;

drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
drm_dp_link_train_channel_eq_delay(ctrl->aux->drm_aux, ctrl->panel->dpcd);

ret = dp_ctrl_read_link_status(ctrl, link_status);
if (ret)
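The three dp_ctrl.c hunks above track an upstream DP helper change: drm_dp_link_train_clock_recovery_delay() and drm_dp_link_train_channel_eq_delay() now take the struct drm_dp_aux * as their first argument, so the driver passes ctrl->aux->drm_aux alongside the cached DPCD. A minimal caller sketch under that assumption (everything except the two helpers is hypothetical):

	#include <drm/drm_dp_helper.h>

	/* Wait out the sink-mandated training delays, identifying the link
	 * by its AUX channel as the newer helpers expect. */
	static void example_training_delays(struct drm_dp_aux *aux,
					    const u8 dpcd[DP_RECEIVER_CAP_SIZE])
	{
		drm_dp_link_train_clock_recovery_delay(aux, dpcd);
		drm_dp_link_train_channel_eq_delay(aux, dpcd);
	}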
@@ -1873,7 +1873,7 @@ int dp_mst_init(struct dp_display *dp_display)
dp_mst.caps.drm_aux,
dp_mst.caps.max_dpcd_transaction_bytes,
dp_mst.caps.max_streams_supported,
conn_base_id);
4, DP_LINK_BW_8_1, conn_base_id);
if (ret) {
DP_ERR("dp drm mst topology manager init failed\n");
goto error;
@@ -297,17 +297,7 @@ static int dsi_ctrl_debugfs_init(struct dsi_ctrl *dsi_ctrl,
goto error_remove_dir;
}

cmd_dma_logs = debugfs_create_bool("enable_cmd_dma_stats",
0600,
dir,
&dsi_ctrl->enable_cmd_dma_stats);
if (IS_ERR_OR_NULL(cmd_dma_logs)) {
rc = PTR_ERR(cmd_dma_logs);
DSI_CTRL_ERR(dsi_ctrl,
"enable cmd dma stats failed, rc=%d\n",
rc);
goto error_remove_dir;
}
debugfs_create_bool("enable_cmd_dma_stats", 0600, dir, &dsi_ctrl->enable_cmd_dma_stats);

cmd_dma_logs = debugfs_create_file("cmd_dma_stats",
0444,
@@ -2007,40 +2007,20 @@ static int dsi_display_debugfs_init(struct dsi_display *display)

snprintf(name, ARRAY_SIZE(name),
"%s_allow_phy_power_off", phy->name);
dump_file = debugfs_create_bool(name, 0600, dir,
&phy->allow_phy_power_off);
if (IS_ERR_OR_NULL(dump_file)) {
rc = PTR_ERR(dump_file);
DSI_ERR("[%s] debugfs create %s failed, rc=%d\n",
display->name, name, rc);
goto error_remove_dir;
}
debugfs_create_bool(name, 0600, dir, &phy->allow_phy_power_off);

snprintf(name, ARRAY_SIZE(name),
"%s_regulator_min_datarate_bps", phy->name);
debugfs_create_u32(name, 0600, dir, &phy->regulator_min_datarate_bps);
}

if (!debugfs_create_bool("ulps_feature_enable", 0600, dir,
&display->panel->ulps_feature_enabled)) {
DSI_ERR("[%s] debugfs create ulps feature enable file failed\n",
display->name);
goto error_remove_dir;
}
debugfs_create_bool("ulps_feature_enable", 0600, dir,
&display->panel->ulps_feature_enabled);

if (!debugfs_create_bool("ulps_suspend_feature_enable", 0600, dir,
&display->panel->ulps_suspend_enabled)) {
DSI_ERR("[%s] debugfs create ulps-suspend feature enable file failed\n",
display->name);
goto error_remove_dir;
}
debugfs_create_bool("ulps_suspend_feature_enable", 0600, dir,
&display->panel->ulps_suspend_enabled);

if (!debugfs_create_bool("ulps_status", 0400, dir,
&display->ulps_enabled)) {
DSI_ERR("[%s] debugfs create ulps status file failed\n",
display->name);
goto error_remove_dir;
}
debugfs_create_bool("ulps_status", 0400, dir, &display->ulps_enabled);

debugfs_create_u32("clk_gating_config", 0600, dir, &display->clk_gating_config);
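The two debugfs hunks above (dsi_ctrl.c and dsi_display.c) drop the IS_ERR_OR_NULL()/PTR_ERR() checks because the modern debugfs_create_bool()/debugfs_create_u32() helpers return void and debugfs setup failures are meant to be ignored. A minimal sketch of the resulting style, with hypothetical names:

	#include <linux/debugfs.h>

	static bool example_enable_stats;
	static u32 example_log_level;

	/* Create the knobs and simply ignore debugfs failures, as the
	 * void-returning helpers intend; the driver works without them. */
	static void example_debugfs_init(struct dentry *parent)
	{
		debugfs_create_bool("enable_stats", 0600, parent, &example_enable_stats);
		debugfs_create_u32("log_level", 0600, parent, &example_log_level);
	}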
@@ -6383,11 +6363,11 @@ static int dsi_host_ext_attach(struct mipi_dsi_host *host,
DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS;

panel->video_config.hsa_lp11_en =
dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSA;
dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA;
panel->video_config.hbp_lp11_en =
dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HBP;
dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP;
panel->video_config.hfp_lp11_en =
dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HFP;
dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP;
panel->video_config.pulse_mode_hsa_he =
dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE;
} else {
@@ -747,13 +747,6 @@ error:
return rc;
}

static void dsi_panel_pwm_unregister(struct dsi_panel *panel)
{
struct dsi_backlight_config *bl = &panel->bl_config;

devm_pwm_put(panel->parent, bl->pwm_bl);
}

static int dsi_panel_bl_unregister(struct dsi_panel *panel)
{
int rc = 0;
@@ -770,7 +763,6 @@ static int dsi_panel_bl_unregister(struct dsi_panel *panel)
case DSI_BACKLIGHT_EXTERNAL:
break;
case DSI_BACKLIGHT_PWM:
dsi_panel_pwm_unregister(panel);
break;
default:
DSI_ERR("Backlight type(%d) not supported\n", bl->type);
@@ -897,7 +897,7 @@ end:
return -EINVAL;
}

int dsi_parser_get_named_gpio(struct device_node *np,
int dsi_parser_get_named_gpio(const struct device_node *np,
const char *propname, int index)
{
int gpio = -EINVAL;
@@ -45,7 +45,7 @@ int dsi_parser_count_strings(const struct device_node *np,
int dsi_parser_read_string_index(const struct device_node *np,
const char *propname,
int index, const char **output);
int dsi_parser_get_named_gpio(struct device_node *np,
int dsi_parser_get_named_gpio(const struct device_node *np,
const char *propname, int index);
#else /* CONFIG_DSI_PARSER */
static inline void *dsi_parser_get(struct device *dev)
@@ -155,7 +155,7 @@ static inline int dsi_parser_read_string_index(const struct device_node *np,
return -ENODEV;
}

static inline int dsi_parser_get_named_gpio(struct device_node *np,
static inline int dsi_parser_get_named_gpio(const struct device_node *np,
const char *propname, int index)
{
return -ENODEV;
@@ -201,7 +201,7 @@ struct dsi_parser_utils {
const char *propname);
int (*count_strings)(const struct device_node *np,
const char *propname);
int (*get_named_gpio)(struct device_node *np,
int (*get_named_gpio)(const struct device_node *np,
const char *propname, int index);
int (*get_available_child_count)(const struct device_node *np);
};
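The dsi_parser hunks above are the change named in the commit subject: dsi_parser_get_named_gpio() and the get_named_gpio member of struct dsi_parser_utils now take a const struct device_node *, matching the other accessors in the table, so callers holding a read-only node pointer no longer need a cast. A hypothetical caller sketch (property names and the helper are illustrative only; dsi_parser.h is the driver-local header):

	#include <linux/of.h>
	#include "dsi_parser.h"

	/* A read-only device_node can now feed every dsi_parser_utils accessor,
	 * including get_named_gpio(), without dropping the const qualifier. */
	static int example_parse_reset_gpio(struct dsi_parser_utils *utils,
					    const struct device_node *np)
	{
		u32 delay_ms = 0;

		of_property_read_u32(np, "example,reset-delay-ms", &delay_ms);
		return utils->get_named_gpio(np, "example,reset-gpio", 0);
	}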
@@ -419,7 +419,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
crtc->base.id);

if (funcs->atomic_enable)
funcs->atomic_enable(crtc, old_crtc_state);
funcs->atomic_enable(crtc, old_state);
else
funcs->commit(crtc);
}
@@ -532,7 +532,7 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,

obj = msm_framebuffer_bo(new_state->fb, 0);
msm_obj = to_msm_bo(obj);
fence = dma_resv_get_excl_rcu(msm_obj->resv);
fence = dma_resv_get_excl_unlocked(msm_obj->resv);

drm_atomic_set_fence_for_plane(new_state, fence);
@@ -740,7 +740,7 @@ int msm_atomic_commit(struct drm_device *dev,
msm_framebuffer_bo(new_plane_state->fb, 0);
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_fence *fence =
dma_resv_get_excl_rcu(msm_obj->resv);
dma_resv_get_excl_unlocked(msm_obj->resv);

drm_atomic_set_fence_for_plane(new_plane_state, fence);
}
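The two msm_atomic.c hunks above follow the dma-resv rename in newer kernels: dma_resv_get_excl_rcu() became dma_resv_get_excl_unlocked(), with the same behaviour of taking a reference to the exclusive fence without holding the reservation lock. A minimal sketch (the function name is hypothetical):

	#include <linux/dma-resv.h>
	#include <linux/dma-fence.h>

	/* Returns a referenced exclusive fence (or NULL); the caller must
	 * eventually drop it with dma_fence_put(). */
	static struct dma_fence *example_get_excl_fence(struct dma_resv *resv)
	{
		return dma_resv_get_excl_unlocked(resv);
	}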
msm/msm_drv.c (125 changes)
@@ -41,7 +41,6 @@
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_of.h>
#include <drm/drm_irq.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_vblank.h>
#include <drm/drm_drv.h>
@@ -343,6 +342,71 @@ u32 msm_readl(const void __iomem *addr)
return val;
}

static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;

BUG_ON(!kms);

return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;

BUG_ON(!kms);

kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;

BUG_ON(!kms);

if (kms->funcs->irq_postinstall)
return kms->funcs->irq_postinstall(kms);

return 0;
}

static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
int ret;

if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;

msm_irq_preinstall(dev);

ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
if (ret)
return ret;

ret = msm_irq_postinstall(dev);
if (ret) {
free_irq(irq, dev);
return ret;
}

return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;

kms->funcs->irq_uninstall(kms);
free_irq(kms->irq, dev);
}

int msm_get_src_bpc(int chroma_format,
int bpc)
{
@@ -410,7 +474,7 @@ static int msm_drm_uninit(struct device *dev)
msm_fbdev_free(ddev);
#endif
drm_atomic_helper_shutdown(ddev);
drm_irq_uninstall(ddev);
msm_irq_uninstall(ddev);

if (kms && kms->funcs)
kms->funcs->destroy(kms);
@@ -859,7 +923,7 @@ static int msm_drm_component_init(struct device *dev)

if (kms) {
pm_runtime_get_sync(dev);
ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
ret = msm_irq_install(ddev, platform_get_irq(pdev, 0));
pm_runtime_put_sync(dev);
if (ret < 0) {
dev_err(dev, "failed to install IRQ handler\n");
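The msm_drv.c hunks around this point replace the drm_irq_install()/drm_irq_uninstall() midlayer calls (and, further down, the drm_driver .irq_* hooks) with driver-managed helpers built directly on request_irq()/free_irq(), as the added msm_irq_install()/msm_irq_uninstall() show. A stripped-down sketch of the same pattern, with hypothetical names:

	#include <linux/interrupt.h>
	#include <linux/irq.h>
	#include <drm/drm_drv.h>

	static irqreturn_t example_irq_handler(int irq, void *arg)
	{
		return IRQ_HANDLED;
	}

	/* Request the device interrupt directly; tear down with free_irq(irq, dev). */
	static int example_irq_install(struct drm_device *dev, unsigned int irq)
	{
		if (irq == IRQ_NOTCONNECTED)
			return -ENOTCONN;

		return request_irq(irq, example_irq_handler, 0, dev->driver->name, dev);
	}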
@@ -1065,43 +1129,6 @@ static void msm_lastclose(struct drm_device *dev)
kms->funcs->lastclose(kms);
}

static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);

if (kms->funcs->irq_postinstall)
return kms->funcs->irq_postinstall(kms);

return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
kms->funcs->irq_uninstall(kms);
}

/*
* DRM ioctls:
*/
@@ -1688,12 +1715,6 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_UNLOCKED),
};

static const struct vm_operations_struct vm_ops = {
.fault = msm_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -1714,24 +1735,12 @@ static struct drm_driver msm_driver = {
.open = msm_open,
.postclose = msm_postclose,
.lastclose = msm_lastclose,
.irq_handler = msm_irq,
.irq_preinstall = msm_irq_preinstall,
.irq_postinstall = msm_irq_postinstall,
.irq_uninstall = msm_irq_uninstall,
.gem_free_object_unlocked = msm_gem_free_object,
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = msm_gem_prime_import,
.gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
.gem_prime_vmap = msm_gem_prime_vmap,
.gem_prime_vunmap = msm_gem_prime_vunmap,
.gem_prime_mmap = msm_gem_prime_mmap,
.ioctls = msm_ioctls,
.num_ioctls = ARRAY_SIZE(msm_ioctls),
@@ -1146,7 +1146,6 @@ void msm_gem_sync(struct drm_gem_object *obj);
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
@@ -1166,8 +1165,8 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
msm/msm_gem.c (119 changes)
@@ -21,7 +21,6 @@
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/ion.h>

#include "msm_drv.h"
#include "msm_gem.h"
@@ -256,7 +255,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
@@ -718,6 +717,7 @@ fail:
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_buf_map map;
int ret = 0;

mutex_lock(&msm_obj->lock);
@@ -752,8 +752,10 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
goto fail;
}

msm_obj->vaddr =
dma_buf_vmap(obj->import_attach->dmabuf);
ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
if (ret)
return ERR_PTR(ret);
msm_obj->vaddr = map.vaddr;
} else {
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, PAGE_KERNEL);
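Several hunks in msm_gem.c and in the rotator debug code follow the dma-buf vmap interface change: dma_buf_vmap() no longer returns a raw pointer but fills a struct dma_buf_map and returns an error code, and dma_buf_vunmap() takes the same descriptor. A minimal sketch of the new pattern, with hypothetical names:

	#include <linux/dma-buf.h>
	#include <linux/dma-buf-map.h>
	#include <linux/printk.h>

	/* Map an imported dma-buf into the kernel, peek at it, then unmap it. */
	static int example_peek_dmabuf(struct dma_buf *buf)
	{
		struct dma_buf_map map;
		int ret;

		ret = dma_buf_vmap(buf, &map);
		if (ret)
			return ret;

		/* map.vaddr is only meaningful for system-memory (non-iomem) buffers. */
		pr_info("first byte: %#x\n", *(u8 *)map.vaddr);

		dma_buf_vunmap(buf, &map);
		return 0;
	}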
@@ -865,6 +867,7 @@ void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(msm_obj->vaddr);

WARN_ON(!mutex_is_locked(&msm_obj->lock));
@@ -872,8 +875,7 @@ static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
return;

if (obj->import_attach) {
dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
if (obj->dev && obj->dev->dev && !dev_is_dma_coherent(obj->dev->dev))
dma_buf_vunmap(obj->import_attach->dmabuf, &map);
dma_buf_end_cpu_access(obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
} else {
vunmap(msm_obj->vaddr);
@@ -899,7 +901,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;

ret = dma_resv_wait_timeout_rcu(msm_obj->resv, write,
ret = dma_resv_wait_timeout(msm_obj->resv, write,
true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -1016,6 +1018,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(msm_obj->vaddr);

/* object should not be on active list: */
WARN_ON(is_active(msm_obj));
@@ -1036,7 +1039,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)

if (obj->import_attach) {
if (msm_obj->vaddr)
dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
dma_buf_vunmap(obj->import_attach->dmabuf, &map);

/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -1083,13 +1086,27 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
return ret;
}

static const struct vm_operations_struct vm_ops = {
.fault = msm_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.free = msm_gem_free_object,
.pin = msm_gem_prime_pin,
.unpin = msm_gem_prime_unpin,
.get_sg_table = msm_gem_prime_get_sg_table,
.vmap = msm_gem_prime_vmap,
.vunmap = msm_gem_prime_vunmap,
.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
struct dma_resv *resv,
struct drm_gem_object **obj,
bool struct_mutex_locked)
struct drm_gem_object **obj)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;

switch (flags & MSM_BO_CACHE_MASK) {
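The hunk above adds a per-object drm_gem_object_funcs table; together with the msm_drv.c hunk that strips the .gem_* and prime callbacks from struct drm_driver, this follows the upstream move of GEM callbacks onto the object itself. A minimal sketch, assuming a hypothetical driver object:

	#include <drm/drm_gem.h>
	#include <linux/slab.h>

	static void example_gem_free(struct drm_gem_object *obj)
	{
		drm_gem_object_release(obj);
		kfree(obj);
	}

	/* The DRM core now looks up free/vmap/vm_ops and the prime hooks here
	 * rather than in struct drm_driver. */
	static const struct drm_gem_object_funcs example_gem_funcs = {
		.free = example_gem_free,
	};

	static void example_gem_init(struct drm_gem_object *obj)
	{
		obj->funcs = &example_gem_funcs;
	}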
@@ -1129,19 +1146,16 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->in_active_list = false;
msm_obj->obj_dirty = false;

mutex_lock(&priv->mm_lock);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
mutex_unlock(&priv->mm_lock);

*obj = &msm_obj->base;
(*obj)->funcs = &msm_gem_object_funcs;

return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags, bool struct_mutex_locked)
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
bool use_vram = false;
int ret;
@@ -1162,14 +1176,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
if (size == 0)
return ERR_PTR(-EINVAL);

ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
if (ret)
goto fail;

msm_obj = to_msm_bo(obj);

if (use_vram) {
struct msm_gem_vma *vma;
struct page **pages;
struct msm_gem_object *msm_obj = to_msm_bo(obj);

mutex_lock(&msm_obj->lock);
@@ -1197,6 +1212,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
goto fail;
}

mutex_lock(&dev->struct_mutex);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
mutex_unlock(&dev->struct_mutex);

return obj;

fail:
@@ -1204,18 +1223,6 @@ fail:
return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
uint32_t size, uint32_t flags)
{
return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags)
{
return _msm_gem_new(dev, size, flags, false);
}

int msm_gem_delayed_import(struct drm_gem_object *obj)
{
struct dma_buf_attachment *attach;
@@ -1238,9 +1245,6 @@ int msm_gem_delayed_import(struct drm_gem_object *obj)
attach = obj->import_attach;
attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;

if (msm_obj->flags & MSM_BO_SKIPSYNC)
attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

/*
* dma_buf_map_attachment will call dma_map_sg for ion buffer
* mapping, and iova will get mapped when the function returns.
@@ -1262,6 +1266,7 @@ fail_import:
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
uint32_t size;
@@ -1270,8 +1275,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,

size = PAGE_ALIGN(dmabuf->size);

ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj,
false);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
if (ret)
goto fail;
@@ -1293,19 +1297,16 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
*/
msm_obj->flags |= MSM_BO_EXTBUF;

/*
* For all uncached buffers, there is no need to perform cache
* maintenance on dma map/unmap time.
*/
ret = dma_buf_get_flags(dmabuf, &flags);
if (ret) {
if (ret)
DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
} else if ((flags & ION_FLAG_CACHED) == 0) {
DRM_DEBUG("Buffer is uncached type\n");
msm_obj->flags |= MSM_BO_SKIPSYNC;
}

mutex_unlock(&msm_obj->lock);

mutex_lock(&dev->struct_mutex);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
mutex_unlock(&dev->struct_mutex);

return obj;

fail:
@@ -1313,12 +1314,12 @@ fail:
return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova, bool locked)
struct drm_gem_object **bo, uint64_t *iova)
{
void *vaddr;
struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
int ret;

if (IS_ERR(obj))
@@ -1342,31 +1343,14 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,

return vaddr;
err:
if (locked)
drm_gem_object_put_locked(obj);
else
drm_gem_object_put(obj);

return ERR_PTR(ret);

}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova)
{
return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova)
{
return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
struct msm_gem_address_space *aspace, bool locked)
struct msm_gem_address_space *aspace)
{
if (IS_ERR_OR_NULL(bo))
return;
@@ -1374,9 +1358,6 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
msm_gem_put_vaddr(bo);
msm_gem_unpin_iova(bo, aspace);

if (locked)
drm_gem_object_put_locked(bo);
else
drm_gem_object_put(bo);
}
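The msm_gem.c hunks above also drop the struct_mutex-locked allocation and put variants (msm_gem_new_locked(), msm_gem_kernel_new_locked(), drm_gem_object_put_locked()); every reference drop now goes through drm_gem_object_put() regardless of caller context. A one-line sketch of the surviving path (the helper name is hypothetical):

	#include <drm/drm_gem.h>

	static void example_release_bo(struct drm_gem_object *obj)
	{
		if (obj)
			drm_gem_object_put(obj);  /* no *_locked variant needed anymore */
	}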
@@ -25,8 +25,7 @@
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
#define MSM_BO_KEEPATTRS 0x20000000 /* keep h/w bus attributes */
#define MSM_BO_SKIPSYNC 0x40000000 /* skip dmabuf cpu sync */
#define MSM_BO_EXTBUF 0x80000000 /* indicate BO is an import buffer */
#define MSM_BO_EXTBUF 0x40000000 /* indicate BO is an import buffer */

struct msm_gem_object;
@@ -25,8 +25,6 @@

#include <linux/qcom-dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/ion.h>
#include <linux/msm_ion.h>

struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
@@ -39,12 +37,13 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages);
}

void *msm_gem_prime_vmap(struct drm_gem_object *obj)
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
return msm_gem_get_vaddr(obj);
map->vaddr = msm_gem_get_vaddr(obj);
return IS_ERR_OR_NULL(map->vaddr);
}

void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
msm_gem_put_vaddr(obj);
}
@@ -142,7 +142,7 @@ struct msm_kms_funcs {
struct msm_kms {
const struct msm_kms_funcs *funcs;

/* irq number to be passed on to drm_irq_install */
/* irq number to be passed on to msm_irq_install */
int irq;

/* mapper-id used to request GEM buffer mapped for scanout: */
@@ -50,8 +50,7 @@ struct msm_mmu_funcs {
int dir, u32 flags);
void (*destroy)(struct msm_mmu *mmu);
bool (*is_domain_secure)(struct msm_mmu *mmu);
int (*set_attribute)(struct msm_mmu *mmu,
enum iommu_attr attr, void *data);
int (*enable_smmu_translations)(struct msm_mmu *mmu);
int (*one_to_one_map)(struct msm_mmu *mmu, uint32_t iova,
uint32_t dest_address, uint32_t size, int prot);
int (*one_to_one_unmap)(struct msm_mmu *mmu, uint32_t dest_address,
@@ -129,8 +129,7 @@ static void msm_smmu_detach(struct msm_mmu *mmu, const char * const *names,
dev_dbg(client->dev, "iommu domain detached\n");
}

static int msm_smmu_set_attribute(struct msm_mmu *mmu,
enum iommu_attr attr, void *data)
static int msm_enable_smmu_translations(struct msm_mmu *mmu)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
@@ -139,9 +138,9 @@ static int msm_smmu_set_attribute(struct msm_mmu *mmu,
if (!client || !client->domain)
return -ENODEV;

ret = iommu_domain_set_attr(client->domain, attr, data);
ret = qcom_iommu_enable_s1_translation(client->domain);
if (ret)
DRM_ERROR("set domain attribute failed:%d\n", ret);
DRM_ERROR("enable iommu s1 translations failed:%d\n", ret);

return ret;
}
@@ -307,7 +306,7 @@ static const struct msm_mmu_funcs funcs = {
.unmap_dma_buf = msm_smmu_unmap_dma_buf,
.destroy = msm_smmu_destroy,
.is_domain_secure = msm_smmu_is_domain_secure,
.set_attribute = msm_smmu_set_attribute,
.enable_smmu_translations = msm_enable_smmu_translations,
.one_to_one_map = msm_smmu_one_to_one_map,
.one_to_one_unmap = msm_smmu_one_to_one_unmap,
.get_dev = msm_smmu_get_dev,
@@ -2620,10 +2620,12 @@ sde_connector_best_encoder(struct drm_connector *connector)

static struct drm_encoder *
sde_connector_atomic_best_encoder(struct drm_connector *connector,
struct drm_connector_state *connector_state)
struct drm_atomic_state *state)
{
struct sde_connector *c_conn;
struct drm_encoder *encoder = NULL;
struct drm_connector_state *connector_state =
drm_atomic_get_new_connector_state(state, connector);

if (!connector) {
SDE_ERROR("invalid connector\n");
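This sde_connector.c hunk, and the sde_crtc.c/sde_plane.c hunks that follow, adapt to the atomic helper signature change in newer kernels: the connector, CRTC and plane atomic hooks receive the whole struct drm_atomic_state and look up their own old/new state with drm_atomic_get_new_*_state()/drm_atomic_get_old_*_state(). A hypothetical plane hook illustrating the pattern:

	#include <drm/drm_atomic.h>
	#include <drm/drm_plane.h>

	static int example_plane_atomic_check(struct drm_plane *plane,
					      struct drm_atomic_state *state)
	{
		struct drm_plane_state *new_state =
			drm_atomic_get_new_plane_state(state, plane);

		/* A disabled plane (no framebuffer) needs no further validation. */
		if (!new_state || !new_state->fb)
			return 0;

		return 0;
	}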
@@ -25,6 +25,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_flip_work.h>
#include <soc/qcom/of_common.h>

#include "sde_kms.h"
#include "sde_hw_lm.h"
@@ -3533,7 +3534,7 @@ static void _sde_crtc_clear_all_blend_stages(struct sde_crtc *sde_crtc)
}

static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
struct drm_atomic_state *state)
{
struct sde_crtc *sde_crtc;
struct drm_encoder *encoder;
@@ -3542,6 +3543,7 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
struct sde_splash_display *splash_display;
bool cont_splash_enabled = false;
size_t i;
struct drm_crtc_state *old_state = drm_atomic_get_new_crtc_state(state, crtc);

if (!crtc) {
SDE_ERROR("invalid crtc\n");
@@ -3635,7 +3637,7 @@ end:
}

static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
struct drm_atomic_state *state)
{
struct drm_encoder *encoder;
struct sde_crtc *sde_crtc;
@@ -4601,7 +4603,7 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
}

static void sde_crtc_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
struct drm_atomic_state *old_state)
{
struct sde_crtc *sde_crtc;
struct drm_encoder *encoder;
@@ -5347,7 +5349,7 @@ static int _sde_crtc_check_plane_layout(struct drm_crtc *crtc,
}

static int sde_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
struct drm_atomic_state *atomic_state)
{
struct drm_device *dev;
struct sde_crtc *sde_crtc;
@@ -5358,6 +5360,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
struct sde_multirect_plane_states *multirect_plane = NULL;
struct drm_connector *conn;
struct drm_connector_list_iter conn_iter;
struct drm_crtc_state *state = drm_atomic_get_new_crtc_state(atomic_state, crtc);

if (!crtc) {
SDE_ERROR("invalid crtc\n");
@@ -9,6 +9,7 @@
#include <linux/platform_device.h>
#include <linux/soc/qcom/llcc-qcom.h>
#include <linux/pm_qos.h>
#include <soc/qcom/of_common.h>

#include "sde_hw_mdss.h"
#include "sde_hw_catalog.h"
@@ -764,11 +764,6 @@ static int _sde_kms_release_shared_buffer(unsigned int mem_addr,
pfn_start = mem_addr >> PAGE_SHIFT;
pfn_end = (mem_addr + splash_buffer_size) >> PAGE_SHIFT;

ret = memblock_free(mem_addr, splash_buffer_size);
if (ret) {
SDE_ERROR("continuous splash memory free failed:%d\n", ret);
return ret;
}
for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
free_reserved_page(pfn_to_page(pfn_idx));
@@ -4063,7 +4058,6 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
{
struct msm_mmu *mmu;
int i, ret;
int early_map = 0;

if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev)
return -EINVAL;
@@ -4096,7 +4090,7 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
ret = _sde_kms_map_all_splash_regions(sde_kms);
if (ret) {
SDE_ERROR("failed to map ret:%d\n", ret);
goto early_map_fail;
goto enable_trans_fail;
}
}
@@ -4104,12 +4098,10 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
* disable early-map which would have been enabled during
* bootup by smmu through the device-tree hint for cont-spash
*/
ret = mmu->funcs->set_attribute(mmu, DOMAIN_ATTR_EARLY_MAP,
&early_map);
ret = mmu->funcs->enable_smmu_translations(mmu);
if (ret) {
SDE_ERROR("failed to set_att ret:%d, early_map:%d\n",
ret, early_map);
goto early_map_fail;
SDE_ERROR("failed to enable_s1_translations ret:%d\n", ret);
goto enable_trans_fail;
}
}
@@ -4117,7 +4109,7 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)

return 0;

early_map_fail:
enable_trans_fail:
_sde_kms_unmap_all_splash_regions(sde_kms);
fail:
_sde_kms_mmu_destroy(sde_kms);
@@ -19,7 +19,6 @@
#ifndef __SDE_KMS_H__
#define __SDE_KMS_H__

#include <linux/msm_ion.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
@@ -2704,11 +2704,12 @@ modeset_update:
}

static int sde_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *atomic_state)
{
int ret = 0;
struct sde_plane *psde;
struct sde_plane_state *pstate;
struct drm_plane_state *state = drm_atomic_get_new_plane_state(atomic_state, plane);

if (!plane || !state) {
SDE_ERROR("invalid arg(s), plane %d state %d\n",
@@ -3392,7 +3393,7 @@ static void _sde_plane_atomic_disable(struct drm_plane *plane,
multirect_index, SDE_SSPP_MULTIRECT_TIME_MX);
}

static void sde_plane_atomic_update(struct drm_plane *plane,
static void _sde_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sde_plane *psde;
@@ -3423,6 +3424,14 @@ static void sde_plane_atomic_update(struct drm_plane *plane,
}
}

static void sde_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *atomic_state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(atomic_state, plane);

_sde_plane_atomic_update(plane, old_state);
}

void sde_plane_restore(struct drm_plane *plane)
{
struct sde_plane *psde;
@@ -3444,7 +3453,7 @@ void sde_plane_restore(struct drm_plane *plane)
SDE_DEBUG_PLANE(psde, "\n");

/* last plane state is same as current state */
sde_plane_atomic_update(plane, plane->state);
_sde_plane_atomic_update(plane, plane->state);
}

bool sde_plane_is_cache_required(struct drm_plane *plane,
@@ -667,8 +667,8 @@ void sde_mini_dump_add_va_region(const char *name, u32 size, void *virt_addr)
}
#endif

static int _sde_dump_reg_range_cmp(void *priv, struct list_head *a,
struct list_head *b)
static int _sde_dump_reg_range_cmp(void *priv, const struct list_head *a,
const struct list_head *b)
{
struct sde_dbg_reg_range *ar, *br;
@@ -681,8 +681,8 @@ static int _sde_dump_reg_range_cmp(void *priv, struct list_head *a,
return ar->offset.start - br->offset.start;
}

static int _sde_dump_blk_phys_addr_cmp(void *priv, struct list_head *a,
struct list_head *b)
static int _sde_dump_blk_phys_addr_cmp(void *priv, const struct list_head *a,
const struct list_head *b)
{
struct sde_dbg_reg_base *ar, *br;
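The two sde_dbg.c hunks above adjust the comparators to the const-qualified list_sort() callback used by newer kernels: int (*cmp)(void *priv, const struct list_head *a, const struct list_head *b). A self-contained sketch of such a comparator (the node type is hypothetical):

	#include <linux/list.h>
	#include <linux/list_sort.h>

	struct example_node {
		struct list_head head;
		int key;
	};

	/* Comparator matching the const-qualified list_sort() prototype. */
	static int example_cmp(void *priv, const struct list_head *a,
			       const struct list_head *b)
	{
		struct example_node *na = list_entry(a, struct example_node, head);
		struct example_node *nb = list_entry(b, struct example_node, head);

		return na->key - nb->key;
	}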
@@ -19,7 +19,6 @@
#include <linux/module.h>

#include <soc/qcom/rpmh.h>
#include <drm/drm_irq.h>
#include "msm_drv.h"
#include "sde_rsc_priv.h"
#include "sde_dbg.h"
@@ -1212,6 +1212,8 @@ static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
struct sde_layer_buffer *buf, struct sde_mdp_data *data)
{
struct dma_buf_map map;

dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
dbgbuf->buflen = data->p[0].srcp_dma_buf->size;
@@ -1221,7 +1223,8 @@ static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,

if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
dma_buf_begin_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
dbgbuf->vaddr = dma_buf_vmap(dbgbuf->dmabuf);
dma_buf_vmap(dbgbuf->dmabuf, &map);
dbgbuf->vaddr = map.vaddr;
SDEROT_DBG("vaddr mapping: 0x%pK/%ld w:%d/h:%d\n",
dbgbuf->vaddr, dbgbuf->buflen,
dbgbuf->width, dbgbuf->height);