disp: msm: sde: remove unused functions from sde code
Cleanup unused functions from all modules in sde driver.

Change-Id: Ia0e72ab9c281b4200a63ce35bf184e83fe1db5d2
Signed-off-by: Veera Sundaram Sankaran <veeras@codeaurora.org>
Committed by: Gerrit - the friendly Code Review server
Parent: da71fc99c4
Commit: 506508e1cd

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -246,22 +246,6 @@ int sde_core_irq_disable_nolock(struct sde_kms *sde_kms, int irq_idx)
 	return ret;
 }
 
-u32 sde_core_irq_read_nolock(struct sde_kms *sde_kms, int irq_idx, bool clear)
-{
-	if (!sde_kms || !sde_kms->hw_intr ||
-			!sde_kms->hw_intr->ops.get_interrupt_status)
-		return 0;
-
-	if (irq_idx < 0) {
-		SDE_ERROR("[%pS] invalid irq_idx=%d\n",
-				__builtin_return_address(0), irq_idx);
-		return 0;
-	}
-
-	return sde_kms->hw_intr->ops.get_intr_status_nolock(sde_kms->hw_intr,
-			irq_idx, clear);
-}
-
 u32 sde_core_irq_read(struct sde_kms *sde_kms, int irq_idx, bool clear)
 {
 	if (!sde_kms || !sde_kms->hw_intr ||
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, 2021, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __SDE_CORE_IRQ_H__
@@ -119,18 +119,6 @@ u32 sde_core_irq_read(
 		int irq_idx,
 		bool clear);
 
-/**
- * sde_core_irq_read - no lock version of sde_core_irq_read
- * @sde_kms: SDE handle
- * @irq_idx: irq index
- * @clear: True to clear the irq after read
- * @return: non-zero if irq detected; otherwise no irq detected
- */
-u32 sde_core_irq_read_nolock(
-		struct sde_kms *sde_kms,
-		int irq_idx,
-		bool clear);
-
 /**
  * sde_core_irq_register_callback - For registering callback function on IRQ
  *		interrupt
@@ -5040,23 +5040,6 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
 		SDE_DEBUG("h_tile_instance %d = %d, split_role %d\n",
 				i, controller_id, phys_params.split_role);
 
-		if (sde_enc->ops.phys_init) {
-			struct sde_encoder_phys *enc;
-
-			enc = sde_enc->ops.phys_init(intf_type,
-					controller_id,
-					&phys_params);
-			if (enc) {
-				sde_enc->phys_encs[sde_enc->num_phys_encs] =
-					enc;
-				++sde_enc->num_phys_encs;
-			} else
-				SDE_ERROR_ENC(sde_enc,
-						"failed to add phys encs\n");
-
-			continue;
-		}
-
 		if (intf_type == INTF_WB) {
 			phys_params.intf_idx = INTF_MAX;
 			phys_params.wb_idx = sde_encoder_get_wb(
@@ -5129,10 +5112,7 @@ static const struct drm_encoder_funcs sde_encoder_funcs = {
 	.early_unregister = sde_encoder_early_unregister,
 };
 
-struct drm_encoder *sde_encoder_init_with_ops(
-		struct drm_device *dev,
-		struct msm_display_info *disp_info,
-		const struct sde_encoder_ops *ops)
+struct drm_encoder *sde_encoder_init(struct drm_device *dev, struct msm_display_info *disp_info)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct sde_kms *sde_kms = to_sde_kms(priv->kms);
@@ -5149,9 +5129,6 @@ struct drm_encoder *sde_encoder_init_with_ops(
 		goto fail;
 	}
 
-	if (ops)
-		sde_enc->ops = *ops;
-
 	mutex_init(&sde_enc->enc_lock);
 	ret = sde_encoder_setup_display(sde_enc, sde_kms, disp_info,
 			&drm_enc_mode);
@@ -5222,13 +5199,6 @@ fail:
 	return ERR_PTR(ret);
 }
 
-struct drm_encoder *sde_encoder_init(
-	struct drm_device *dev,
-	struct msm_display_info *disp_info)
-{
-	return sde_encoder_init_with_ops(dev, disp_info, NULL);
-}
-
 int sde_encoder_wait_for_event(struct drm_encoder *drm_enc,
 	enum msm_event_wait event)
 {
@@ -101,22 +101,6 @@ enum sde_enc_rc_states {
 	SDE_ENC_RC_STATE_IDLE
 };
 
-/**
- * struct sde_encoder_ops - callback functions for generic sde encoder
- * Individual callbacks documented below.
- */
-struct sde_encoder_ops {
-	/**
-	 * phys_init - phys initialization function
-	 * @type: controller type
-	 * @controller_id: controller id
-	 * @phys_init_params: Pointer of structure sde_enc_phys_init_params
-	 * Returns: Pointer of sde_encoder_phys, NULL if failed
-	 */
-	void *(*phys_init)(enum sde_intf_type type,
-			u32 controller_id, void *phys_init_params);
-};
-
 /**
  * struct sde_encoder_virt - virtual encoder. Container of one or more physical
  *	encoders. Virtual encoder manages one "logical" display. Physical
@@ -127,7 +111,6 @@ struct sde_encoder_ops {
  * @enc_spin_lock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
  * @bus_scaling_client: Client handle to the bus scaling interface
  * @te_source: vsync source pin information
- * @ops: Encoder ops from init function
  * @num_phys_encs: Actual number of physical encoders contained.
  * @phys_encs: Container of physical encoders managed.
  * @phys_vid_encs: Video physical encoders for panel mode switch.
@@ -206,8 +189,6 @@ struct sde_encoder_virt {
 	uint32_t display_num_of_h_tiles;
 	uint32_t te_source;
 
-	struct sde_encoder_ops ops;
-
 	unsigned int num_phys_encs;
 	struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
 	struct sde_encoder_phys *phys_vid_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
@@ -447,18 +428,6 @@ struct drm_encoder *sde_encoder_init(
 		struct drm_device *dev,
 		struct msm_display_info *disp_info);
 
-/**
- * sde_encoder_init_with_ops - initialize virtual encoder object with init ops
- * @dev: Pointer to drm device structure
- * @disp_info: Pointer to display information structure
- * @ops: Pointer to encoder ops structure
- * Returns: Pointer to newly created drm encoder
- */
-struct drm_encoder *sde_encoder_init_with_ops(
-		struct drm_device *dev,
-		struct msm_display_info *disp_info,
-		const struct sde_encoder_ops *ops);
-
 /**
  * sde_encoder_destroy - destroy previously initialized virtual encoder
  * @drm_enc: Pointer to previously created drm encoder structure
@@ -128,7 +128,6 @@ struct sde_encoder_virt_ops {
  * @is_autorefresh_enabled: provides the autorefresh current
  *                              enable/disable state.
  * @get_line_count: Obtain current internal vertical line count
- * @get_wr_line_count: Obtain current output vertical line count
  * @wait_dma_trigger: Returns true if lut dma has to trigger and wait
  *                              unitl transaction is complete.
  * @wait_for_active: Wait for display scan line to be in active area
@@ -182,7 +181,6 @@ struct sde_encoder_phys_ops {
 	void (*restore)(struct sde_encoder_phys *phys);
 	bool (*is_autorefresh_enabled)(struct sde_encoder_phys *phys);
 	int (*get_line_count)(struct sde_encoder_phys *phys);
-	int (*get_wr_line_count)(struct sde_encoder_phys *phys);
 	bool (*wait_dma_trigger)(struct sde_encoder_phys *phys);
 	int (*wait_for_active)(struct sde_encoder_phys *phys);
 	void (*setup_vsync_source)(struct sde_encoder_phys *phys, u32 vsync_source);
@@ -313,18 +313,6 @@ static void sde_encoder_phys_cmd_wr_ptr_irq(void *arg, int irq_idx)
 	SDE_ATRACE_END("wr_ptr_irq");
 }
 
-static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
-{
-	struct sde_encoder_phys *phys_enc = arg;
-
-	if (!phys_enc)
-		return;
-
-	if (phys_enc->parent_ops.handle_underrun_virt)
-		phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
-			phys_enc);
-}
-
 static void _sde_encoder_phys_cmd_setup_irq_hw_idx(
 	struct sde_encoder_phys *phys_enc)
 {
@@ -356,9 +344,6 @@ static void _sde_encoder_phys_cmd_setup_irq_hw_idx(
 	else
 		irq->hw_idx = phys_enc->hw_pp->idx;
 
-	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
-	irq->hw_idx = phys_enc->intf_idx;
-
 	irq = &phys_enc->irq[INTR_IDX_AUTOREFRESH_DONE];
 	if (phys_enc->has_intf_te)
 		irq->hw_idx = phys_enc->hw_intf->idx;
@@ -1232,38 +1217,6 @@ static int sde_encoder_phys_cmd_te_get_line_count(
 	return line_count;
 }
 
-static int sde_encoder_phys_cmd_get_write_line_count(
-		struct sde_encoder_phys *phys_enc)
-{
-	struct sde_hw_pingpong *hw_pp;
-	struct sde_hw_intf *hw_intf;
-	struct sde_hw_pp_vsync_info info;
-
-	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_intf)
-		return -EINVAL;
-
-	if (!sde_encoder_phys_cmd_is_master(phys_enc))
-		return -EINVAL;
-
-	if (phys_enc->has_intf_te) {
-		hw_intf = phys_enc->hw_intf;
-		if (!hw_intf->ops.get_vsync_info)
-			return -EINVAL;
-
-		if (hw_intf->ops.get_vsync_info(hw_intf, &info))
-			return -EINVAL;
-	} else {
-		hw_pp = phys_enc->hw_pp;
-		if (!hw_pp->ops.get_vsync_info)
-			return -EINVAL;
-
-		if (hw_pp->ops.get_vsync_info(hw_pp, &info))
-			return -EINVAL;
-	}
-
-	return (int)info.wr_ptr_line_count;
-}
-
 static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
@@ -1939,7 +1892,6 @@ static void sde_encoder_phys_cmd_init_ops(struct sde_encoder_phys_ops *ops)
 	ops->is_autorefresh_enabled =
 			sde_encoder_phys_cmd_is_autorefresh_enabled;
 	ops->get_line_count = sde_encoder_phys_cmd_te_get_line_count;
-	ops->get_wr_line_count = sde_encoder_phys_cmd_get_write_line_count;
 	ops->wait_for_active = NULL;
 	ops->setup_vsync_source = sde_encoder_phys_cmd_setup_vsync_source;
 	ops->setup_misr = sde_encoder_helper_setup_misr;
@@ -2029,12 +1981,6 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
 
 	irq->cb.func = sde_encoder_phys_cmd_te_rd_ptr_irq;
 
-	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
-	irq->name = "underrun";
-	irq->intr_type = SDE_IRQ_TYPE_INTF_UNDER_RUN;
-	irq->intr_idx = INTR_IDX_UNDERRUN;
-	irq->cb.func = sde_encoder_phys_cmd_underrun_irq;
-
 	irq = &phys_enc->irq[INTR_IDX_AUTOREFRESH_DONE];
 	irq->name = "autorefresh_done";
 
@@ -1312,7 +1312,6 @@ static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
 	ops->trigger_flush = sde_encoder_helper_trigger_flush;
 	ops->hw_reset = sde_encoder_helper_hw_reset;
 	ops->get_line_count = sde_encoder_phys_vid_get_line_count;
-	ops->get_wr_line_count = sde_encoder_phys_vid_get_line_count;
 	ops->wait_dma_trigger = sde_encoder_phys_vid_wait_dma_trigger;
 	ops->wait_for_active = sde_encoder_phys_vid_wait_for_active;
 	ops->prepare_commit = sde_encoder_phys_vid_prepare_for_commit;
@@ -935,24 +935,6 @@ int sde_format_get_plane_sizes(
 	return _sde_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
 }
 
-int sde_format_get_block_size(const struct sde_format *fmt,
-		uint32_t *w, uint32_t *h)
-{
-	if (!fmt || !w || !h) {
-		DRM_ERROR("invalid pointer\n");
-		return -EINVAL;
-	}
-
-	/* TP10 is 96x96 and all others are 128x128 */
-	if (SDE_FORMAT_IS_YUV(fmt) && SDE_FORMAT_IS_DX(fmt) &&
-			(fmt->num_planes == 2) && fmt->unpack_tight)
-		*w = *h = 96;
-	else
-		*w = *h = 128;
-
-	return 0;
-}
-
 uint32_t sde_format_get_framebuffer_size(
 		const uint32_t format,
 		const uint32_t width,
@@ -65,18 +65,6 @@ int sde_format_get_plane_sizes(
 		struct sde_hw_fmt_layout *layout,
 		const uint32_t *pitches);
 
-/**
- * sde_format_get_block_size - get block size of given format when
- *	operating in block mode
- * @fmt: pointer to sde_format
- * @w: pointer to width of the block
- * @h: pointer to height of the block
- *
- * Return: 0 if success; error oode otherwise
- */
-int sde_format_get_block_size(const struct sde_format *fmt,
-		uint32_t *w, uint32_t *h);
-
 /**
  * sde_format_check_modified_format - validate format and buffers for
  *   sde non-standard, i.e. modified format
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -69,80 +69,3 @@ void sde_hw_blk_destroy(struct sde_hw_blk *hw_blk)
 	mutex_unlock(&sde_hw_blk_lock);
 }
 
-/**
- * sde_hw_blk_get - get hw_blk from free pool
- * @hw_blk: if specified, increment reference count only
- * @type: if hw_blk is not specified, allocate the next available of this type
- * @id: if specified (>= 0), allocate the given instance of the above type
- * return: pointer to hw block object
- */
-struct sde_hw_blk *sde_hw_blk_get(struct sde_hw_blk *hw_blk, u32 type, int id)
-{
-	struct sde_hw_blk *curr;
-	int rc, refcount;
-
-	if (!hw_blk) {
-		mutex_lock(&sde_hw_blk_lock);
-		list_for_each_entry(curr, &sde_hw_blk_list, list) {
-			if ((curr->type != type) ||
-					(id >= 0 && curr->id != id) ||
-					(id < 0 &&
-					atomic_read(&curr->refcount)))
-				continue;
-
-			hw_blk = curr;
-			break;
-		}
-		mutex_unlock(&sde_hw_blk_lock);
-	}
-
-	if (!hw_blk) {
-		pr_debug("no hw_blk:%d\n", type);
-		return NULL;
-	}
-
-	refcount = atomic_inc_return(&hw_blk->refcount);
-
-	if (refcount == 1 && hw_blk->ops.start) {
-		rc = hw_blk->ops.start(hw_blk);
-		if (rc) {
-			pr_err("failed to start hw_blk:%d rc:%d\n", type, rc);
-			goto error_start;
-		}
-	}
-
-	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
-			hw_blk->id, refcount);
-	return hw_blk;
-
-error_start:
-	sde_hw_blk_put(hw_blk);
-	return ERR_PTR(rc);
-}
-
-/**
- * sde_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
- * @hw_blk: hw block to be freed
- * @free_blk: function to be called when reference count goes to zero
- */
-void sde_hw_blk_put(struct sde_hw_blk *hw_blk)
-{
-	if (!hw_blk) {
-		pr_err("invalid parameters\n");
-		return;
-	}
-
-	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
-			atomic_read(&hw_blk->refcount));
-
-	if (!atomic_read(&hw_blk->refcount)) {
-		pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
-		return;
-	}
-
-	if (atomic_dec_return(&hw_blk->refcount))
-		return;
-
-	if (hw_blk->ops.stop)
-		hw_blk->ops.stop(hw_blk);
-}
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _SDE_HW_BLK_H
@@ -41,6 +41,4 @@ int sde_hw_blk_init(struct sde_hw_blk *hw_blk, u32 type, int id,
 		struct sde_hw_blk_ops *ops);
 void sde_hw_blk_destroy(struct sde_hw_blk *hw_blk);
 
-struct sde_hw_blk *sde_hw_blk_get(struct sde_hw_blk *hw_blk, u32 type, int id);
-void sde_hw_blk_put(struct sde_hw_blk *hw_blk);
 #endif /*_SDE_HW_BLK_H */
@@ -1199,20 +1199,6 @@ static void sde_hw_ctl_update_wb_cfg(struct sde_hw_ctl *ctx,
 	SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
 }
 
-static inline u32 sde_hw_ctl_read_ctl_top(struct sde_hw_ctl *ctx)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 ctl_top;
-
-	if (!ctx) {
-		pr_err("Invalid input argument\n");
-		return 0;
-	}
-	c = &ctx->hw;
-	ctl_top = SDE_REG_READ(c, CTL_TOP);
-	return ctl_top;
-}
-
 static inline u32 sde_hw_ctl_read_ctl_layers(struct sde_hw_ctl *ctx, int index)
 {
 	struct sde_hw_blk_reg_map *c;
@@ -1312,7 +1298,6 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
 	ops->get_flush_register = sde_hw_ctl_get_flush_register;
 	ops->trigger_start = sde_hw_ctl_trigger_start;
 	ops->trigger_pending = sde_hw_ctl_trigger_pending;
-	ops->read_ctl_top = sde_hw_ctl_read_ctl_top;
 	ops->read_ctl_layers = sde_hw_ctl_read_ctl_layers;
 	ops->update_wb_cfg = sde_hw_ctl_update_wb_cfg;
 	ops->reset = sde_hw_ctl_reset_control;
@@ -380,14 +380,6 @@ struct sde_hw_ctl_ops {
 	int (*update_bitmask)(struct sde_hw_ctl *ctx,
 		enum ctl_hw_flush_type type, u32 blk_idx, bool enable);
 
-	/**
-	 * read CTL_TOP register value and return
-	 *	the data.
-	 * @ctx : ctl path ctx pointer
-	 * @return : CTL top register value
-	 */
-	u32 (*read_ctl_top)(struct sde_hw_ctl *ctx);
-
 	/**
 	 * get interfaces for the active CTL .
 	 * @ctx : ctl path ctx pointer
@@ -441,18 +441,6 @@ static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
 	return -EINVAL;
 }
 
-static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off,
-		uint32_t mask)
-{
-	if (!intr)
-		return;
-
-	SDE_REG_WRITE(&intr->hw, reg_off, mask);
-
-	/* ensure register writes go through */
-	wmb();
-}
-
 static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
 		void (*cbfunc)(void *, int),
 		void *arg)
@@ -656,18 +644,6 @@ static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
 	return 0;
 }
 
-static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr,
-		uint32_t *mask)
-{
-	if (!intr || !mask)
-		return -EINVAL;
-
-	*mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
-		| IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
-
-	return 0;
-}
-
 static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
 		uint32_t *sources)
 {
@@ -713,32 +689,6 @@ static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr)
 	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
 }
 
-static void sde_hw_intr_clear_intr_status_force_mask(struct sde_hw_intr *intr,
-		int irq_idx, u32 irq_mask)
-{
-	int reg_idx;
-
-	if (!intr)
-		return;
-
-	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
-		pr_err("invalid IRQ index: [%d]\n", irq_idx);
-		return;
-	}
-
-	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
-	if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
-		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
-		return;
-	}
-
-	SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
-			irq_mask);
-
-	/* ensure register writes go through */
-	wmb();
-}
-
 static void sde_hw_intr_clear_intr_status_nolock(struct sde_hw_intr *intr,
 		int irq_idx)
 {
@@ -849,35 +799,6 @@ static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
 	return intr_status;
 }
 
-static u32 sde_hw_intr_get_intr_status_nomask(struct sde_hw_intr *intr,
-		int irq_idx, bool clear)
-{
-	int reg_idx;
-	unsigned long irq_flags;
-	u32 intr_status = 0;
-
-	if (!intr)
-		return 0;
-
-	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
-		pr_err("invalid IRQ index: [%d]\n", irq_idx);
-		return 0;
-	}
-
-	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
-	if (reg_idx < 0 || reg_idx > intr->sde_irq_size) {
-		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
-		return 0;
-	}
-
-	spin_lock_irqsave(&intr->irq_lock, irq_flags);
-	intr_status = SDE_REG_READ(&intr->hw,
-			intr->sde_irq_tbl[reg_idx].status_off);
-	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
-
-	return intr_status;
-}
-
 static int _set_sde_irq_tbl_offset_top(struct sde_intr_reg *sde_irq,
 		struct sde_intr_irq_offsets *item)
 {
@@ -957,23 +878,18 @@ static int _set_sde_irq_tbl_offset(struct sde_intr_reg *sde_irq,
 
 static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
 {
-	ops->set_mask = sde_hw_intr_set_mask;
 	ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
 	ops->enable_irq_nolock = sde_hw_intr_enable_irq_nolock;
 	ops->disable_irq_nolock = sde_hw_intr_disable_irq_nolock;
 	ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
 	ops->clear_all_irqs = sde_hw_intr_clear_irqs;
 	ops->disable_all_irqs = sde_hw_intr_disable_irqs;
-	ops->get_valid_interrupts = sde_hw_intr_get_valid_interrupts;
 	ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
 	ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
 	ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
 	ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock;
-	ops->clear_intr_status_force_mask =
-			sde_hw_intr_clear_intr_status_force_mask;
 	ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
 	ops->get_intr_status_nolock = sde_hw_intr_get_intr_status_nolock;
-	ops->get_intr_status_nomask = sde_hw_intr_get_intr_status_nomask;
 }
 
 static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, 2021, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _SDE_HW_INTERRUPTS_H
@@ -102,18 +102,6 @@ struct sde_hw_intr;
  * Interrupt operations.
  */
 struct sde_hw_intr_ops {
-	/**
-	 * set_mask - Programs the given interrupt register with the
-	 *	given interrupt mask. Register value will get overwritten.
-	 * @intr: HW interrupt handle
-	 * @reg_off: MDSS HW register offset
-	 * @irqmask: IRQ mask value
-	 */
-	void (*set_mask)(
-			struct sde_hw_intr *intr,
-			uint32_t reg,
-			uint32_t irqmask);
-
 	/**
 	 * irq_idx_lookup - Lookup IRQ index on the HW interrupt type
 	 *	Used for all irq related ops
@@ -187,7 +175,7 @@ struct sde_hw_intr_ops {
 
 	/**
 	 * clear_interrupt_status - Clears HW interrupt status based on given
-	 *	lookup IRQ index.
+	 *	lookup IRQ index
 	 * @intr: HW interrupt handle
 	 * @irq_idx: Lookup irq index return from irq_idx_lookup
 	 */
@@ -204,17 +192,6 @@ struct sde_hw_intr_ops {
 			struct sde_hw_intr *intr,
 			int irq_idx);
 
-	/**
-	 * clear_intr_status_force_mask() - clear the HW interrupts
-	 * @intr: HW interrupt handle
-	 * @irq_idx: Lookup irq index return from irq_idx_lookup
-	 * @irq_mask: irq mask to clear
-	 */
-	void (*clear_intr_status_force_mask)(
-			struct sde_hw_intr *intr,
-			int irq_idx,
-			u32 irq_mask);
-
 	/**
 	 * get_interrupt_status - Gets HW interrupt status, and clear if set,
 	 *	based on given lookup IRQ index.
@@ -238,32 +215,6 @@ struct sde_hw_intr_ops {
 			int irq_idx,
 			bool clear);
 
-	/**
-	 * get_intr_status_nomask - nolock version of get_interrupt_status
-	 * @intr: HW interrupt handle
-	 * @irq_idx: Lookup irq index return from irq_idx_lookup
-	 * @clear: True to clear irq after read
-	 */
-	u32 (*get_intr_status_nomask)(
-			struct sde_hw_intr *intr,
-			int irq_idx,
-			bool clear);
-
-	/**
-	 * get_valid_interrupts - Gets a mask of all valid interrupt sources
-	 *	within SDE. These are actually status bits
-	 *	within interrupt registers that specify the
-	 *	source of the interrupt in IRQs. For example,
-	 *	valid interrupt sources can be MDP, DSI,
-	 *	HDMI etc.
-	 * @intr: HW interrupt handle
-	 * @mask: Returning the interrupt source MASK
-	 * @return: 0 for success, otherwise failure
-	 */
-	int (*get_valid_interrupts)(
-			struct sde_hw_intr *intr,
-			uint32_t *mask);
-
 	/**
 	 * get_interrupt_sources - Gets the bitmask of the SDE interrupt
 	 *	source that are currently fired.
@@ -268,17 +268,6 @@ static void sde_hw_pp_dsc_enable(struct sde_hw_pingpong *pp)
 	SDE_REG_WRITE(c, PP_DSC_MODE, 1);
 }
 
-static u32 sde_hw_pp_get_dsc_status(struct sde_hw_pingpong *pp)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!pp)
-		return 0;
-
-	c = &pp->hw;
-	return SDE_REG_READ(c, PP_DSC_MODE);
-}
-
 static void sde_hw_pp_dsc_disable(struct sde_hw_pingpong *pp)
 {
 	struct sde_hw_blk_reg_map *c;
@@ -490,7 +479,6 @@ static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops,
 		ops->setup_dsc = sde_hw_pp_setup_dsc;
 		ops->enable_dsc = sde_hw_pp_dsc_enable;
 		ops->disable_dsc = sde_hw_pp_dsc_disable;
-		ops->get_dsc_status = sde_hw_pp_get_dsc_status;
 	}
 
 	version = SDE_COLOR_PROCESS_MAJOR(hw_cap->sblk->dither.version);
@@ -106,12 +106,6 @@ struct sde_hw_pingpong_ops {
 	 */
 	void (*disable_dsc)(struct sde_hw_pingpong *pp);
 
-	/**
-	 * Get DSC status
-	 * @Return: register value of DSC config
-	 */
-	u32 (*get_dsc_status)(struct sde_hw_pingpong *pp);
-
 	/**
 	 * Program the dither hw block
 	 */
@@ -119,18 +119,6 @@ static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
 	SDE_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
 }
 
-static u32 sde_hw_get_split_flush(struct sde_hw_mdp *mdp)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!mdp)
-		return 0;
-
-	c = &mdp->hw;
-
-	return (SDE_REG_READ(c, SSPP_SPARE) & 0x1);
-}
-
 static void sde_hw_setup_pp_split(struct sde_hw_mdp *mdp,
 		struct split_pipe_cfg *cfg)
 {
@@ -234,39 +222,6 @@ static int sde_hw_get_clk_ctrl_status(struct sde_hw_mdp *mdp,
 	return 0;
 }
 
-static void sde_hw_get_danger_status(struct sde_hw_mdp *mdp,
-		struct sde_danger_safe_status *status)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 value;
-
-	if (!mdp || !status)
-		return;
-
-	c = &mdp->hw;
-
-	value = SDE_REG_READ(c, DANGER_STATUS);
-	status->mdp = (value >> 0) & 0x3;
-	status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
-	status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
-	status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
-	status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
-	status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
-	status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
-	status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
-	status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
-	status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
-	status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
-	status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
-	status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
-	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
-	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
-	status->wb[WB_0] = 0;
-	status->wb[WB_1] = 0;
-	status->wb[WB_2] = (value >> 2) & 0x3;
-	status->wb[WB_3] = 0;
-}
-
 static void _update_vsync_source(struct sde_hw_mdp *mdp,
 		struct sde_vsync_source_cfg *cfg)
 {
@@ -354,52 +309,6 @@ static void sde_hw_setup_vsync_source_v1(struct sde_hw_mdp *mdp,
 	_update_vsync_source(mdp, cfg);
 }
 
-
-static void sde_hw_get_safe_status(struct sde_hw_mdp *mdp,
-		struct sde_danger_safe_status *status)
-{
-	struct sde_hw_blk_reg_map *c;
-	u32 value;
-
-	if (!mdp || !status)
-		return;
-
-	c = &mdp->hw;
-
-	value = SDE_REG_READ(c, SAFE_STATUS);
-	status->mdp = (value >> 0) & 0x1;
-	status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
-	status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
-	status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
-	status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
-	status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
-	status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
-	status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
-	status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
-	status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
-	status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
-	status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
-	status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
-	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
-	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
-	status->wb[WB_0] = 0;
-	status->wb[WB_1] = 0;
-	status->wb[WB_2] = (value >> 2) & 0x1;
-	status->wb[WB_3] = 0;
-}
-
-static void sde_hw_setup_dce(struct sde_hw_mdp *mdp, u32 dce_sel)
-{
-	struct sde_hw_blk_reg_map *c;
-
-	if (!mdp)
-		return;
-
-	c = &mdp->hw;
-
-	SDE_REG_WRITE(c, DCE_SEL, dce_sel);
-}
-
 void sde_hw_reset_ubwc(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m)
 {
 	struct sde_hw_blk_reg_map c;
@@ -610,11 +519,7 @@ static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
 	ops->setup_cdm_output = sde_hw_setup_cdm_output;
 	ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
 	ops->get_clk_ctrl_status = sde_hw_get_clk_ctrl_status;
-	ops->get_danger_status = sde_hw_get_danger_status;
 	ops->set_cwb_ppb_cntl = sde_hw_program_cwb_ppb_ctrl;
-	ops->get_safe_status = sde_hw_get_safe_status;
-	ops->get_split_flush_status = sde_hw_get_split_flush;
-	ops->setup_dce = sde_hw_setup_dce;
 	ops->reset_ubwc = sde_hw_reset_ubwc;
 	ops->intf_audio_select = sde_hw_intf_audio_select;
 	ops->set_mdp_hw_events = sde_hw_mdp_events;
@@ -147,21 +147,6 @@ struct sde_hw_mdp_ops {
 	int (*get_clk_ctrl_status)(struct sde_hw_mdp *mdp,
 			enum sde_clk_ctrl_type clk_ctrl, bool *status);
 
-	/**
-	 * setup_dce - set DCE mux for DSC ctrl path
-	 * @mdp: mdp top context driver
-	 * @dce_sel: dce_mux value
-	 */
-	void (*setup_dce)(struct sde_hw_mdp *mdp, u32 dce_sel);
-
-	/**
-	 * get_danger_status - get danger status
-	 * @mdp: mdp top context driver
-	 * @status: Pointer to danger safe status
-	 */
-	void (*get_danger_status)(struct sde_hw_mdp *mdp,
-			struct sde_danger_safe_status *status);
-
 	/**
 	 * setup_vsync_source - setup vsync source configuration details
 	 * @mdp: mdp top context driver
@@ -170,20 +155,6 @@ struct sde_hw_mdp_ops {
 	void (*setup_vsync_source)(struct sde_hw_mdp *mdp,
 			struct sde_vsync_source_cfg *cfg);
 
-	/**
-	 * get_safe_status - get safe status
-	 * @mdp: mdp top context driver
-	 * @status: Pointer to danger safe status
-	 */
-	void (*get_safe_status)(struct sde_hw_mdp *mdp,
-			struct sde_danger_safe_status *status);
-
-	/**
-	 * get_split_flush_status - get split flush status
-	 * @mdp: mdp top context driver
-	 */
-	u32 (*get_split_flush_status)(struct sde_hw_mdp *mdp);
-
 	/**
 	 * reset_ubwc - reset top level UBWC configuration
 	 * @mdp: mdp top context driver
@@ -2675,61 +2675,6 @@ end:
 	return ret;
 }
 
-int sde_rm_ext_blk_create_reserve(struct sde_rm *rm,
-		struct sde_hw_blk *hw, struct drm_encoder *enc)
-{
-	struct sde_rm_hw_blk *blk;
-	struct sde_rm_rsvp *rsvp;
-	int ret = 0;
-
-	if (!rm || !hw || !enc) {
-		SDE_ERROR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	if (hw->type >= SDE_HW_BLK_MAX) {
-		SDE_ERROR("invalid HW type\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&rm->rm_lock);
-
-	rsvp = _sde_rm_get_rsvp_cur(rm, enc);
-	if (!rsvp) {
-		rsvp = kzalloc(sizeof(*rsvp), GFP_KERNEL);
-		if (!rsvp) {
-			ret = -ENOMEM;
-			goto end;
-		}
-
-		rsvp->seq = ++rm->rsvp_next_seq;
-		rsvp->enc_id = enc->base.id;
-		list_add_tail(&rsvp->list, &rm->rsvps);
-
-		SDE_DEBUG("create rsvp %d for enc %d\n",
-				rsvp->seq, rsvp->enc_id);
-	}
-
-	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
-	if (!blk) {
-		ret = -ENOMEM;
-		goto end;
-	}
-
-	blk->type = hw->type;
-	blk->id = hw->id;
-	blk->hw = hw;
-	blk->rsvp = rsvp;
-	list_add_tail(&blk->list, &rm->hw_blks[hw->type]);
-
-	SDE_DEBUG("create blk %d %d for rsvp %d enc %d\n", blk->type, blk->id,
-			rsvp->seq, rsvp->enc_id);
-
-end:
-	mutex_unlock(&rm->rm_lock);
-	return ret;
-}
-
 int sde_rm_ext_blk_destroy(struct sde_rm *rm,
 		struct drm_encoder *enc)
 {
@@ -397,18 +397,6 @@ bool sde_rm_topology_is_group(struct sde_rm *rm,
 		struct drm_crtc_state *state,
 		enum sde_rm_topology_group group);
 
-/**
- * sde_rm_ext_blk_create_reserve - Create external HW blocks
- *	in resource manager and reserve for specific encoder.
- * @rm: SDE Resource Manager handle
- * @hw: external HW block
- * @drm_enc: DRM Encoder handle
- * @Return: 0 on Success otherwise -ERROR
- */
-int sde_rm_ext_blk_create_reserve(struct sde_rm *rm,
-		struct sde_hw_blk *hw,
-		struct drm_encoder *enc);
-
 /**
  * sde_rm_ext_blk_destroy - Given the encoder for the display chain, release
  *	external HW blocks created for that.