disp: msm: snapshot of msm and sde driver

This snapshot ports changes from 4.14 to 4.19 into the msm and sde layer.
Snapshot was taken as of commit 0f8fb25421ff ("cnss2: Add device version
to SOC info structure").

Change-Id: I59b799a78319c2db6930a2a10bc38976f8c09898
Signed-off-by: Samantha Tran <samtran@codeaurora.org>
@@ -2438,7 +2438,8 @@ static int dp_display_mst_connector_update_link_info(
memcpy(&dp_panel->link_info, &dp->panel->link_info,
sizeof(dp_panel->link_info));

DP_MST_DEBUG("dp mst connector:%d link info updated\n");
DP_MST_DEBUG("dp mst connector:%d link info updated\n",
connector->base.id);

return rc;
}

@@ -1201,7 +1201,7 @@ static void dp_panel_dsc_prepare_pps_packet(struct dp_panel *dp_panel)
static void _dp_panel_dsc_get_num_extra_pclk(struct msm_display_dsc_info *dsc,
enum msm_display_compression_ratio ratio)
{
unsigned int dto_n, dto_d, remainder;
unsigned int dto_n = 0, dto_d = 0, remainder;
int ack_required, last_few_ack_required, accum_ack;
int last_few_pclk, last_few_pclk_required;
int start, temp, line_width = dsc->pic_width/2;

@@ -2813,7 +2813,7 @@ static bool dp_panel_read_mst_cap(struct dp_panel *dp_panel)

if (!dp_panel) {
pr_err("invalid input\n");
goto end;
return 0;
}

panel = container_of(dp_panel, struct dp_panel_private, dp_panel);

@@ -1963,6 +1963,8 @@ static int msm_pdev_probe(struct platform_device *pdev)
ret = add_display_components(&pdev->dev, &match);
if (ret)
return ret;
if (!match)
return -ENODEV;

pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
@@ -65,11 +65,11 @@ struct msm_gem_address_space;
struct msm_gem_vma;

#define NUM_DOMAINS 4 /* one for KMS, then one per gpu core (?) */
#define MAX_CRTCS 8
#define MAX_CRTCS 16
#define MAX_PLANES 20
#define MAX_ENCODERS 8
#define MAX_BRIDGES 8
#define MAX_CONNECTORS 8
#define MAX_ENCODERS 16
#define MAX_BRIDGES 16
#define MAX_CONNECTORS 16

#define TEARDOWN_DEADLOCK_RETRY_MAX 5

@@ -95,7 +95,6 @@ struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
struct msm_drm_private *priv;
struct msm_kms *kms;
int ret;
u32 domain;

if (!dma_buf || !dev->dev_private)
return ERR_PTR(-EINVAL);

@@ -142,14 +141,14 @@ struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
|| (flags & ION_FLAG_CP_CAMERA_PREVIEW))
attach_dev = dev->dev;
else
DRM_ERROR("invalid ion secure flag: 0x%x\n", flags);
DRM_ERROR("invalid ion secure flag: 0x%lx\n", flags);
} else {
attach_dev = kms->funcs->get_address_space_device(kms,
MSM_SMMU_DOMAIN_UNSECURE);
}

if (!attach_dev) {
DRM_ERROR("aspace device not found for domain:%d\n", domain);
DRM_ERROR("aspace device not found for domain\n");
ret = -EINVAL;
goto fail_put;
}
@@ -2898,6 +2898,9 @@ static void sde_cp_ltm_hist_interrupt_cb(void *arg, int irq_idx)
DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
return;
}

memset(&phase, 0, sizeof(phase));

/* read intr_status register value */
num_mixers = sde_crtc->num_mixers;
if (!num_mixers)

@@ -1154,7 +1154,7 @@ static int _sde_connector_set_ext_hdr_info(

/* verify 1st header byte, programmed in DP Infoframe SDP header */
if (payload_size < 1 || (payload[0] != HDR10_PLUS_VSIF_TYPE_CODE)) {
SDE_ERROR_CONN(c_conn, "invalid payload detected, size: %d\n",
SDE_ERROR_CONN(c_conn, "invalid payload detected, size: %zd\n",
payload_size);
rc = -EINVAL;
goto end;

@@ -216,10 +216,6 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
return 0;
}

/* we only need bandwidth check on real-time clients (interfaces) */
if (sde_crtc_get_client_type(crtc) == NRT_CLIENT)
return 0;

sde_cstate = to_sde_crtc_state(state);

/* obtain new values */
@@ -449,6 +449,26 @@ static bool sde_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}

static int _sde_crtc_get_ctlstart_timeout(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
int rc = 0;

if (!crtc || !crtc->dev)
return 0;

list_for_each_entry(encoder,
&crtc->dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;

if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_CMD)
rc += sde_encoder_get_ctlstart_timeout_state(encoder);
}

return rc;
}

static void _sde_crtc_setup_blend_cfg(struct sde_crtc_mixer *mixer,
struct sde_plane_state *pstate, struct sde_format *format)
{

@@ -886,6 +906,11 @@ static u32 _sde_crtc_get_displays_affected(struct drm_crtc *crtc,
u32 disp_bitmask = 0;
int i;

if (!crtc || !state) {
pr_err("Invalid crtc or state\n");
return 0;
}

sde_crtc = to_sde_crtc(crtc);
crtc_state = to_sde_crtc_state(state);

@@ -3132,7 +3157,13 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
if (unlikely(!sde_crtc->num_mixers))
goto end;

_sde_crtc_blend_setup(crtc, old_state, true);
if (_sde_crtc_get_ctlstart_timeout(crtc)) {
_sde_crtc_blend_setup(crtc, old_state, false);
SDE_ERROR("border fill only commit after ctlstart timeout\n");
} else {
_sde_crtc_blend_setup(crtc, old_state, true);
}

_sde_crtc_dest_scaler_setup(crtc);

/* cancel the idle notify delayed work */
|
||||
catalog->perf.amortizable_threshold);
|
||||
sde_kms_info_add_keyint(info, "min_prefill_lines",
|
||||
catalog->perf.min_prefill_lines);
|
||||
sde_kms_info_add_keyint(info, "num_mnoc_ports",
|
||||
catalog->perf.num_mnoc_ports);
|
||||
sde_kms_info_add_keyint(info, "axi_bus_width",
|
||||
catalog->perf.axi_bus_width);
|
||||
sde_kms_info_add_keyint(info, "sec_ui_blendstage",
|
||||
catalog->sui_supported_blendstage);
|
||||
|
||||
|
@@ -218,6 +218,7 @@ struct sde_crtc_misr_info {
|
||||
* @output_fence : output release fence context
|
||||
* @stage_cfg : H/w mixer stage configuration
|
||||
* @debugfs_root : Parent of debugfs node
|
||||
* @priv_handle : Pointer to external private handle, if present
|
||||
* @vblank_cb_count : count of vblank callback since last reset
|
||||
* @play_count : frame count between crtc enable and disable
|
||||
* @vblank_cb_time : ktime at vblank count reset
|
||||
@@ -285,6 +286,7 @@ struct sde_crtc {
|
||||
|
||||
struct sde_hw_stage_cfg stage_cfg;
|
||||
struct dentry *debugfs_root;
|
||||
void *priv_handle;
|
||||
|
||||
u32 vblank_cb_count;
|
||||
u64 play_count;
|
||||
@@ -477,6 +479,17 @@ static inline int sde_crtc_get_mixer_height(struct sde_crtc *sde_crtc,
|
||||
cstate->ds_cfg[0].lm_height : mode->vdisplay);
|
||||
}
|
||||
|
||||
/**
|
||||
* sde_crtc_get_num_datapath - get the number of datapath active
|
||||
* @crtc: Pointer to drm crtc object
|
||||
*/
|
||||
static inline int sde_crtc_get_num_datapath(struct drm_crtc *crtc)
|
||||
{
|
||||
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
|
||||
|
||||
return sde_crtc ? sde_crtc->num_mixers : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* sde_crtc_frame_pending - retun the number of pending frames
|
||||
* @crtc: Pointer to drm crtc object
|
||||
|
@@ -166,6 +166,7 @@ enum sde_enc_rc_states {
* @enc_spin_lock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @bus_scaling_client: Client handle to the bus scaling interface
* @te_source: vsync source pin information
* @ops: Encoder ops from init function
* @num_phys_encs: Actual number of physical encoders contained.
* @phys_encs: Container of physical encoders managed.
* @phys_vid_encs: Video physical encoders for panel mode switch.

@@ -234,6 +235,8 @@ struct sde_encoder_virt {
uint32_t display_num_of_h_tiles;
uint32_t te_source;

struct sde_encoder_ops ops;

unsigned int num_phys_encs;
struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
struct sde_encoder_phys *phys_vid_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];

@@ -2849,7 +2852,7 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];

if (phys) {
if (!sde_enc->hw_pp[i]) {
if (!sde_enc->hw_pp[i] && sde_enc->topology.num_intf) {
SDE_ERROR_ENC(sde_enc,
"invalid pingpong block for the encoder\n");
return;

@@ -3315,8 +3318,7 @@ void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
struct sde_encoder_virt *sde_enc;

if (wb_enc) {
if (sde_encoder_helper_reset_mixers(phys_enc,
wb_enc->fb_disable))
if (sde_encoder_helper_reset_mixers(phys_enc, NULL))
return;

if (wb_enc->hw_wb->ops.bind_pingpong_blk) {

@@ -3657,6 +3659,24 @@ int sde_encoder_idle_request(struct drm_encoder *drm_enc)
return 0;
}

int sde_encoder_get_ctlstart_timeout_state(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc = NULL;
int i, count = 0;

if (!drm_enc)
return 0;

sde_enc = to_sde_encoder_virt(drm_enc);

for (i = 0; i < sde_enc->num_phys_encs; i++) {
count += atomic_read(&sde_enc->phys_encs[i]->ctlstart_timeout);
atomic_set(&sde_enc->phys_encs[i]->ctlstart_timeout, 0);
}

return count;
}

/**
* _sde_encoder_trigger_flush - trigger flush for a physical encoder
* drm_enc: Pointer to drm encoder structure

@@ -5357,6 +5377,23 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
SDE_DEBUG("h_tile_instance %d = %d, split_role %d\n",
i, controller_id, phys_params.split_role);

if (sde_enc->ops.phys_init) {
struct sde_encoder_phys *enc;

enc = sde_enc->ops.phys_init(intf_type,
controller_id,
&phys_params);
if (enc) {
sde_enc->phys_encs[sde_enc->num_phys_encs] =
enc;
++sde_enc->num_phys_encs;
} else
SDE_ERROR_ENC(sde_enc,
"failed to add phys encs\n");

continue;
}

if (intf_type == INTF_WB) {
phys_params.intf_idx = INTF_MAX;
phys_params.wb_idx = sde_encoder_get_wb(

@@ -5429,9 +5466,10 @@ static const struct drm_encoder_funcs sde_encoder_funcs = {
.early_unregister = sde_encoder_early_unregister,
};

struct drm_encoder *sde_encoder_init(
struct drm_encoder *sde_encoder_init_with_ops(
struct drm_device *dev,
struct msm_display_info *disp_info)
struct msm_display_info *disp_info,
const struct sde_encoder_ops *ops)
{
struct msm_drm_private *priv = dev->dev_private;
struct sde_kms *sde_kms = to_sde_kms(priv->kms);

@@ -5448,6 +5486,9 @@ struct drm_encoder *sde_encoder_init(
goto fail;
}

if (ops)
sde_enc->ops = *ops;

mutex_init(&sde_enc->enc_lock);
ret = sde_encoder_setup_display(sde_enc, sde_kms, disp_info,
&drm_enc_mode);

@@ -5519,6 +5560,13 @@ fail:
return ERR_PTR(ret);
}

struct drm_encoder *sde_encoder_init(
struct drm_device *dev,
struct msm_display_info *disp_info)
{
return sde_encoder_init_with_ops(dev, disp_info, NULL);
}

int sde_encoder_wait_for_event(struct drm_encoder *drm_enc,
enum msm_event_wait event)
{
@@ -70,6 +70,22 @@ struct sde_encoder_kickoff_params {
enum frame_trigger_mode_type frame_trigger_mode;
};

/**
* struct sde_encoder_ops - callback functions for generic sde encoder
* Individual callbacks documented below.
*/
struct sde_encoder_ops {
/**
* phys_init - phys initialization function
* @type: controller type
* @controller_id: controller id
* @phys_init_params: Pointer of structure sde_enc_phys_init_params
* Returns: Pointer of sde_encoder_phys, NULL if failed
*/
void *(*phys_init)(enum sde_intf_type type,
u32 controller_id, void *phys_init_params);
};

/**
* sde_encoder_get_hw_resources - Populate table of required hardware resources
* @encoder: encoder pointer

@@ -220,6 +236,18 @@ struct drm_encoder *sde_encoder_init(
struct drm_device *dev,
struct msm_display_info *disp_info);

/**
* sde_encoder_init_with_ops - initialize virtual encoder object with init ops
* @dev: Pointer to drm device structure
* @disp_info: Pointer to display information structure
* @ops: Pointer to encoder ops structure
* Returns: Pointer to newly created drm encoder
*/
struct drm_encoder *sde_encoder_init_with_ops(
struct drm_device *dev,
struct msm_display_info *disp_info,
const struct sde_encoder_ops *ops);

/**
* sde_encoder_destroy - destroy previously initialized virtual encoder
* @drm_enc: Pointer to previously created drm encoder structure

@@ -311,4 +339,11 @@ int sde_encoder_in_cont_splash(struct drm_encoder *enc);
*/
void sde_encoder_uidle_enable(struct drm_encoder *drm_enc, bool enable);

/*
* sde_encoder_get_ctlstart_timeout_state - checks if ctl start timeout happened
* @drm_enc: Pointer to drm encoder structure
* @Return: non zero value if ctl start timeout occurred
*/
int sde_encoder_get_ctlstart_timeout_state(struct drm_encoder *enc);

#endif /* __SDE_ENCODER_H__ */
@@ -193,6 +193,7 @@ struct sde_encoder_phys_ops {
* @INTR_IDX_UNDERRUN: Underrun unterrupt for video and cmd mode panel
* @INTR_IDX_RDPTR: Readpointer done unterrupt for cmd mode panel
* @INTR_IDX_WB_DONE: Writeback done interrupt for WB
* @INTR_IDX_PP1_OVFL: Pingpong overflow interrupt on PP1 for Concurrent WB
* @INTR_IDX_PP2_OVFL: Pingpong overflow interrupt on PP2 for Concurrent WB
* @INTR_IDX_PP3_OVFL: Pingpong overflow interrupt on PP3 for Concurrent WB
* @INTR_IDX_PP4_OVFL: Pingpong overflow interrupt on PP4 for Concurrent WB

@@ -208,6 +209,7 @@ enum sde_intr_idx {
INTR_IDX_RDPTR,
INTR_IDX_AUTOREFRESH_DONE,
INTR_IDX_WB_DONE,
INTR_IDX_PP1_OVFL,
INTR_IDX_PP2_OVFL,
INTR_IDX_PP3_OVFL,
INTR_IDX_PP4_OVFL,

@@ -277,6 +279,7 @@ struct sde_encoder_irq {
* @pending_retire_fence_cnt: Atomic counter tracking the pending retire
* fences that have to be signalled.
* @pending_kickoff_wq: Wait queue for blocking until kickoff completes
* @ctlstart_timeout: Indicates if ctl start timeout occurred
* @irq: IRQ tracking structures
* @has_intf_te: Interface TE configuration support
* @cont_splash_single_flush Variable to check if single flush is enabled.

@@ -320,6 +323,7 @@ struct sde_encoder_phys {
atomic_t pending_ctlstart_cnt;
atomic_t pending_kickoff_cnt;
atomic_t pending_retire_fence_cnt;
atomic_t ctlstart_timeout;
wait_queue_head_t pending_kickoff_wq;
struct sde_encoder_irq irq[INTR_IDX_MAX];
bool has_intf_te;

@@ -217,6 +217,7 @@ static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
atomic_add_unless(&phys_enc->pending_ctlstart_cnt,
-1, 0);
atomic_set(&phys_enc->ctlstart_timeout, 0);
}
}

@@ -317,6 +318,7 @@ static void sde_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)

ctl = phys_enc->hw_ctl;
atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
atomic_set(&phys_enc->ctlstart_timeout, 0);

time_diff_us = ktime_us_delta(ktime_get(), cmd_enc->rd_ptr_timestamp);

@@ -1283,6 +1285,7 @@ static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
SDE_ERROR("invalid encoder\n");
return;
}
atomic_set(&phys_enc->ctlstart_timeout, 0);
SDE_DEBUG_CMDENC(cmd_enc, "pp %d intf %d state %d\n",
phys_enc->hw_pp->idx - PINGPONG_0,
phys_enc->hw_intf->idx - INTF_0,

@@ -1455,6 +1458,7 @@ static int _sde_encoder_phys_cmd_wait_for_ctl_start(
SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
atomic_add_unless(
&phys_enc->pending_ctlstart_cnt, -1, 0);
atomic_inc_return(&phys_enc->ctlstart_timeout);
}
} else if ((ret == 0) &&
(phys_enc->frame_trigger_mode == FRAME_DONE_WAIT_POSTED_START) &&

@@ -1806,6 +1810,7 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
atomic_set(&cmd_enc->pending_rd_ptr_cnt, 0);
atomic_set(&cmd_enc->pending_vblank_cnt, 0);
atomic_set(&phys_enc->ctlstart_timeout, 0);
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
init_waitqueue_head(&cmd_enc->pending_vblank_wq);
atomic_set(&cmd_enc->autorefresh.kickoff_cnt, 0);
@@ -24,7 +24,7 @@

#define TO_S15D16(_x_) ((_x_) << 7)

static const u32 cwb_irq_tbl[PINGPONG_MAX] = {SDE_NONE, SDE_NONE,
static const u32 cwb_irq_tbl[PINGPONG_MAX] = {SDE_NONE, INTR_IDX_PP1_OVFL,
INTR_IDX_PP2_OVFL, INTR_IDX_PP3_OVFL, INTR_IDX_PP4_OVFL,
INTR_IDX_PP5_OVFL, SDE_NONE, SDE_NONE};

@@ -142,7 +142,7 @@ static void sde_encoder_phys_wb_set_qos_remap(
qos_params.client_type = phys_enc->in_clone_mode ?
VBIF_CWB_CLIENT : VBIF_NRT_CLIENT;

SDE_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d rt:%d clone:%d\n",
SDE_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d clone:%d\n",
qos_params.num,
qos_params.vbif_idx,
qos_params.xin_id, qos_params.client_type);

@@ -1572,7 +1572,7 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
goto exit;
}

if (sde_encoder_helper_reset_mixers(phys_enc, wb_enc->fb_disable))
if (sde_encoder_helper_reset_mixers(phys_enc, NULL))
goto exit;

phys_enc->enable_state = SDE_ENC_DISABLING;

@@ -1808,6 +1808,16 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init(
irq->cb.arg = wb_enc;
irq->cb.func = sde_encoder_phys_wb_done_irq;

irq = &phys_enc->irq[INTR_IDX_PP1_OVFL];
INIT_LIST_HEAD(&irq->cb.list);
irq->name = "pp1_overflow";
irq->hw_idx = CWB_1;
irq->irq_idx = -1;
irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW;
irq->intr_idx = INTR_IDX_PP1_OVFL;
irq->cb.arg = wb_enc;
irq->cb.func = sde_encoder_phys_cwb_ovflow;

irq = &phys_enc->irq[INTR_IDX_PP2_OVFL];
INIT_LIST_HEAD(&irq->cb.list);
irq->name = "pp2_overflow";
@@ -130,6 +130,8 @@
"NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25"
#define DEFAULT_MAX_PER_PIPE_BW 2400000
#define DEFAULT_AMORTIZABLE_THRESHOLD 25
#define DEFAULT_MNOC_PORTS 2
#define DEFAULT_AXI_BUS_WIDTH 32
#define DEFAULT_CPU_MASK 0
#define DEFAULT_CPU_DMA_LATENCY PM_QOS_DEFAULT_VALUE

@@ -213,6 +215,8 @@ enum {
PERF_CPU_DMA_LATENCY,
PERF_QOS_LUT_MACROTILE_QSEED,
PERF_SAFE_LUT_MACROTILE_QSEED,
PERF_NUM_MNOC_PORTS,
PERF_AXI_BUS_WIDTH,
PERF_PROP_MAX,
};

@@ -524,6 +528,10 @@ static struct sde_prop_type sde_perf_prop[] = {
false, PROP_TYPE_U32_ARRAY},
{PERF_SAFE_LUT_MACROTILE_QSEED, "qcom,sde-safe-lut-macrotile-qseed",
false, PROP_TYPE_U32_ARRAY},
{PERF_NUM_MNOC_PORTS, "qcom,sde-num-mnoc-ports",
false, PROP_TYPE_U32},
{PERF_AXI_BUS_WIDTH, "qcom,sde-axi-bus-width",
false, PROP_TYPE_U32},
};

static struct sde_prop_type sspp_prop[] = {

@@ -740,6 +748,12 @@ static struct sde_prop_type merge_3d_prop[] = {
{HW_OFF, "qcom,sde-merge-3d-off", false, PROP_TYPE_U32_ARRAY},
{HW_LEN, "qcom,sde-merge-3d-size", false, PROP_TYPE_U32},
};

static struct sde_prop_type qdss_prop[] = {
{HW_OFF, "qcom,sde-qdss-off", false, PROP_TYPE_U32_ARRAY},
{HW_LEN, "qcom,sde-qdss-size", false, PROP_TYPE_U32},
};

/*************************************************************
* static API list
*************************************************************/

@@ -3460,6 +3474,16 @@ static void _sde_perf_parse_dt_cfg_populate(struct sde_mdss_cfg *cfg,
PROP_VALUE_ACCESS(prop_value,
PERF_AMORTIZABLE_THRESHOLD, 0) :
DEFAULT_AMORTIZABLE_THRESHOLD;
cfg->perf.num_mnoc_ports =
prop_exists[PERF_NUM_MNOC_PORTS] ?
PROP_VALUE_ACCESS(prop_value,
PERF_NUM_MNOC_PORTS, 0) :
DEFAULT_MNOC_PORTS;
cfg->perf.axi_bus_width =
prop_exists[PERF_AXI_BUS_WIDTH] ?
PROP_VALUE_ACCESS(prop_value,
PERF_AXI_BUS_WIDTH, 0) :
DEFAULT_AXI_BUS_WIDTH;
}

static int _sde_perf_parse_dt_cfg(struct device_node *np,
@@ -3575,23 +3599,23 @@ static int sde_parse_merge_3d_dt(struct device_node *np,

prop_value = kcalloc(HW_PROP_MAX, sizeof(struct sde_prop_value),
GFP_KERNEL);
if (!prop_value) {
rc = -ENOMEM;
goto fail;
}
if (!prop_value)
return -ENOMEM;

rc = _validate_dt_entry(np, merge_3d_prop, ARRAY_SIZE(merge_3d_prop),
prop_count, &off_count);
if (rc)
goto error;
goto end;

sde_cfg->merge_3d_count = off_count;

rc = _read_dt_entry(np, merge_3d_prop, ARRAY_SIZE(merge_3d_prop),
prop_count,
prop_exists, prop_value);
if (rc)
goto error;
if (rc) {
sde_cfg->merge_3d_count = 0;
goto end;
}

for (i = 0; i < off_count; i++) {
merge_3d = sde_cfg->merge_3d + i;

@@ -3602,11 +3626,54 @@ static int sde_parse_merge_3d_dt(struct device_node *np,
merge_3d->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
}

return 0;
error:
sde_cfg->merge_3d_count = 0;
end:
kfree(prop_value);
return rc;
}

int sde_qdss_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
{
int rc, prop_count[HW_PROP_MAX], i;
struct sde_prop_value *prop_value = NULL;
bool prop_exists[HW_PROP_MAX];
u32 off_count;
struct sde_qdss_cfg *qdss;

if (!sde_cfg) {
SDE_ERROR("invalid argument\n");
return -EINVAL;
}

prop_value = kzalloc(HW_PROP_MAX *
sizeof(struct sde_prop_value), GFP_KERNEL);
if (!prop_value)
return -ENOMEM;

rc = _validate_dt_entry(np, qdss_prop, ARRAY_SIZE(qdss_prop),
prop_count, &off_count);
if (rc) {
sde_cfg->qdss_count = 0;
goto end;
}

sde_cfg->qdss_count = off_count;

rc = _read_dt_entry(np, qdss_prop, ARRAY_SIZE(qdss_prop), prop_count,
prop_exists, prop_value);
if (rc)
goto end;

for (i = 0; i < off_count; i++) {
qdss = sde_cfg->qdss + i;
qdss->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
qdss->id = QDSS_0 + i;
snprintf(qdss->name, SDE_HW_BLK_NAME_LEN, "qdss_%u",
qdss->id - QDSS_0);
qdss->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
}

end:
kfree(prop_value);
fail:
return rc;
}

@@ -3906,6 +3973,19 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->true_inline_prefill_fudge_lines = 2;
sde_cfg->true_inline_prefill_lines_nv12 = 32;
sde_cfg->true_inline_prefill_lines = 48;
} else if (IS_SDMTRINKET_TARGET(hw_rev)) {
sde_cfg->has_cwb_support = true;
sde_cfg->has_qsync = true;
sde_cfg->perf.min_prefill_lines = 24;
sde_cfg->vbif_qos_nlvl = 8;
sde_cfg->ts_prefill_rev = 2;
sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
sde_cfg->delay_prg_fetch_start = true;
sde_cfg->sui_ns_allowed = true;
sde_cfg->sui_misr_supported = true;
sde_cfg->sui_block_xin_mask = 0xC61;
sde_cfg->has_hdr = false;
sde_cfg->has_sui_blendstage = true;
} else {
SDE_ERROR("unsupported chipset id:%X\n", hw_rev);
sde_cfg->perf.min_prefill_lines = 0xffff;

@@ -4125,6 +4205,10 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
if (rc)
goto end;

rc = sde_qdss_parse_dt(np, sde_cfg);
if (rc)
goto end;

rc = _sde_hardware_post_caps(sde_cfg, hw_rev);
if (rc)
goto end;
@@ -49,6 +49,7 @@
#define SDE_HW_VER_510 SDE_HW_VER(5, 1, 0) /* sdmshrike v1.0 */
#define SDE_HW_VER_520 SDE_HW_VER(5, 2, 0) /* sdmmagpie v1.0 */
#define SDE_HW_VER_530 SDE_HW_VER(5, 3, 0) /* sm6150 v1.0 */
#define SDE_HW_VER_540 SDE_HW_VER(5, 4, 0) /* sdmtrinket v1.0 */
#define SDE_HW_VER_600 SDE_HW_VER(6, 0, 0) /* kona */
#define SDE_HW_VER_610 SDE_HW_VER(6, 1, 0) /* sm7250 */

@@ -60,6 +61,7 @@
#define IS_SDMSHRIKE_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_510)
#define IS_SDMMAGPIE_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_520)
#define IS_SM6150_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_530)
#define IS_SDMTRINKET_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_540)
#define IS_KONA_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_600)
#define IS_SAIPAN_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_610)

@@ -114,7 +116,8 @@ enum {
SDE_HW_UBWC_VER_30 = SDE_HW_UBWC_VER(0x300),
SDE_HW_UBWC_VER_40 = SDE_HW_UBWC_VER(0x400),
};

#define IS_UBWC_10_SUPPORTED(rev) \
IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_10)
#define IS_UBWC_20_SUPPORTED(rev) \
IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_20)
#define IS_UBWC_30_SUPPORTED(rev) \

@@ -974,6 +977,17 @@ struct sde_merge_3d_cfg {
};

/**
* struct sde_qdss_cfg - information of qdss blocks
* @id enum identifying this block
* @base register offset of this block
* @len: length of hardware block
* @features bit mask identifying sub-blocks/features
*/
struct sde_qdss_cfg {
SDE_HW_BLK_INFO;
};

/*
* struct sde_vbif_dynamic_ot_cfg - dynamic OT setting
* @pps pixel per seconds
* @ot_limit OT limit to use up to specified pixel per second

@@ -1127,6 +1141,8 @@ struct sde_sc_cfg {
* @cdp_cfg cdp use case configurations
* @cpu_mask: pm_qos cpu mask value
* @cpu_dma_latency: pm_qos cpu dma latency value
* @axi_bus_width: axi bus width value in bytes
* @num_mnoc_ports: number of mnoc ports
*/
struct sde_perf_cfg {
u32 max_bw_low;

@@ -1153,6 +1169,8 @@ struct sde_perf_cfg {
struct sde_perf_cdp_cfg cdp_cfg[SDE_PERF_CDP_USAGE_MAX];
u32 cpu_mask;
u32 cpu_dma_latency;
u32 axi_bus_width;
u32 num_mnoc_ports;
};

/**

@@ -1335,6 +1353,9 @@ struct sde_mdss_cfg {
u32 merge_3d_count;
struct sde_merge_3d_cfg merge_3d[MAX_BLOCKS];

u32 qdss_count;
struct sde_qdss_cfg qdss[MAX_BLOCKS];

/* Add additional block data structures here */

struct sde_perf_cfg perf;

@@ -138,7 +138,7 @@ static const u32 cdm_flush_tbl[CDM_MAX] = {SDE_NONE, 0};
/**
* list of CWB bits in CTL_CWB_FLUSH
*/
static const u32 cwb_flush_tbl[CWB_MAX] = {SDE_NONE, SDE_NONE, SDE_NONE, 2, 3,
static const u32 cwb_flush_tbl[CWB_MAX] = {SDE_NONE, SDE_NONE, 1, 2, 3,
4, 5};

/**

@@ -555,7 +555,7 @@ static inline int sde_hw_ctl_update_bitmask_cwb_v1(struct sde_hw_ctl *ctx,
if (!ctx)
return -EINVAL;

if ((cwb < CWB_2) || (cwb >= CWB_MAX)) {
if ((cwb < CWB_1) || (cwb >= CWB_MAX)) {
SDE_ERROR("Unsupported cwb %d\n", cwb);
return -EINVAL;
}

@@ -126,6 +126,7 @@
/**
* Concurrent WB overflow interrupt status bit definitions
*/
#define SDE_INTR_CWB_1_OVERFLOW BIT(8)
#define SDE_INTR_CWB_2_OVERFLOW BIT(14)
#define SDE_INTR_CWB_3_OVERFLOW BIT(15)
#define SDE_INTR_CWB_4_OVERFLOW BIT(20)

@@ -286,7 +286,8 @@ static void _setup_mixer_ops(struct sde_mdss_cfg *m,
IS_SM6150_TARGET(m->hwversion) ||
IS_SDMMAGPIE_TARGET(m->hwversion) ||
IS_KONA_TARGET(m->hwversion) ||
IS_SAIPAN_TARGET(m->hwversion))
IS_SAIPAN_TARGET(m->hwversion) ||
IS_SDMTRINKET_TARGET(m->hwversion))
ops->setup_blend_config = sde_hw_lm_setup_blend_config_sdm845;
else
ops->setup_blend_config = sde_hw_lm_setup_blend_config;

@@ -104,6 +104,7 @@ enum sde_hw_blk_type {
SDE_HW_BLK_WB,
SDE_HW_BLK_DSC,
SDE_HW_BLK_MERGE_3D,
SDE_HW_BLK_QDSS,
SDE_HW_BLK_MAX,
};

@@ -322,6 +323,11 @@ enum sde_merge_3d {
MERGE_3D_MAX
};

enum sde_qdss {
QDSS_0,
QDSS_MAX
};

/**
* SDE HW,Component order color map
*/

@@ -512,6 +518,7 @@ struct sde_mdss_color {
#define SDE_DBG_MASK_REGDMA (1 << 14)
#define SDE_DBG_MASK_UIDLE (1 << 15)
#define SDE_DBG_MASK_SID (1 << 15)
#define SDE_DBG_MASK_QDSS (1 << 16)

/**
* struct sde_hw_cp_cfg: hardware dspp/lm feature payload.
msm/sde/sde_hw_qdss.c (new file, 100 lines)
@@ -0,0 +1,100 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/mutex.h>
#include <linux/platform_device.h>

#include "sde_kms.h"
#include "sde_dbg.h"
#include "sde_hw_qdss.h"

#define QDSS_CONFIG 0x0

static struct sde_qdss_cfg *_qdss_offset(enum sde_qdss qdss,
struct sde_mdss_cfg *m,
void __iomem *addr,
struct sde_hw_blk_reg_map *b)
{
int i;

for (i = 0; i < m->qdss_count; i++) {
if (qdss == m->qdss[i].id) {
b->base_off = addr;
b->blk_off = m->qdss[i].base;
b->length = m->qdss[i].len;
b->hwversion = m->hwversion;
b->log_mask = SDE_DBG_MASK_QDSS;
return &m->qdss[i];
}
}

return ERR_PTR(-EINVAL);
}

static void sde_hw_qdss_enable_qdss_events(struct sde_hw_qdss *hw_qdss,
bool enable)
{
struct sde_hw_blk_reg_map *c = &hw_qdss->hw;
u32 val;

val = enable ? 0x100 : 0;

if (c)
SDE_REG_WRITE(c, QDSS_CONFIG, val);
}

static void _setup_qdss_ops(struct sde_hw_qdss_ops *ops)
{
ops->enable_qdss_events = sde_hw_qdss_enable_qdss_events;
}

static struct sde_hw_blk_ops sde_hw_ops = {
.start = NULL,
.stop = NULL,
};

struct sde_hw_qdss *sde_hw_qdss_init(enum sde_qdss idx,
void __iomem *addr,
struct sde_mdss_cfg *m)
{
struct sde_hw_qdss *c;
struct sde_qdss_cfg *cfg;
int rc;

c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);

cfg = _qdss_offset(idx, m, addr, &c->hw);
if (IS_ERR_OR_NULL(cfg)) {
kfree(c);
return ERR_PTR(-EINVAL);
}

c->idx = idx;
c->caps = cfg;
_setup_qdss_ops(&c->ops);

rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_QDSS, idx, &sde_hw_ops);
if (rc) {
SDE_ERROR("failed to init hw blk %d\n", rc);
kzfree(c);
return ERR_PTR(rc);
}

sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
c->hw.blk_off + c->hw.length, c->hw.xin_id);

return c;
}

void sde_hw_qdss_destroy(struct sde_hw_qdss *qdss)
{
if (qdss)
sde_hw_blk_destroy(&qdss->base);
kfree(qdss);
}
msm/sde/sde_hw_qdss.h (new file, 68 lines)
@@ -0,0 +1,68 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/

#ifndef _SDE_HW_QDSS_H
#define _SDE_HW_QDSS_H

#include "sde_hw_catalog.h"
#include "sde_hw_mdss.h"
#include "sde_hw_blk.h"
#include "sde_hw_util.h"

struct sde_hw_qdss;

/**
* struct sde_hw_qdss_ops - interface to the qdss hardware driver functions
* Assumption is these functions will be called after clocks are enabled
*/
struct sde_hw_qdss_ops {
/**
* enable_qdss_events - enable qdss events
* @hw_qdss: Pointer to qdss context
*/
void (*enable_qdss_events)(struct sde_hw_qdss *hw_qdss, bool enable);
};

struct sde_hw_qdss {
struct sde_hw_blk base;
struct sde_hw_blk_reg_map hw;

/* qdss */
enum sde_qdss idx;
const struct sde_qdss_cfg *caps;

/* ops */
struct sde_hw_qdss_ops ops;
};

/**
* to_sde_hw_qdss - convert base object sde_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct sde_hw_qdss *to_sde_hw_qdss(struct sde_hw_blk *hw)
{
return container_of(hw, struct sde_hw_qdss, base);
}

/**
* sde_hw_qdss_init - initializes the qdss block for the passed qdss idx
* @idx: QDSS index for which driver object is required
* @addr: Mapped register io address of MDP
* @m: Pointer to mdss catalog data
* Returns: Error code or allocated sde_hw_qdss context
*/
struct sde_hw_qdss *sde_hw_qdss_init(enum sde_qdss idx,
void __iomem *addr,
struct sde_mdss_cfg *m);

/**
* sde_hw_qdss_destroy - destroys qdss driver context
* should be called to free the context
* @qdss: Pointer to qdss driver context returned by sde_hw_qdss_init
*/
void sde_hw_qdss_destroy(struct sde_hw_qdss *qdss);

#endif /*_SDE_HW_QDSS_H */
@@ -360,6 +360,11 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
if (IS_UBWC_40_SUPPORTED(ctx->catalog->ubwc_version)) {
SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
SDE_FORMAT_IS_YUV(fmt) ? 0 : BIT(30));
} else if (IS_UBWC_10_SUPPORTED(ctx->catalog->ubwc_version)) {
alpha_en_mask = const_alpha_en ? BIT(31) : 0;
SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
alpha_en_mask | (ctx->mdp->ubwc_swizzle & 0x1) |
BIT(8) | (ctx->mdp->highest_bank_bit << 4));
} else if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version)) {
alpha_en_mask = const_alpha_en ? BIT(31) : 0;
SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,

@@ -686,7 +691,8 @@ static void _sde_hw_sspp_setup_excl_rect(struct sde_hw_pipe *ctx,
u32 size, xy;
u32 idx;
u32 reg_xy, reg_size;
u32 excl_ctrl, enable_bit;
u32 excl_ctrl = BIT(0);
u32 enable_bit;

if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !excl_rect)
return;

@@ -706,7 +712,10 @@ static void _sde_hw_sspp_setup_excl_rect(struct sde_hw_pipe *ctx,
xy = (excl_rect->y << 16) | (excl_rect->x);
size = (excl_rect->h << 16) | (excl_rect->w);

excl_ctrl = SDE_REG_READ(c, SSPP_EXCL_REC_CTL + idx);
/* Set if multi-rect disabled, read+modify only if multi-rect enabled */
if (rect_index != SDE_SSPP_RECT_SOLO)
excl_ctrl = SDE_REG_READ(c, SSPP_EXCL_REC_CTL + idx);

if (!size) {
SDE_REG_WRITE(c, SSPP_EXCL_REC_CTL + idx,
excl_ctrl & ~enable_bit);

@@ -409,6 +409,8 @@ void sde_hw_reset_ubwc(struct sde_hw_mdp *mdp, struct sde_mdss_cfg *m)

if (IS_UBWC_30_SUPPORTED(m->ubwc_version))
reg |= BIT(10);
if (IS_UBWC_10_SUPPORTED(m->ubwc_version))
reg |= BIT(8);

SDE_REG_WRITE(&c, UBWC_STATIC, reg);
} else {

@@ -235,7 +235,8 @@ static void _setup_vbif_ops(const struct sde_mdss_cfg *m,
if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
ops->set_qos_remap = sde_hw_set_qos_remap;
if (IS_SM8150_TARGET(m->hwversion) || IS_SM6150_TARGET(m->hwversion) ||
IS_SDMMAGPIE_TARGET(m->hwversion))
IS_SDMMAGPIE_TARGET(m->hwversion) ||
IS_SDMTRINKET_TARGET(m->hwversion))
ops->set_mem_type = sde_hw_set_mem_type_v1;
else
ops->set_mem_type = sde_hw_set_mem_type;

@@ -162,6 +162,11 @@ static void sde_hw_wb_setup_format(struct sde_hw_wb *ctx,
SDE_REG_WRITE(c, WB_UBWC_STATIC_CTRL,
(ctx->mdp->ubwc_swizzle << 0) |
(ctx->mdp->highest_bank_bit << 4));
if (IS_UBWC_10_SUPPORTED(ctx->catalog->ubwc_version))
SDE_REG_WRITE(c, WB_UBWC_STATIC_CTRL,
(ctx->mdp->ubwc_swizzle << 0) |
BIT(8) |
(ctx->mdp->highest_bank_bit << 4));
}

if (data->is_secure)
msm/sde/sde_rm.c
@@ -16,6 +16,7 @@
#include "sde_encoder.h"
#include "sde_connector.h"
#include "sde_hw_dsc.h"
#include "sde_crtc.h"

#define RESERVED_BY_OTHER(h, r) \
(((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id)) ||\

@@ -879,8 +880,8 @@ static int _sde_rm_reserve_lms(
int i, rc = 0;

if (!reqs->topology->num_lm) {
SDE_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
return -EINVAL;
SDE_DEBUG("invalid number of lm: %d\n", reqs->topology->num_lm);
return 0;
}

/* Find a primary mixer */

@@ -994,6 +995,11 @@ static int _sde_rm_reserve_ctls(
struct sde_rm_hw_iter iter;
int i = 0;

if (!top->num_ctl) {
SDE_DEBUG("invalid number of ctl: %d\n", top->num_ctl);
return 0;
}

memset(&ctls, 0, sizeof(ctls));

sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);

@@ -1551,9 +1557,23 @@ static int _sde_rm_populate_requirements(
* Set the requirement for LM which has CWB support if CWB is
* found enabled.
*/
if (!RM_RQ_CWB(reqs) && sde_encoder_in_clone_mode(enc))
if (!RM_RQ_CWB(reqs) && sde_encoder_in_clone_mode(enc)) {
reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_CWB);

/*
* topology selection based on conn mode is not valid for CWB
* as WB conn populates modes based on max_mixer_width check
* but primary can be using dual LMs. This topology override for
* CWB is to check number of datapath active in primary and
* allocate same number of LM/PP blocks reserved for CWB
*/
reqs->topology =
&rm->topology_tbl[SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE];
if (sde_crtc_get_num_datapath(crtc_state->crtc) == 1)
reqs->topology =
&rm->topology_tbl[SDE_RM_TOPOLOGY_SINGLEPIPE];
}

SDE_DEBUG("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
reqs->hw_res.display_num_of_h_tiles);
SDE_DEBUG("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
@@ -1904,3 +1924,100 @@ end:

return ret;
}

int sde_rm_ext_blk_create_reserve(struct sde_rm *rm,
struct sde_hw_blk *hw, struct drm_encoder *enc)
{
struct sde_rm_hw_blk *blk;
struct sde_rm_rsvp *rsvp;
int ret = 0;

if (!rm || !hw || !enc) {
SDE_ERROR("invalid parameters\n");
return -EINVAL;
}

if (hw->type >= SDE_HW_BLK_MAX) {
SDE_ERROR("invalid HW type\n");
return -EINVAL;
}

mutex_lock(&rm->rm_lock);

rsvp = _sde_rm_get_rsvp(rm, enc);
if (!rsvp) {
rsvp = kzalloc(sizeof(*rsvp), GFP_KERNEL);
if (!rsvp) {
ret = -ENOMEM;
goto end;
}

rsvp->seq = ++rm->rsvp_next_seq;
rsvp->enc_id = enc->base.id;
list_add_tail(&rsvp->list, &rm->rsvps);

SDE_DEBUG("create rsvp %d for enc %d\n",
rsvp->seq, rsvp->enc_id);
}

blk = kzalloc(sizeof(*blk), GFP_KERNEL);
if (!blk) {
ret = -ENOMEM;
goto end;
}

blk->type = hw->type;
blk->id = hw->id;
blk->hw = hw;
blk->rsvp = rsvp;
list_add_tail(&blk->list, &rm->hw_blks[hw->type]);

SDE_DEBUG("create blk %d %d for rsvp %d enc %d\n", blk->type, blk->id,
rsvp->seq, rsvp->enc_id);

end:
mutex_unlock(&rm->rm_lock);
return ret;
}

int sde_rm_ext_blk_destroy(struct sde_rm *rm,
struct drm_encoder *enc)
{
struct sde_rm_hw_blk *blk = NULL, *p;
struct sde_rm_rsvp *rsvp;
enum sde_hw_blk_type type;
int ret = 0;

if (!rm || !enc) {
SDE_ERROR("invalid parameters\n");
return -EINVAL;
}

mutex_lock(&rm->rm_lock);

rsvp = _sde_rm_get_rsvp(rm, enc);
if (!rsvp) {
ret = -ENOENT;
SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
goto end;
}

for (type = 0; type < SDE_HW_BLK_MAX; type++) {
list_for_each_entry_safe(blk, p, &rm->hw_blks[type], list) {
if (blk->rsvp == rsvp) {
list_del(&blk->list);
SDE_DEBUG("del blk %d %d from rsvp %d enc %d\n",
blk->type, blk->id,
rsvp->seq, rsvp->enc_id);
kfree(blk);
}
}
}

SDE_DEBUG("del rsvp %d\n", rsvp->seq);
list_del(&rsvp->list);
kfree(rsvp);
end:
mutex_unlock(&rm->rm_lock);
return ret;
}
@@ -247,13 +247,6 @@ bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *iter);
*/
bool sde_rm_request_hw_blk(struct sde_rm *rm, struct sde_rm_hw_request *hw);

/**
* sde_rm_check_property_topctl - validate property bitmask before it is set
* @val: user's proposed topology control bitmask
* @Return: 0 on success or error
*/
int sde_rm_check_property_topctl(uint64_t val);

/**
* sde_rm_cont_splash_res_init - Read the current MDSS configuration
* to update the splash data structure with the topology

@@ -297,4 +290,26 @@ static inline bool sde_rm_topology_is_dual_ctl(struct sde_rm *rm,

return rm->topology_tbl[topology].num_ctl == DUAL_CTL;
}

/**
* sde_rm_ext_blk_create_reserve - Create external HW blocks
* in resource manager and reserve for specific encoder.
* @rm: SDE Resource Manager handle
* @hw: external HW block
* @drm_enc: DRM Encoder handle
* @Return: 0 on Success otherwise -ERROR
*/
int sde_rm_ext_blk_create_reserve(struct sde_rm *rm,
struct sde_hw_blk *hw,
struct drm_encoder *enc);

/**
* sde_rm_ext_blk_destroy - Given the encoder for the display chain, release
* external HW blocks created for that.
* @rm: SDE Resource Manager handle
* @enc: DRM Encoder handle
* @Return: 0 on Success otherwise -ERROR
*/
int sde_rm_ext_blk_destroy(struct sde_rm *rm,
struct drm_encoder *enc);
#endif /* __SDE_RM_H__ */
@@ -238,6 +238,7 @@ static struct sde_dbg_base {
u32 cur_evt_index;
u32 dbgbus_dump_idx;
u32 vbif_dbgbus_dump_idx;
enum sde_dbg_dump_context dump_mode;
} sde_dbg_base;

/* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */

@@ -2827,6 +2828,17 @@ static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
{0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
};

/**
* _sde_power_check - check if power needs to enabled
* @dump_mode: to check if power need to be enabled
* Return: true if success; false otherwise
*/
static inline bool _sde_power_check(enum sde_dbg_dump_context dump_mode)
{
return (dump_mode == SDE_DBG_DUMP_CLK_ENABLED_CTX ||
dump_mode == SDE_DBG_DUMP_IRQ_CTX) ? false : true;
}

/**
* _sde_dump_reg - helper function for dumping rotator register set content
* @dump_name: register set name

@@ -2838,8 +2850,7 @@ static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
* @from_isr: whether being called from isr context
*/
static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
char *base_addr, char *addr, size_t len_bytes, u32 **dump_mem,
bool from_isr)
char *base_addr, char *addr, size_t len_bytes, u32 **dump_mem)
{
u32 in_log, in_mem, len_align, len_padded;
u32 *dump_addr = NULL;

@@ -2887,7 +2898,7 @@ static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
}
}

if (!from_isr) {
if (_sde_power_check(sde_dbg_base.dump_mode)) {
rc = pm_runtime_get_sync(sde_dbg_base.dev);
if (rc < 0) {
pr_err("failed to enable power %d\n", rc);

@@ -2919,7 +2930,7 @@ static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
addr += REG_DUMP_ALIGN;
}

if (!from_isr)
if (_sde_power_check(sde_dbg_base.dump_mode))
pm_runtime_put_sync(sde_dbg_base.dev);
}

@@ -3022,7 +3033,7 @@ static void _sde_dump_reg_by_ranges(struct sde_dbg_reg_base *dbg,

_sde_dump_reg(range_node->range_name, reg_dump_flag,
dbg->base, addr, len,
&range_node->reg_dump, false);
&range_node->reg_dump);
}
} else {
/* If there is no list to dump ranges, dump all registers */

@@ -3033,7 +3044,7 @@ static void _sde_dump_reg_by_ranges(struct sde_dbg_reg_base *dbg,
addr = dbg->base;
len = dbg->max_offset;
_sde_dump_reg(dbg->name, reg_dump_flag, dbg->base, addr, len,
&dbg->reg_dump, false);
&dbg->reg_dump);
}
}

@@ -3441,7 +3452,7 @@ static void _sde_dump_work(struct work_struct *work)
sde_dbg_base.dump_all, sde_dbg_base.dump_secure);
}

void sde_dbg_dump(bool queue_work, const char *name, ...)
void sde_dbg_dump(enum sde_dbg_dump_context dump_mode, const char *name, ...)
{
int i, index = 0;
bool do_panic = false;

@@ -3458,7 +3469,8 @@ void sde_dbg_dump(bool queue_work, const char *name, ...)
if (!sde_evtlog_is_enabled(sde_dbg_base.evtlog, SDE_EVTLOG_ALWAYS))
return;

if (queue_work && work_pending(&sde_dbg_base.dump_work))
if ((dump_mode == SDE_DBG_DUMP_IRQ_CTX) &&
work_pending(&sde_dbg_base.dump_work))
return;

blk_arr = &sde_dbg_base.req_dump_blks[0];

@@ -3467,6 +3479,7 @@ void sde_dbg_dump(bool queue_work, const char *name, ...)
memset(sde_dbg_base.req_dump_blks, 0,
sizeof(sde_dbg_base.req_dump_blks));
sde_dbg_base.dump_all = false;
sde_dbg_base.dump_mode = dump_mode;

va_start(args, name);
i = 0;

@@ -3509,7 +3522,7 @@ void sde_dbg_dump(bool queue_work, const char *name, ...)
}
va_end(args);

if (queue_work) {
if (dump_mode == SDE_DBG_DUMP_IRQ_CTX) {
/* schedule work to dump later */
sde_dbg_base.work_panic = do_panic;
sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work =
@@ -42,6 +42,12 @@ enum sde_dbg_dump_flag {
SDE_DBG_DUMP_IN_MEM = BIT(1),
};

enum sde_dbg_dump_context {
SDE_DBG_DUMP_PROC_CTX,
SDE_DBG_DUMP_IRQ_CTX,
SDE_DBG_DUMP_CLK_ENABLED_CTX,
};

#define SDE_EVTLOG_DEFAULT_ENABLE (SDE_EVTLOG_CRITICAL | SDE_EVTLOG_IRQ)

/*

@@ -126,8 +132,8 @@ extern struct sde_dbg_evtlog *sde_dbg_base_evtlog;
* Including the special name "panic" will trigger a panic after
* the dumping work has completed.
*/
#define SDE_DBG_DUMP(...) sde_dbg_dump(false, __func__, ##__VA_ARGS__, \
SDE_DBG_DUMP_DATA_LIMITER)
#define SDE_DBG_DUMP(...) sde_dbg_dump(SDE_DBG_DUMP_PROC_CTX, __func__, \
##__VA_ARGS__, SDE_DBG_DUMP_DATA_LIMITER)

/**
* SDE_DBG_DUMP_WQ - trigger dumping of all sde_dbg facilities, queuing the work

@@ -137,8 +143,19 @@ extern struct sde_dbg_evtlog *sde_dbg_base_evtlog;
* Including the special name "panic" will trigger a panic after
* the dumping work has completed.
*/
#define SDE_DBG_DUMP_WQ(...) sde_dbg_dump(true, __func__, ##__VA_ARGS__, \
SDE_DBG_DUMP_DATA_LIMITER)
#define SDE_DBG_DUMP_WQ(...) sde_dbg_dump(SDE_DBG_DUMP_IRQ_CTX, __func__, \
##__VA_ARGS__, SDE_DBG_DUMP_DATA_LIMITER)

/**
* SDE_DBG_DUMP_CLK_EN - trigger dumping of all sde_dbg facilities, without clk
* @va_args: list of named register dump ranges and regions to dump, as
* registered previously through sde_dbg_reg_register_base and
* sde_dbg_reg_register_dump_range.
* Including the special name "panic" will trigger a panic after
* the dumping work has completed.
*/
#define SDE_DBG_DUMP_CLK_EN(...) sde_dbg_dump(SDE_DBG_DUMP_CLK_ENABLED_CTX, \
__func__, ##__VA_ARGS__, SDE_DBG_DUMP_DATA_LIMITER)

/**
* SDE_DBG_EVT_CTRL - trigger a different driver events

@@ -241,7 +258,7 @@ void sde_dbg_destroy(void);
* the dumping work has completed.
* Returns: none
*/
void sde_dbg_dump(bool queue_work, const char *name, ...);
void sde_dbg_dump(enum sde_dbg_dump_context mode, const char *name, ...);

/**
* sde_dbg_ctrl - trigger specific actions for the driver with debugging

@@ -394,7 +411,8 @@ static inline void sde_dbg_destroy(void)
{
}

static inline void sde_dbg_dump(bool queue_work, const char *name, ...)
static inline void sde_dbg_dump(enum sde_dbg_dump_context,
const char *name, ...)
{
}