diff --git a/msm/sde/sde_crtc.c b/msm/sde/sde_crtc.c
index fe087ae7bc..68b404c082 100644
--- a/msm/sde/sde_crtc.c
+++ b/msm/sde/sde_crtc.c
@@ -449,26 +449,6 @@ static bool sde_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
-static int _sde_crtc_get_ctlstart_timeout(struct drm_crtc *crtc)
-{
-	struct drm_encoder *encoder;
-	int rc = 0;
-
-	if (!crtc || !crtc->dev)
-		return 0;
-
-	list_for_each_entry(encoder,
-			&crtc->dev->mode_config.encoder_list, head) {
-		if (encoder->crtc != crtc)
-			continue;
-
-		if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_CMD)
-			rc += sde_encoder_get_ctlstart_timeout_state(encoder);
-	}
-
-	return rc;
-}
-
 static void _sde_crtc_setup_blend_cfg(struct sde_crtc_mixer *mixer,
 	struct sde_plane_state *pstate, struct sde_format *format)
 {
@@ -3157,13 +3137,7 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
 	if (unlikely(!sde_crtc->num_mixers))
 		goto end;
 
-	if (_sde_crtc_get_ctlstart_timeout(crtc)) {
-		_sde_crtc_blend_setup(crtc, old_state, false);
-		SDE_ERROR("border fill only commit after ctlstart timeout\n");
-	} else {
-		_sde_crtc_blend_setup(crtc, old_state, true);
-	}
-
+	_sde_crtc_blend_setup(crtc, old_state, true);
 	_sde_crtc_dest_scaler_setup(crtc);
 
 	/* cancel the idle notify delayed work */
diff --git a/msm/sde/sde_encoder.c b/msm/sde/sde_encoder.c
index 9365a259c8..0eeda6ea28 100644
--- a/msm/sde/sde_encoder.c
+++ b/msm/sde/sde_encoder.c
@@ -3659,24 +3659,6 @@ int sde_encoder_idle_request(struct drm_encoder *drm_enc)
 	return 0;
 }
 
-int sde_encoder_get_ctlstart_timeout_state(struct drm_encoder *drm_enc)
-{
-	struct sde_encoder_virt *sde_enc = NULL;
-	int i, count = 0;
-
-	if (!drm_enc)
-		return 0;
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-
-	for (i = 0; i < sde_enc->num_phys_encs; i++) {
-		count += atomic_read(&sde_enc->phys_encs[i]->ctlstart_timeout);
-		atomic_set(&sde_enc->phys_encs[i]->ctlstart_timeout, 0);
-	}
-
-	return count;
-}
-
 /**
  * _sde_encoder_trigger_flush - trigger flush for a physical encoder
  * drm_enc: Pointer to drm encoder structure
diff --git a/msm/sde/sde_encoder.h b/msm/sde/sde_encoder.h
index 2a03edd8f0..7a86f7d93d 100644
--- a/msm/sde/sde_encoder.h
+++ b/msm/sde/sde_encoder.h
@@ -163,11 +163,11 @@ void sde_encoder_kickoff(struct drm_encoder *encoder, bool is_error);
 * @encoder:	encoder pointer
 * @event:	event to wait for
 * MSM_ENC_COMMIT_DONE -  Wait for hardware to have flushed the current pending
- *                        frames to hardware at a vblank or ctl_start
+ *                        frames to hardware at a vblank or wr_ptr_start
 *                        Encoders will map this differently depending on the
 *                        panel type.
 *                        vid mode -> vsync_irq
- *                        cmd mode -> ctl_start
+ *                        cmd mode -> wr_ptr_start_irq
 * MSM_ENC_TX_COMPLETE -  Wait for the hardware to transfer all the pixels to
 *                        the panel. Encoders will map this differently
 *                        depending on the panel type.
@@ -339,11 +339,4 @@ int sde_encoder_in_cont_splash(struct drm_encoder *enc);
 */
 void sde_encoder_uidle_enable(struct drm_encoder *drm_enc, bool enable);
 
-/*
- * sde_encoder_get_ctlstart_timeout_state - checks if ctl start timeout happened
- * @drm_enc: Pointer to drm encoder structure
- * @Return: non zero value if ctl start timeout occurred
- */
-int sde_encoder_get_ctlstart_timeout_state(struct drm_encoder *enc);
-
 #endif /* __SDE_ENCODER_H__ */
diff --git a/msm/sde/sde_encoder_phys.h b/msm/sde/sde_encoder_phys.h
index 3a4b516fcb..e43eefc6f9 100644
--- a/msm/sde/sde_encoder_phys.h
+++ b/msm/sde/sde_encoder_phys.h
@@ -189,9 +189,9 @@ struct sde_encoder_phys_ops {
 /**
 * enum sde_intr_idx - sde encoder interrupt index
 * @INTR_IDX_VSYNC:    Vsync interrupt for video mode panel
- * @INTR_IDX_PINGPONG: Pingpong done unterrupt for cmd mode panel
- * @INTR_IDX_UNDERRUN: Underrun unterrupt for video and cmd mode panel
- * @INTR_IDX_RDPTR:    Readpointer done unterrupt for cmd mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_RDPTR:    Readpointer done interrupt for cmd mode panel
 * @INTR_IDX_WB_DONE:  Writeback done interrupt for WB
 * @INTR_IDX_PP1_OVFL: Pingpong overflow interrupt on PP1 for Concurrent WB
 * @INTR_IDX_PP2_OVFL: Pingpong overflow interrupt on PP2 for Concurrent WB
@@ -200,6 +200,7 @@ struct sde_encoder_phys_ops {
 * @INTR_IDX_PP5_OVFL: Pingpong overflow interrupt on PP5 for Concurrent WB
 * @INTR_IDX_AUTOREFRESH_DONE: Autorefresh done for cmd mode panel meaning
 *                             autorefresh has triggered a double buffer flip
+ * @INTR_IDX_WRPTR:    Writepointer start interrupt for cmd mode panel
 */
 enum sde_intr_idx {
 	INTR_IDX_VSYNC,
@@ -214,6 +215,7 @@ enum sde_intr_idx {
 	INTR_IDX_PP3_OVFL,
 	INTR_IDX_PP4_OVFL,
 	INTR_IDX_PP5_OVFL,
+	INTR_IDX_WRPTR,
 	INTR_IDX_MAX,
 };
 
@@ -274,12 +276,9 @@ struct sde_encoder_irq {
 *			vs. the number of done/vblank irqs. Should hover
 *			between 0-2 Incremented when a new kickoff is
 *			scheduled. Decremented in irq handler
- * @pending_ctlstart_cnt:	Atomic counter tracking the number of ctl start
- *				pending.
 * @pending_retire_fence_cnt:	Atomic counter tracking the pending retire
 *				fences that have to be signalled.
 * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
- * @ctlstart_timeout:		Indicates if ctl start timeout occurred
 * @irq:			IRQ tracking structures
 * @has_intf_te:		Interface TE configuration support
 * @cont_splash_single_flush	Variable to check if single flush is enabled.
@@ -320,10 +319,8 @@ struct sde_encoder_phys {
 	atomic_t wbirq_refcount;
 	atomic_t vsync_cnt;
 	atomic_t underrun_cnt;
-	atomic_t pending_ctlstart_cnt;
 	atomic_t pending_kickoff_cnt;
 	atomic_t pending_retire_fence_cnt;
-	atomic_t ctlstart_timeout;
 	wait_queue_head_t pending_kickoff_wq;
 	struct sde_encoder_irq irq[INTR_IDX_MAX];
 	bool has_intf_te;
@@ -336,7 +333,6 @@ struct sde_encoder_phys {
 
 static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
 {
-	atomic_inc_return(&phys->pending_ctlstart_cnt);
 	return atomic_inc_return(&phys->pending_kickoff_cnt);
 }
 
@@ -373,24 +369,16 @@ struct sde_encoder_phys_cmd_autorefresh {
 * @stream_sel: Stream selection for multi-stream interfaces
 * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
 * @autorefresh: autorefresh feature state
- * @pending_rd_ptr_cnt: atomic counter to indicate if retire fence can be
- *                      signaled at the next rd_ptr_irq
- * @rd_ptr_timestamp: last rd_ptr_irq timestamp
 * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
 * @pending_vblank_wq: Wait queue for blocking until VBLANK received
- * @ctl_start_threshold: A threshold in microseconds allows command mode
- *			engine to trigger the retire fence without waiting for rd_ptr.
 */
 struct sde_encoder_phys_cmd {
 	struct sde_encoder_phys base;
 	int stream_sel;
 	int pp_timeout_report_cnt;
 	struct sde_encoder_phys_cmd_autorefresh autorefresh;
-	atomic_t pending_rd_ptr_cnt;
-	ktime_t rd_ptr_timestamp;
 	atomic_t pending_vblank_cnt;
 	wait_queue_head_t pending_vblank_wq;
-	u32 ctl_start_threshold;
 };
 
 /**
diff --git a/msm/sde/sde_encoder_phys_cmd.c b/msm/sde/sde_encoder_phys_cmd.c
index e29551a9cc..38cb63b5e8 100644
--- a/msm/sde/sde_encoder_phys_cmd.c
+++ b/msm/sde/sde_encoder_phys_cmd.c
@@ -35,12 +35,6 @@
 
 #define SDE_ENC_WR_PTR_START_TIMEOUT_US 20000
 
-/*
- * Threshold for signalling retire fences in cases where
- * CTL_START_IRQ is received just after RD_PTR_IRQ
- */
-#define SDE_ENC_CTL_START_THRESHOLD_US 500
-
 #define SDE_ENC_MAX_POLL_TIMEOUT_US 2000
 
 static inline int _sde_encoder_phys_cmd_get_idle_timeout(
@@ -203,24 +197,6 @@ static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
 			phys_enc->hw_pp->idx - PINGPONG_0, new_cnt, event);
 
-	/*
-	 * Reduce the refcount for the retire fence as well as for the ctl_start
-	 * if the counters are greater than zero. Signal retire fence if there
-	 * was a retire fence count pending and kickoff count is zero.
-	 */
-	if (sde_encoder_phys_cmd_is_master(phys_enc) && (new_cnt == 0)) {
-		while (atomic_add_unless(&phys_enc->pending_retire_fence_cnt,
-				-1, 0)) {
-			if (phys_enc->parent_ops.handle_frame_done)
-				phys_enc->parent_ops.handle_frame_done(
-					phys_enc->parent, phys_enc,
-					SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
-			atomic_add_unless(&phys_enc->pending_ctlstart_cnt,
-					-1, 0);
-			atomic_set(&phys_enc->ctlstart_timeout, 0);
-		}
-	}
-
 	/* Signal any waiting atomic commit thread */
 	wake_up_all(&phys_enc->pending_kickoff_wq);
 	SDE_ATRACE_END("pp_done_irq");
@@ -255,7 +231,7 @@ static void sde_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
 {
 	struct sde_encoder_phys *phys_enc = arg;
 	struct sde_encoder_phys_cmd *cmd_enc;
-	u32 event = 0, scheduler_status = INVALID_CTL_STATUS;
+	u32 scheduler_status = INVALID_CTL_STATUS;
 	struct sde_hw_ctl *ctl;
 	struct sde_hw_pp_vsync_info info[MAX_CHANNELS_PER_ENC] = {{0}};
 
@@ -266,102 +242,53 @@ static void sde_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
 	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
 	ctl = phys_enc->hw_ctl;
 
-	/**
-	 * signal only for master, when the ctl_start irq is
-	 * done and incremented the pending_rd_ptr_cnt.
-	 */
-	if (sde_encoder_phys_cmd_is_master(phys_enc)
-			&& atomic_add_unless(&cmd_enc->pending_rd_ptr_cnt, -1, 0)
-			&& atomic_add_unless(
-				&phys_enc->pending_retire_fence_cnt, -1, 0)) {
-
-		event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
-		if (phys_enc->parent_ops.handle_frame_done)
-			phys_enc->parent_ops.handle_frame_done(
-				phys_enc->parent, phys_enc, event);
-	}
-
 	if (ctl && ctl->ops.get_scheduler_status)
 		scheduler_status = ctl->ops.get_scheduler_status(ctl);
 
 	sde_encoder_helper_get_pp_line_count(phys_enc->parent, info);
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
 		info[0].pp_idx, info[0].intf_idx, info[0].wr_ptr_line_count,
-		event, scheduler_status,
-		info[1].pp_idx, info[1].intf_idx, info[1].wr_ptr_line_count);
+		info[1].pp_idx, info[1].intf_idx, info[1].wr_ptr_line_count,
+		scheduler_status);
 
 	if (phys_enc->parent_ops.handle_vblank_virt)
 		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
 				phys_enc);
 
-	cmd_enc->rd_ptr_timestamp = ktime_get();
-
 	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
 	wake_up_all(&cmd_enc->pending_vblank_wq);
 	SDE_ATRACE_END("rd_ptr_irq");
 }
 
-static void sde_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+static void sde_encoder_phys_cmd_wr_ptr_irq(void *arg, int irq_idx)
 {
 	struct sde_encoder_phys *phys_enc = arg;
-	struct sde_encoder_phys_cmd *cmd_enc;
 	struct sde_hw_ctl *ctl;
 	u32 event = 0;
-	s64 time_diff_us;
 	struct sde_hw_pp_vsync_info info[MAX_CHANNELS_PER_ENC] = {{0}};
 
 	if (!phys_enc || !phys_enc->hw_ctl)
 		return;
 
-	SDE_ATRACE_BEGIN("ctl_start_irq");
-	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
-
+	SDE_ATRACE_BEGIN("wr_ptr_irq");
 	ctl = phys_enc->hw_ctl;
-	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
-	atomic_set(&phys_enc->ctlstart_timeout, 0);
-	time_diff_us = ktime_us_delta(ktime_get(), cmd_enc->rd_ptr_timestamp);
-
-	/* handle retire fence based on only master */
-	if (sde_encoder_phys_cmd_is_master(phys_enc)
-			&& atomic_read(&phys_enc->pending_retire_fence_cnt)) {
-		/**
-		 * Handle rare cases where the ctl_start_irq is received
-		 * after rd_ptr_irq. If it falls within a threshold, it is
-		 * guaranteed the frame would be picked up in the current TE.
-		 * Signal retire fence immediately in such case. The threshold
-		 * timer adds extra line time duration based on lowest panel
-		 * fps for qsync enabled case.
-		 */
-		if ((time_diff_us <= cmd_enc->ctl_start_threshold)
-			&& atomic_add_unless(
-				&phys_enc->pending_retire_fence_cnt, -1, 0)) {
-
-			event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
-
-			if (phys_enc->parent_ops.handle_frame_done)
-				phys_enc->parent_ops.handle_frame_done(
+	if (atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0)) {
+		event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
+		if (phys_enc->parent_ops.handle_frame_done)
+			phys_enc->parent_ops.handle_frame_done(
 				phys_enc->parent, phys_enc, event);
-
-		/**
-		 * In ideal cases, ctl_start_irq is received before the
-		 * rd_ptr_irq, so set the atomic flag to indicate the event
-		 * and rd_ptr_irq will handle signalling the retire fence
-		 */
-		} else {
-			atomic_inc(&cmd_enc->pending_rd_ptr_cnt);
-		}
-	}
+	}
 
 	sde_encoder_helper_get_pp_line_count(phys_enc->parent, info);
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
-		ctl->idx - CTL_0, time_diff_us, event,
+		ctl->idx - CTL_0, event,
 		info[0].pp_idx, info[0].intf_idx, info[0].wr_ptr_line_count,
 		info[1].pp_idx, info[1].intf_idx, info[1].wr_ptr_line_count);
 
-	/* Signal any waiting ctl start interrupt */
+	/* Signal any waiting wr_ptr start interrupt */
 	wake_up_all(&phys_enc->pending_kickoff_wq);
-	SDE_ATRACE_END("ctl_start_irq");
+	SDE_ATRACE_END("wr_ptr_irq");
 }
 
 static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
@@ -417,6 +344,14 @@ static void _sde_encoder_phys_cmd_setup_irq_hw_idx(
 		irq->hw_idx = phys_enc->hw_intf->idx;
 	else
 		irq->hw_idx = phys_enc->hw_pp->idx;
+
+	irq = &phys_enc->irq[INTR_IDX_WRPTR];
+	irq->irq_idx = -EINVAL;
+	if (phys_enc->has_intf_te)
+		irq->hw_idx = phys_enc->hw_intf->idx;
+	else
+		irq->hw_idx = phys_enc->hw_pp->idx;
+
 }
 
 static void sde_encoder_phys_cmd_cont_splash_mode_set(
@@ -534,17 +469,6 @@ static int _sde_encoder_phys_cmd_handle_ppdone_timeout(
 	cmd_enc->pp_timeout_report_cnt++;
 	pending_kickoff_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
 
-	if (sde_encoder_phys_cmd_is_master(phys_enc)) {
-		/* trigger the retire fence if it was missed */
-		if (atomic_add_unless(&phys_enc->pending_retire_fence_cnt,
-				-1, 0))
-			phys_enc->parent_ops.handle_frame_done(
-				phys_enc->parent,
-				phys_enc,
-				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
-		atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
-	}
-
 	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
 			cmd_enc->pp_timeout_report_cnt,
 			pending_kickoff_cnt,
@@ -879,12 +803,11 @@ void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc,
 
 	if (enable) {
 		sde_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
-		sde_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
 		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
 
 		if (sde_encoder_phys_cmd_is_master(phys_enc)) {
 			sde_encoder_helper_register_irq(phys_enc,
-					INTR_IDX_CTL_START);
+					INTR_IDX_WRPTR);
 			sde_encoder_helper_register_irq(phys_enc,
 					INTR_IDX_AUTOREFRESH_DONE);
 		}
@@ -892,12 +815,11 @@ void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc,
 	} else {
 		if (sde_encoder_phys_cmd_is_master(phys_enc)) {
 			sde_encoder_helper_unregister_irq(phys_enc,
-					INTR_IDX_CTL_START);
+					INTR_IDX_WRPTR);
 			sde_encoder_helper_unregister_irq(phys_enc,
 					INTR_IDX_AUTOREFRESH_DONE);
 		}
 
-		sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
 		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
 		sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
 	}
@@ -1053,33 +975,31 @@ static void sde_encoder_phys_cmd_tearcheck_config(
 	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
 	tc_cfg.start_pos = mode->vdisplay;
 	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
-
-	cmd_enc->ctl_start_threshold = (extra_frame_trigger_time / 1000) +
-		SDE_ENC_CTL_START_THRESHOLD_US;
+	tc_cfg.wr_ptr_irq = 1;
 
 	SDE_DEBUG_CMDENC(cmd_enc,
-		"tc %d intf %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+		"tc %d intf %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
 		phys_enc->hw_pp->idx - PINGPONG_0,
 		phys_enc->hw_intf->idx - INTF_0,
 		vsync_hz, mode->vtotal, mode->vrefresh);
 	SDE_DEBUG_CMDENC(cmd_enc,
-		"tc %d intf %d enable %u start_pos %u rd_ptr_irq %u\n",
+		"tc %d intf %d enable %u start_pos %u rd_ptr_irq %u wr_ptr_irq %u\n",
 		phys_enc->hw_pp->idx - PINGPONG_0,
 		phys_enc->hw_intf->idx - INTF_0,
-		tc_enable, tc_cfg.start_pos, tc_cfg.rd_ptr_irq);
+		tc_enable, tc_cfg.start_pos, tc_cfg.rd_ptr_irq,
+		tc_cfg.wr_ptr_irq);
 	SDE_DEBUG_CMDENC(cmd_enc,
-		"tc %d intf %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+		"tc %d intf %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
 		phys_enc->hw_pp->idx - PINGPONG_0,
 		phys_enc->hw_intf->idx - INTF_0,
 		tc_cfg.hw_vsync_mode, tc_cfg.vsync_count,
 		tc_cfg.vsync_init_val);
 	SDE_DEBUG_CMDENC(cmd_enc,
-		"tc %d intf %d cfgheight %u thresh_start %u thresh_cont %u ctl_start_threshold:%d\n",
+		"tc %d intf %d cfgheight %u thresh_start %u thresh_cont %u\n",
 		phys_enc->hw_pp->idx - PINGPONG_0,
 		phys_enc->hw_intf->idx - INTF_0,
 		tc_cfg.sync_cfg_height,
-		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue,
-		cmd_enc->ctl_start_threshold);
+		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
 
 	if (phys_enc->has_intf_te) {
 		phys_enc->hw_intf->ops.setup_tearcheck(phys_enc->hw_intf,
@@ -1285,7 +1205,6 @@ static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
-	atomic_set(&phys_enc->ctlstart_timeout, 0);
 	SDE_DEBUG_CMDENC(cmd_enc, "pp %d intf %d state %d\n",
 			phys_enc->hw_pp->idx - PINGPONG_0,
 			phys_enc->hw_intf->idx - INTF_0,
@@ -1363,6 +1282,7 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff(
 	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
 			atomic_read(&phys_enc->pending_kickoff_cnt),
 			atomic_read(&cmd_enc->autorefresh.kickoff_cnt));
+
 	phys_enc->frame_trigger_mode = params->frame_trigger_mode;
 	if (phys_enc->frame_trigger_mode == FRAME_DONE_WAIT_DEFAULT) {
 		/*
@@ -1390,12 +1310,7 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff(
 		else if (phys_enc->hw_pp->ops.update_tearcheck)
 			phys_enc->hw_pp->ops.update_tearcheck(
 					phys_enc->hw_pp, &tc_cfg);
-
-		cmd_enc->ctl_start_threshold =
-			(extra_frame_trigger_time / 1000) +
-			SDE_ENC_CTL_START_THRESHOLD_US;
-		SDE_EVT32(DRMID(phys_enc->parent),
-			tc_cfg.sync_threshold_start, cmd_enc->ctl_start_threshold);
+		SDE_EVT32(DRMID(phys_enc->parent), tc_cfg.sync_threshold_start);
 	}
 
 	SDE_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
@@ -1404,7 +1319,7 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff(
 	return ret;
 }
 
-static int _sde_encoder_phys_cmd_wait_for_ctl_start(
+static int _sde_encoder_phys_cmd_wait_for_wr_ptr(
 		struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
@@ -1421,14 +1336,14 @@ static int _sde_encoder_phys_cmd_wait_for_ctl_start(
 	ctl = phys_enc->hw_ctl;
 	wait_info.wq = &phys_enc->pending_kickoff_wq;
-	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+	wait_info.atomic_cnt = &phys_enc->pending_retire_fence_cnt;
 	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
 
 	/* slave encoder doesn't enable for ppsplit */
 	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
 		return 0;
 
-	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_WRPTR,
 			&wait_info);
 
 	if (ret == -ETIMEDOUT) {
 		struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
@@ -1438,28 +1353,24 @@ static int _sde_encoder_phys_cmd_wait_for_ctl_start(
 		if (frame_pending)
 			SDE_ERROR_CMDENC(cmd_enc,
-					"ctl start interrupt wait failed\n");
+					"wr_ptr start interrupt wait failed\n");
 		else
 			ret = 0;
 
-		if (sde_encoder_phys_cmd_is_master(phys_enc)) {
-			/*
-			 * Signaling the retire fence at ctl start timeout
-			 * to allow the next commit and avoid device freeze.
-			 * As ctl start timeout can occurs due to no read ptr,
-			 * updating pending_rd_ptr_cnt here may not cover all
-			 * cases. Hence signaling the retire fence.
-			 */
-			if (atomic_add_unless(
-				&phys_enc->pending_retire_fence_cnt, -1, 0))
-				phys_enc->parent_ops.handle_frame_done(
-					phys_enc->parent,
-					phys_enc,
-					SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
-			atomic_add_unless(
-				&phys_enc->pending_ctlstart_cnt, -1, 0);
-			atomic_inc_return(&phys_enc->ctlstart_timeout);
-		}
+		/*
+		 * Signaling the retire fence at wr_ptr timeout
+		 * to allow the next commit and avoid a device freeze.
+		 * A wr_ptr timeout can occur when no read pointer is
+		 * generated, so waiting for rd_ptr_irq may not cover
+		 * all cases. Hence signal the retire fence here.
+		 */
+		if (sde_encoder_phys_cmd_is_master(phys_enc) &&
+			atomic_add_unless(&phys_enc->pending_retire_fence_cnt,
+				-1, 0))
+			phys_enc->parent_ops.handle_frame_done(
+				phys_enc->parent, phys_enc,
+				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
+
 	} else if ((ret == 0) &&
 		(phys_enc->frame_trigger_mode == FRAME_DONE_WAIT_POSTED_START) &&
 		atomic_read(&phys_enc->pending_kickoff_cnt) &&
@@ -1511,7 +1422,7 @@ static int sde_encoder_phys_cmd_wait_for_commit_done(
 
 	/* only required for master controller */
 	if (sde_encoder_phys_cmd_is_master(phys_enc))
-		rc = _sde_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+		rc = _sde_encoder_phys_cmd_wait_for_wr_ptr(phys_enc);
 
 	if (!rc && sde_encoder_phys_cmd_is_master(phys_enc) &&
 			cmd_enc->autorefresh.cfg.enable)
@@ -1746,7 +1657,6 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
 	phys_enc->enc_spinlock = p->enc_spinlock;
 	phys_enc->vblank_ctl_lock = p->vblank_ctl_lock;
 	cmd_enc->stream_sel = 0;
-	cmd_enc->ctl_start_threshold = SDE_ENC_CTL_START_THRESHOLD_US;
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 	sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
 	phys_enc->comp_type = p->comp_type;
@@ -1768,7 +1678,7 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
 	irq->name = "ctl_start";
 	irq->intr_type = SDE_IRQ_TYPE_CTL_START;
 	irq->intr_idx = INTR_IDX_CTL_START;
-	irq->cb.func = sde_encoder_phys_cmd_ctl_start_irq;
+	irq->cb.func = NULL;
 
 	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
 	irq->name = "pp_done";
@@ -1804,13 +1714,20 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
 	irq->intr_idx = INTR_IDX_AUTOREFRESH_DONE;
 	irq->cb.func = sde_encoder_phys_cmd_autorefresh_done_irq;
 
+	irq = &phys_enc->irq[INTR_IDX_WRPTR];
+	irq->intr_idx = INTR_IDX_WRPTR;
+	irq->name = "wr_ptr";
+
+	if (phys_enc->has_intf_te)
+		irq->intr_type = SDE_IRQ_TYPE_INTF_TEAR_WR_PTR;
+	else
+		irq->intr_type = SDE_IRQ_TYPE_PING_PONG_WR_PTR;
+	irq->cb.func = sde_encoder_phys_cmd_wr_ptr_irq;
+
 	atomic_set(&phys_enc->vblank_refcount, 0);
 	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
-	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
 	atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
-	atomic_set(&cmd_enc->pending_rd_ptr_cnt, 0);
 	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
-	atomic_set(&phys_enc->ctlstart_timeout, 0);
 	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
 	init_waitqueue_head(&cmd_enc->pending_vblank_wq);
 	atomic_set(&cmd_enc->autorefresh.kickoff_cnt, 0);
diff --git a/msm/sde/sde_hw_intf.c b/msm/sde/sde_hw_intf.c
index 9f3c9ba9df..454d47cbe4 100644
--- a/msm/sde/sde_hw_intf.c
+++ b/msm/sde/sde_hw_intf.c
@@ -485,6 +485,7 @@ static int sde_hw_intf_setup_te_config(struct sde_hw_intf *intf,
 	SDE_REG_WRITE(c, INTF_TEAR_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
 	SDE_REG_WRITE(c, INTF_TEAR_VSYNC_INIT_VAL, te->vsync_init_val);
 	SDE_REG_WRITE(c, INTF_TEAR_RD_PTR_IRQ, te->rd_ptr_irq);
+	SDE_REG_WRITE(c, INTF_TEAR_WR_PTR_IRQ, te->wr_ptr_irq);
 	SDE_REG_WRITE(c, INTF_TEAR_START_POS, te->start_pos);
 	SDE_REG_WRITE(c, INTF_TEAR_SYNC_THRESH,
 			((te->sync_threshold_continue << 16) |
diff --git a/msm/sde/sde_hw_mdss.h b/msm/sde/sde_hw_mdss.h
index 551ac4f89c..4694e1abc3 100644
--- a/msm/sde/sde_hw_mdss.h
+++ b/msm/sde/sde_hw_mdss.h
@@ -647,6 +647,7 @@ struct sde_splash_data {
 *			needs to be above the read pointer
 * @start_pos:	The position from which the start_threshold value is added
 * @rd_ptr_irq:	The read pointer line at which interrupt has to be generated
+ * @wr_ptr_irq:	The write pointer line at which interrupt has to be generated
 * @hw_vsync_mode:	Sync with external frame sync input
 */
 struct sde_hw_tear_check {
@@ -657,6 +658,7 @@ struct sde_hw_tear_check {
 	u32 sync_threshold_continue;
 	u32 start_pos;
 	u32 rd_ptr_irq;
+	u32 wr_ptr_irq;
 	u8 hw_vsync_mode;
 };
 
diff --git a/msm/sde/sde_hw_pingpong.c b/msm/sde/sde_hw_pingpong.c
index 36a394f4b6..e7f92eafe0 100644
--- a/msm/sde/sde_hw_pingpong.c
+++ b/msm/sde/sde_hw_pingpong.c
@@ -176,6 +176,7 @@ static int sde_hw_pp_setup_te_config(struct sde_hw_pingpong *pp,
 	SDE_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
 	SDE_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
 	SDE_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+	SDE_REG_WRITE(c, PP_WR_PTR_IRQ, te->wr_ptr_irq);
 	SDE_REG_WRITE(c, PP_START_POS, te->start_pos);
 	SDE_REG_WRITE(c, PP_SYNC_THRESH,
 			((te->sync_threshold_continue << 16) |
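
Reviewer note (not part of the patch): the sketch below is a standalone userspace analogy of the retire-fence handshake this change moves onto the wr_ptr interrupt — kickoff increments pending_retire_fence_cnt, the wr_ptr IRQ decrements it, signals the fence, and wakes pending_kickoff_wq, and the commit-done path blocks until the count drains. All names here (fake_*, the demo main) are hypothetical; this is an illustration, not driver code.

/*
 * Build: cc -std=c11 -pthread wr_ptr_sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int pending_retire_fence_cnt;
static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq_cond = PTHREAD_COND_INITIALIZER;

/* kickoff: one frame in flight, so one retire fence is now pending */
static void fake_prepare_for_kickoff(void)
{
	atomic_fetch_add(&pending_retire_fence_cnt, 1);
}

/* "wr_ptr IRQ": consume one pending fence, if any, then wake waiters */
static void *fake_wr_ptr_irq(void *arg)
{
	(void)arg;
	usleep(16000); /* pretend one frame time passes before the IRQ */

	/* emulate atomic_add_unless(&cnt, -1, 0): decrement unless zero */
	int cur = atomic_load(&pending_retire_fence_cnt);
	while (cur > 0 &&
	       !atomic_compare_exchange_weak(&pending_retire_fence_cnt,
					     &cur, cur - 1))
		;
	if (cur > 0)
		printf("signal retire fence\n");

	pthread_mutex_lock(&wq_lock);
	pthread_cond_broadcast(&wq_cond); /* wake_up_all(pending_kickoff_wq) */
	pthread_mutex_unlock(&wq_lock);
	return NULL;
}

/* commit-done wait: block until the pending fence count drops to zero */
static void fake_wait_for_wr_ptr(void)
{
	pthread_mutex_lock(&wq_lock);
	while (atomic_load(&pending_retire_fence_cnt) > 0)
		pthread_cond_wait(&wq_cond, &wq_lock);
	pthread_mutex_unlock(&wq_lock);
}

int main(void)
{
	pthread_t irq;

	fake_prepare_for_kickoff();
	pthread_create(&irq, NULL, fake_wr_ptr_irq, NULL);
	fake_wait_for_wr_ptr();
	pthread_join(irq, NULL);
	printf("commit done\n");
	return 0;
}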