diff --git a/msm/dsi/dsi_catalog.c b/msm/dsi/dsi_catalog.c
index 37a790cc65..495b4b180e 100644
--- a/msm/dsi/dsi_catalog.c
+++ b/msm/dsi/dsi_catalog.c
@@ -30,6 +30,7 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
 	ctrl->ops.reset_cmd_fifo = dsi_ctrl_hw_cmn_reset_cmd_fifo;
 	ctrl->ops.trigger_command_dma = dsi_ctrl_hw_cmn_trigger_command_dma;
 	ctrl->ops.get_interrupt_status = dsi_ctrl_hw_cmn_get_interrupt_status;
+	ctrl->ops.poll_dma_status = dsi_ctrl_hw_cmn_poll_dma_status;
 	ctrl->ops.get_error_status = dsi_ctrl_hw_cmn_get_error_status;
 	ctrl->ops.clear_error_status = dsi_ctrl_hw_cmn_clear_error_status;
 	ctrl->ops.clear_interrupt_status =
diff --git a/msm/dsi/dsi_catalog.h b/msm/dsi/dsi_catalog.h
index 7ca0dac0b2..1e02beb50c 100644
--- a/msm/dsi/dsi_catalog.h
+++ b/msm/dsi/dsi_catalog.h
@@ -128,6 +128,7 @@ void dsi_phy_hw_v4_0_commit_phy_timing(struct dsi_phy_hw *phy,
 
 /* DSI controller common ops */
 u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
+u32 dsi_ctrl_hw_cmn_poll_dma_status(struct dsi_ctrl_hw *ctrl);
 void dsi_ctrl_hw_cmn_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints);
 void dsi_ctrl_hw_cmn_enable_status_interrupts(struct dsi_ctrl_hw *ctrl, u32 ints);
diff --git a/msm/dsi/dsi_ctrl.c b/msm/dsi/dsi_ctrl.c
index ee04db9155..fc2e8b362d 100644
--- a/msm/dsi/dsi_ctrl.c
+++ b/msm/dsi/dsi_ctrl.c
@@ -370,33 +370,13 @@ dsi_ctrl_get_aspace(struct dsi_ctrl *dsi_ctrl,
 	return msm_gem_smmu_address_space_get(dsi_ctrl->drm_dev, domain);
 }
 
-static void dsi_ctrl_flush_cmd_dma_queue(struct dsi_ctrl *dsi_ctrl)
-{
-	/*
-	 * If a command is triggered right after another command,
-	 * check if the previous command transfer is completed. If
-	 * transfer is done, cancel any work that has been
-	 * queued. Otherwise wait till the work is scheduled and
-	 * completed before triggering the next command by
-	 * flushing the workqueue.
-	 */
-	if (atomic_read(&dsi_ctrl->dma_irq_trig)) {
-		cancel_work_sync(&dsi_ctrl->dma_cmd_wait);
-	} else {
-		flush_workqueue(dsi_ctrl->dma_cmd_workq);
-		SDE_EVT32(SDE_EVTLOG_FUNC_CASE2);
-	}
-}
-
-static void dsi_ctrl_dma_cmd_wait_for_done(struct work_struct *work)
+static void dsi_ctrl_dma_cmd_wait_for_done(struct dsi_ctrl *dsi_ctrl)
 {
 	int ret = 0;
-	struct dsi_ctrl *dsi_ctrl = NULL;
 	u32 status;
 	u32 mask = DSI_CMD_MODE_DMA_DONE;
 	struct dsi_ctrl_hw_ops dsi_hw_ops;
 
-	dsi_ctrl = container_of(work, struct dsi_ctrl, dma_cmd_wait);
 	dsi_hw_ops = dsi_ctrl->hw.ops;
 	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
 
@@ -405,7 +385,7 @@ static void dsi_ctrl_dma_cmd_wait_for_done(struct work_struct *work)
 	 * so the wait is not needed.
 	 */
 	if (atomic_read(&dsi_ctrl->dma_irq_trig))
-		goto done;
+		return;
 
 	ret = wait_for_completion_timeout(
 			&dsi_ctrl->irq_info.cmd_dma_done,
@@ -416,9 +396,11 @@ static void dsi_ctrl_dma_cmd_wait_for_done(struct work_struct *work)
 			status |= (DSI_CMD_MODE_DMA_DONE | DSI_BTA_DONE);
 			dsi_hw_ops.clear_interrupt_status(&dsi_ctrl->hw,
 					status);
+			SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_CASE1);
 			DSI_CTRL_WARN(dsi_ctrl,
 					"dma_tx done but irq not triggered\n");
 		} else {
+			SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_ERROR);
 			DSI_CTRL_ERR(dsi_ctrl,
 					"Command transfer failed\n");
 		}
@@ -426,8 +408,110 @@ static void dsi_ctrl_dma_cmd_wait_for_done(struct work_struct *work)
 				DSI_SINT_CMD_MODE_DMA_DONE);
 	}
 
-done:
-	dsi_ctrl->dma_wait_queued = false;
+}
+
+/**
+ * dsi_ctrl_clear_dma_status - API to clear DMA status
+ * @dsi_ctrl: DSI controller handle.
+ */
+static void dsi_ctrl_clear_dma_status(struct dsi_ctrl *dsi_ctrl)
+{
+	struct dsi_ctrl_hw_ops dsi_hw_ops;
+	u32 status = 0;
+
+	if (!dsi_ctrl) {
+		DSI_CTRL_ERR(dsi_ctrl, "Invalid params\n");
+		return;
+	}
+
+	dsi_hw_ops = dsi_ctrl->hw.ops;
+
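+	/*
+	 * Poll the hardware for DMA completion, then clear both the DMA done
+	 * and BTA done bits so that no stale status carries over to the next
+	 * command transfer.
+	 */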
+	status = dsi_hw_ops.poll_dma_status(&dsi_ctrl->hw);
+	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, status);
+
+	status |= (DSI_CMD_MODE_DMA_DONE | DSI_BTA_DONE);
+	dsi_hw_ops.clear_interrupt_status(&dsi_ctrl->hw, status);
+}
+
+static void dsi_ctrl_post_cmd_transfer(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+	struct dsi_clk_ctrl_info clk_info;
+	u32 mask = BIT(DSI_FIFO_OVERFLOW);
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, dsi_ctrl->cell_index, dsi_ctrl->pending_cmd_flags);
+
+	/* In case of broadcast messages, we poll on the slave controller. */
+	if ((dsi_ctrl->pending_cmd_flags & DSI_CTRL_CMD_BROADCAST) &&
+			!(dsi_ctrl->pending_cmd_flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
+		dsi_ctrl_clear_dma_status(dsi_ctrl);
+	} else {
+		dsi_ctrl_dma_cmd_wait_for_done(dsi_ctrl);
+	}
+
+	/* Command engine disable, unmask overflow, remove vote on clocks and gdsc */
+	rc = dsi_ctrl_set_cmd_engine_state(dsi_ctrl, DSI_CTRL_ENGINE_OFF, false);
+	if (rc)
+		DSI_CTRL_ERR(dsi_ctrl, "failed to disable command engine\n");
+
+	if (dsi_ctrl->pending_cmd_flags & DSI_CTRL_CMD_READ)
+		mask |= BIT(DSI_FIFO_UNDERFLOW);
+
+	dsi_ctrl_mask_error_status_interrupts(dsi_ctrl, mask, false);
+
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+
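+	/* Drop the clock and gdsc votes taken in dsi_ctrl_transfer_prepare() */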
+	clk_info.client = DSI_CLK_REQ_DSI_CLIENT;
+	clk_info.clk_type = DSI_ALL_CLKS;
+	clk_info.clk_state = DSI_CLK_OFF;
+
+	rc = dsi_ctrl->clk_cb.dsi_clk_cb(dsi_ctrl->clk_cb.priv, clk_info);
+	if (rc)
+		DSI_CTRL_ERR(dsi_ctrl, "failed to disable clocks\n");
+
+	(void)pm_runtime_put_sync(dsi_ctrl->drm_dev->dev);
+}
+
+static void dsi_ctrl_post_cmd_transfer_work(struct work_struct *work)
+{
+	struct dsi_ctrl *dsi_ctrl = NULL;
+
+	dsi_ctrl = container_of(work, struct dsi_ctrl, post_cmd_tx_work);
+
+	dsi_ctrl_post_cmd_transfer(dsi_ctrl);
+	dsi_ctrl->post_tx_queued = false;
+}
+
+static void dsi_ctrl_flush_cmd_dma_queue(struct dsi_ctrl *dsi_ctrl)
+{
+	/*
+	 * If a command is triggered right after another command,
+	 * check if the previous command transfer is completed. If
+	 * the transfer is done, cancel any work that has been
+	 * queued. Otherwise wait till the work is scheduled and
+	 * completed before triggering the next command by
+	 * flushing the workqueue.
+	 *
+	 * cancel_work_sync() returns true if the work was pending and was
+	 * cancelled before it ran. In that case the post_cmd_transfer API
+	 * has to be called explicitly to clean up the state.
+	 */
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
+
+	if (atomic_read(&dsi_ctrl->dma_irq_trig)) {
+		if (cancel_work_sync(&dsi_ctrl->post_cmd_tx_work)) {
+			dsi_ctrl_post_cmd_transfer(dsi_ctrl);
+			dsi_ctrl->post_tx_queued = false;
+		}
+	} else {
+		flush_workqueue(dsi_ctrl->post_cmd_tx_workq);
+		SDE_EVT32(SDE_EVTLOG_FUNC_CASE2);
+	}
 }
 
 static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
@@ -437,7 +521,7 @@ static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
 	int rc = 0;
 	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
 
-	SDE_EVT32(dsi_ctrl->cell_index, op, op_state);
+	SDE_EVT32_VERBOSE(dsi_ctrl->cell_index, op, op_state);
 
 	switch (op) {
 	case DSI_CTRL_OP_POWER_STATE_CHANGE:
@@ -1414,7 +1498,6 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
 
 	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
 		dsi_ctrl_wait_for_video_done(dsi_ctrl);
-		dsi_ctrl_mask_overflow(dsi_ctrl, true);
 
 		atomic_set(&dsi_ctrl->dma_irq_trig, 0);
 		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
@@ -1449,16 +1532,6 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
 				dsi_ctrl->cmd_trigger_frame);
 	}
 
-	if (flags & DSI_CTRL_CMD_ASYNC_WAIT) {
-		dsi_ctrl->dma_wait_queued = true;
-		queue_work(dsi_ctrl->dma_cmd_workq,
-				&dsi_ctrl->dma_cmd_wait);
-	} else {
-		dsi_ctrl->dma_wait_queued = false;
-		dsi_ctrl_dma_cmd_wait_for_done(&dsi_ctrl->dma_cmd_wait);
-	}
-
-	dsi_ctrl_mask_overflow(dsi_ctrl, false);
 
 	dsi_hw_ops.reset_cmd_fifo(&dsi_ctrl->hw);
@@ -1500,9 +1573,7 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl, struct dsi_cmd_desc *cmd_de
 		goto error;
 	}
 
-	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, flags);
-	if (dsi_ctrl->dma_wait_queued)
-		dsi_ctrl_flush_cmd_dma_queue(dsi_ctrl);
+	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, *flags);
 
 	if (*flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
 		cmd_mem.offset = dsi_ctrl->cmd_buffer_iova;
@@ -1545,13 +1616,11 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl, struct dsi_cmd_desc *cmd_de
 	 * as specified by HW limitations. Need to overwrite the flags to
 	 * set the LAST_COMMAND flag to ensure no command transfer failures.
 	 */
-	if ((*flags & DSI_CTRL_CMD_FETCH_MEMORY) &&
-			(*flags & DSI_CTRL_CMD_BROADCAST)) {
-		if ((dsi_ctrl->cmd_len + length) > 240) {
-			dsi_ctrl_mask_overflow(dsi_ctrl, true);
+	if ((*flags & DSI_CTRL_CMD_FETCH_MEMORY) && (*flags & DSI_CTRL_CMD_BROADCAST)) {
+		if (((dsi_ctrl->cmd_len + length) > 240) && !(*flags & DSI_CTRL_CMD_LAST_COMMAND)) {
 			*flags |= DSI_CTRL_CMD_LAST_COMMAND;
-			SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_CASE1,
-					flags);
+			SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_CASE1, *flags);
+			dsi_ctrl_transfer_prepare(dsi_ctrl, *flags);
 		}
 	}
@@ -1734,6 +1803,10 @@ static int dsi_message_rx(struct dsi_ctrl *dsi_ctrl, struct dsi_cmd_desc *cmd_de
 				rc);
 		goto error;
 	}
+
+	/* Wait for read command transfer success */
+	dsi_ctrl_dma_cmd_wait_for_done(dsi_ctrl);
+
 	/*
 	 * wait before reading rdbk_data register, if any delay is
 	 * required after sending the read command.
@@ -2058,7 +2131,7 @@ static int dsi_ctrl_dev_probe(struct platform_device *pdev)
 	dsi_ctrl->irq_info.irq_num = -1;
 	dsi_ctrl->irq_info.irq_stat_mask = 0x0;
 
-	INIT_WORK(&dsi_ctrl->dma_cmd_wait, dsi_ctrl_dma_cmd_wait_for_done);
+	INIT_WORK(&dsi_ctrl->post_cmd_tx_work, dsi_ctrl_post_cmd_transfer_work);
 	atomic_set(&dsi_ctrl->dma_irq_trig, 0);
 
 	spin_lock_init(&dsi_ctrl->irq_info.irq_lock);
@@ -3336,6 +3409,79 @@ int dsi_ctrl_validate_timing(struct dsi_ctrl *dsi_ctrl,
 	return rc;
 }
 
+/**
+ * dsi_ctrl_transfer_prepare() - Set up a command transfer
+ * @dsi_ctrl: DSI controller handle.
+ * @flags: Controller flags of the command.
+ *
+ * Command transfer requires the command engine to be enabled, along with
+ * clock votes and masking of the overflow bits.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_transfer_prepare(struct dsi_ctrl *dsi_ctrl, u32 flags)
+{
+	int rc = 0;
+	struct dsi_clk_ctrl_info clk_info;
+	u32 mask = BIT(DSI_FIFO_OVERFLOW);
+
+	if (!dsi_ctrl)
+		return -EINVAL;
+
+	if (!(flags & DSI_CTRL_CMD_LAST_COMMAND))
+		return rc;
+
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, dsi_ctrl->cell_index, flags);
+
+	/* Vote for clocks, gdsc, enable command engine, mask overflow */
+	rc = pm_runtime_get_sync(dsi_ctrl->drm_dev->dev);
+	if (rc < 0) {
+		DSI_CTRL_ERR(dsi_ctrl, "failed gdsc voting\n");
+		return rc;
+	}
+
+	clk_info.client = DSI_CLK_REQ_DSI_CLIENT;
+	clk_info.clk_type = DSI_ALL_CLKS;
+	clk_info.clk_state = DSI_CLK_ON;
+
+	rc = dsi_ctrl->clk_cb.dsi_clk_cb(dsi_ctrl->clk_cb.priv, clk_info);
+	if (rc) {
+		DSI_CTRL_ERR(dsi_ctrl, "failed to enable clocks\n");
+		goto error_disable_gdsc;
+	}
+
+	/* Wait till any previous ASYNC waits are scheduled and completed */
+	if (dsi_ctrl->post_tx_queued)
+		dsi_ctrl_flush_cmd_dma_queue(dsi_ctrl);
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	if (flags & DSI_CTRL_CMD_READ)
+		mask |= BIT(DSI_FIFO_UNDERFLOW);
+
+	dsi_ctrl_mask_error_status_interrupts(dsi_ctrl, mask, true);
+
+	rc = dsi_ctrl_set_cmd_engine_state(dsi_ctrl, DSI_CTRL_ENGINE_ON, false);
+	if (rc) {
+		DSI_CTRL_ERR(dsi_ctrl, "failed to enable command engine: %d\n", rc);
+		mutex_unlock(&dsi_ctrl->ctrl_lock);
+		goto error_disable_clks;
+	}
+
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+	return rc;
+
+error_disable_clks:
+	clk_info.clk_state = DSI_CLK_OFF;
+	(void)dsi_ctrl->clk_cb.dsi_clk_cb(dsi_ctrl->clk_cb.priv, clk_info);
+
+error_disable_gdsc:
+	(void)pm_runtime_put_sync(dsi_ctrl->drm_dev->dev);
+
+	return rc;
+}
+
 /**
  * dsi_ctrl_cmd_transfer() - Transfer commands on DSI link
  * @dsi_ctrl: DSI controller handle.
@@ -3360,13 +3506,6 @@ int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl, struct dsi_cmd_desc *cmd)
 
 	mutex_lock(&dsi_ctrl->ctrl_lock);
 
-	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CMD_TX, 0x0);
-	if (rc) {
-		DSI_CTRL_ERR(dsi_ctrl, "Controller state check failed, rc=%d\n",
-				rc);
-		goto error;
-	}
-
 	if (cmd->ctrl_flags & DSI_CTRL_CMD_READ) {
 		rc = dsi_message_rx(dsi_ctrl, cmd);
 		if (rc <= 0)
@@ -3381,30 +3520,39 @@ int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl, struct dsi_cmd_desc *cmd)
 
 	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_TX, 0x0);
 
-error:
 	mutex_unlock(&dsi_ctrl->ctrl_lock);
 	return rc;
 }
 
 /**
- * dsi_ctrl_mask_overflow() - API to mask/unmask overflow error.
- * @dsi_ctrl: DSI controller handle.
- * @enable: variable to control masking/unmasking.
+ * dsi_ctrl_transfer_unprepare() - Clean up after a command transfer
+ * @dsi_ctrl: DSI controller handle.
+ * @flags: Controller flags of the command
+ *
+ * After the DSI controller has been programmed to trigger a DCS command,
+ * the post transfer API is used to check for success and clean up the
+ * resources. Depending on the controller flags, this check is either
+ * scheduled on the same thread or queued.
 */
-void dsi_ctrl_mask_overflow(struct dsi_ctrl *dsi_ctrl, bool enable)
+void dsi_ctrl_transfer_unprepare(struct dsi_ctrl *dsi_ctrl, u32 flags)
 {
-	struct dsi_ctrl_hw_ops dsi_hw_ops;
+	if (!dsi_ctrl)
+		return;
 
-	dsi_hw_ops = dsi_ctrl->hw.ops;
+	if (!(flags & DSI_CTRL_CMD_LAST_COMMAND))
+		return;
 
-	if (enable) {
-		if (dsi_hw_ops.mask_error_intr)
-			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
-					BIT(DSI_FIFO_OVERFLOW), true);
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, dsi_ctrl->cell_index, flags);
+
+	dsi_ctrl->pending_cmd_flags = flags;
+
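+	/*
+	 * ASYNC transfers defer the status check and cleanup to the post
+	 * command transfer workqueue; blocking transfers clean up inline.
+	 */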
+	if (flags & DSI_CTRL_CMD_ASYNC_WAIT) {
+		dsi_ctrl->post_tx_queued = true;
+		queue_work(dsi_ctrl->post_cmd_tx_workq, &dsi_ctrl->post_cmd_tx_work);
 	} else {
-		if (dsi_hw_ops.mask_error_intr && !dsi_ctrl->esd_check_underway)
-			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
-					BIT(DSI_FIFO_OVERFLOW), false);
+		dsi_ctrl->post_tx_queued = false;
+		dsi_ctrl_post_cmd_transfer(dsi_ctrl);
 	}
 }
@@ -3512,15 +3660,6 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
 				dsi_ctrl->cmd_trigger_frame);
 	}
 
-	if (flags & DSI_CTRL_CMD_ASYNC_WAIT) {
-		dsi_ctrl->dma_wait_queued = true;
-		queue_work(dsi_ctrl->dma_cmd_workq,
-				&dsi_ctrl->dma_cmd_wait);
-	} else {
-		dsi_ctrl->dma_wait_queued = false;
-		dsi_ctrl_dma_cmd_wait_for_done(&dsi_ctrl->dma_cmd_wait);
-	}
-
 	if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
 		if (dsi_ctrl->version < DSI_CTRL_VERSION_2_4)
 			dsi_hw_ops.soft_reset(&dsi_ctrl->hw);
@@ -3745,10 +3884,21 @@ int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
 		return -EINVAL;
 	}
 
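+	/*
+	 * The cmd engine state is reference counted: only the first ON request
+	 * and the last OFF request program the hardware, nested requests just
+	 * update the count.
+	 */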
+	if (state == DSI_CTRL_ENGINE_ON) {
+		if (dsi_ctrl->cmd_engine_refcount > 0) {
+			dsi_ctrl->cmd_engine_refcount++;
+			goto error;
+		}
+	} else {
+		if (dsi_ctrl->cmd_engine_refcount > 1) {
+			dsi_ctrl->cmd_engine_refcount--;
+			goto error;
+		}
+	}
+
 	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
 	if (rc) {
-		DSI_CTRL_ERR(dsi_ctrl, "Controller state check failed, rc=%d\n",
-				rc);
+		DSI_CTRL_ERR(dsi_ctrl, "Controller state check failed, rc=%d\n", rc);
 		goto error;
 	}
 
@@ -3759,11 +3909,17 @@ int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
 		dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, false);
 	}
 
+	if (state == DSI_CTRL_ENGINE_ON)
+		dsi_ctrl->cmd_engine_refcount++;
+	else
+		dsi_ctrl->cmd_engine_refcount = 0;
+
 	SDE_EVT32(dsi_ctrl->cell_index, state, skip_op);
-	DSI_CTRL_DEBUG(dsi_ctrl, "Set cmd engine state:%d, skip_op:%d\n",
-			state, skip_op);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
 error:
+	DSI_CTRL_DEBUG(dsi_ctrl, "Set cmd engine state:%d, skip_op:%d, enable count: %d\n",
+			state, skip_op, dsi_ctrl->cmd_engine_refcount);
 	return rc;
 }
 
@@ -4009,7 +4165,6 @@ void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl, u32 idx,
 	 * Mask DSI error status interrupts and clear error status
 	 * register
 	 */
-	mutex_lock(&dsi_ctrl->ctrl_lock);
 	if (idx & BIT(DSI_ERR_INTR_ALL)) {
 		/*
 		 * The behavior of mask_enable is different in ctrl register
@@ -4027,7 +4182,6 @@ void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl, u32 idx,
 		dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
 				DSI_ERROR_INTERRUPT_COUNT);
 	}
-	mutex_unlock(&dsi_ctrl->ctrl_lock);
 }
 
 /**
diff --git a/msm/dsi/dsi_ctrl.h b/msm/dsi/dsi_ctrl.h
index d251d40846..f0ef3167bb 100644
--- a/msm/dsi/dsi_ctrl.h
+++ b/msm/dsi/dsi_ctrl.h
@@ -219,11 +219,10 @@ struct dsi_ctrl_interrupts {
  * @vaddr:		CPU virtual address of cmd buffer.
  * @secure_mode:	Indicates if secure-session is in progress
  * @esd_check_underway:	Indicates if esd status check is in progress
- * @dma_cmd_wait:	Work object waiting on DMA command transfer done.
- * @dma_cmd_workq:	Pointer to the workqueue of DMA command transfer done
- *			wait sequence.
- * @dma_wait_queued:	Indicates if any DMA command transfer wait work
- *			is queued.
+ * @post_cmd_tx_work:	Work object to clean up post command transfer.
+ * @post_cmd_tx_workq:	Pointer to the workqueue of post command transfer work.
+ * @post_tx_queued:	Indicates if any DMA command post transfer work
+ *			is queued.
  * @dma_irq_trig:	Atomic state to indicate DMA done IRQ
  *			triggered.
  * @debugfs_root:	Root for debugfs entries.
@@ -251,6 +250,8 @@ struct dsi_ctrl_interrupts {
  *			   which command transfer is successful.
  * @cmd_success_frame:	   unsigned integer that indicates the frame at
  *			   which command transfer is successful.
+ * @cmd_engine_refcount:   Reference count enforcing single instance of cmd engine
+ * @pending_cmd_flags:	   Flags associated with command that is currently being txed or pending.
  */
 struct dsi_ctrl {
 	struct platform_device *pdev;
@@ -289,9 +290,9 @@ struct dsi_ctrl {
 	void *vaddr;
 	bool secure_mode;
 	bool esd_check_underway;
-	struct work_struct dma_cmd_wait;
-	struct workqueue_struct *dma_cmd_workq;
-	bool dma_wait_queued;
+	struct work_struct post_cmd_tx_work;
+	struct workqueue_struct *post_cmd_tx_workq;
+	bool post_tx_queued;
 	atomic_t dma_irq_trig;
 
 	/* Debug Information */
@@ -317,6 +318,8 @@ struct dsi_ctrl {
 	u32 cmd_trigger_frame;
 	u32 cmd_success_line;
 	u32 cmd_success_frame;
+	u32 cmd_engine_refcount;
+	u32 pending_cmd_flags;
 };
 
 /**
@@ -578,6 +581,18 @@ int dsi_ctrl_set_roi(struct dsi_ctrl *dsi_ctrl, struct dsi_rect *roi,
  */
 int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on);
 
+/**
+ * dsi_ctrl_transfer_prepare() - Set up a command transfer
+ * @dsi_ctrl: DSI controller handle.
+ * @flags: Controller flags of the command.
+ *
+ * Command transfer requires the command engine to be enabled, along with
+ * clock votes and masking of the overflow bits.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_transfer_prepare(struct dsi_ctrl *dsi_ctrl, u32 flags);
+
 /**
  * dsi_ctrl_cmd_transfer() - Transfer commands on DSI link
  * @dsi_ctrl: DSI controller handle.
@@ -592,6 +607,19 @@ int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on);
  */
 int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl, struct dsi_cmd_desc *cmd);
 
+/**
+ * dsi_ctrl_transfer_unprepare() - Clean up after a command transfer
+ * @dsi_ctrl: DSI controller handle.
+ * @flags: Controller flags of the command
+ *
+ * After the DSI controller has been programmed to trigger a DCS command,
+ * the post transfer API is used to check for success and clean up the
+ * resources. Depending on the controller flags, this check is either
+ * scheduled on the same thread or queued.
+ */
+void dsi_ctrl_transfer_unprepare(struct dsi_ctrl *dsi_ctrl, u32 flags);
+
 /**
  * dsi_ctrl_cmd_tx_trigger() - Trigger a deferred command.
  * @dsi_ctrl: DSI controller handle.
@@ -892,11 +920,4 @@ int dsi_ctrl_wait4dynamic_refresh_done(struct dsi_ctrl *ctrl);
  */
 int dsi_ctrl_get_io_resources(struct msm_io_res *io_res);
 
-/**
- * dsi_ctrl_mask_overflow() - API to mask/unmask overflow errors.
- * @dsi_ctrl: DSI controller handle.
- * @enable: variable to control masking/unmasking.
- */
-void dsi_ctrl_mask_overflow(struct dsi_ctrl *dsi_ctrl, bool enable);
-
 #endif /* _DSI_CTRL_H_ */
diff --git a/msm/dsi/dsi_ctrl_hw.h b/msm/dsi/dsi_ctrl_hw.h
index a1a7cf1bc9..ea50619612 100644
--- a/msm/dsi/dsi_ctrl_hw.h
+++ b/msm/dsi/dsi_ctrl_hw.h
@@ -643,6 +643,12 @@ struct dsi_ctrl_hw_ops {
 	 */
 	void (*clear_interrupt_status)(struct dsi_ctrl_hw *ctrl, u32 ints);
 
+	/**
+	 * poll_dma_status() - API to poll DMA status
+	 * @ctrl:	Pointer to the controller host hardware.
+	 */
+	u32 (*poll_dma_status)(struct dsi_ctrl_hw *ctrl);
+
 	/**
 	 * enable_status_interrupts() - enable the specified interrupts
 	 * @ctrl:	Pointer to the controller host hardware.
diff --git a/msm/dsi/dsi_ctrl_hw_cmn.c b/msm/dsi/dsi_ctrl_hw_cmn.c
index f48c35d909..ef39458947 100644
--- a/msm/dsi/dsi_ctrl_hw_cmn.c
+++ b/msm/dsi/dsi_ctrl_hw_cmn.c
@@ -945,6 +945,29 @@ u32 dsi_ctrl_hw_cmn_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
 	return rx_byte;
 }
 
+/**
+ * poll_dma_status() - API to poll DMA status
+ * @ctrl:	Pointer to the controller host hardware.
+ *
+ * Return: DMA status.
+ */
+u32 dsi_ctrl_hw_cmn_poll_dma_status(struct dsi_ctrl_hw *ctrl)
+{
+	int rc = 0;
+	u32 status;
+	u32 const delay_us = 10;
+	u32 const timeout_us = 5000;
+
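+	/* Sample DSI_INT_CTRL every 10us and give up after 5ms without DMA done */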
+	rc = readl_poll_timeout_atomic(ctrl->base + DSI_INT_CTRL, status,
+			((status & DSI_CMD_MODE_DMA_DONE) > 0), delay_us, timeout_us);
+	if (rc) {
+		DSI_CTRL_HW_DBG(ctrl, "CMD_MODE_DMA_DONE failed\n");
+		status = 0;
+	}
+
+	return status;
+}
+
 /**
  * get_interrupt_status() - returns the interrupt status
  * @ctrl:	Pointer to the controller host hardware.
diff --git a/msm/dsi/dsi_display.c b/msm/dsi/dsi_display.c
index b87d1cad91..33375008e9 100644
--- a/msm/dsi/dsi_display.c
+++ b/msm/dsi/dsi_display.c
@@ -78,9 +78,14 @@ static void dsi_display_mask_ctrl_error_interrupts(struct dsi_display *display,
 
 	display_for_each_ctrl(i, display) {
 		ctrl = &display->ctrl[i];
-		if (!ctrl)
+		if ((!ctrl) || (!ctrl->ctrl))
 			continue;
+
+		mutex_lock(&ctrl->ctrl->ctrl_lock);
+
 		dsi_ctrl_mask_error_status_interrupts(ctrl->ctrl, mask, enable);
+
+		mutex_unlock(&ctrl->ctrl->ctrl_lock);
 	}
 }
@@ -253,26 +258,11 @@ int dsi_display_set_backlight(struct drm_connector *connector,
 	DSI_DEBUG("bl_scale = %u, bl_scale_sv = %u, bl_lvl = %u\n",
 		bl_scale, bl_scale_sv, (u32)bl_temp);
 
-	rc = dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
-			DSI_CORE_CLK, DSI_CLK_ON);
-	if (rc) {
-		DSI_ERR("[%s] failed to enable DSI core clocks, rc=%d\n",
-			dsi_display->name, rc);
-		goto error;
-	}
-
 	rc = dsi_panel_set_backlight(panel, (u32)bl_temp);
 	if (rc)
 		DSI_ERR("unable to set backlight\n");
 
-	rc = dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
-			DSI_CORE_CLK, DSI_CLK_OFF);
-	if (rc) {
-		DSI_ERR("[%s] failed to disable DSI core clocks, rc=%d\n",
-			dsi_display->name, rc);
-		goto error;
-	}
-
 error:
 	mutex_unlock(&panel->panel_lock);
 	return rc;
@@ -288,11 +278,6 @@ static int dsi_display_cmd_engine_enable(struct dsi_display *display)
 	m_ctrl = &display->ctrl[display->cmd_master_idx];
 	mutex_lock(&m_ctrl->ctrl->ctrl_lock);
 
-	if (display->cmd_engine_refcount > 0) {
-		display->cmd_engine_refcount++;
-		goto done;
-	}
-
 	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl,
 			DSI_CTRL_ENGINE_ON, skip_op);
 	if (rc) {
@@ -316,7 +301,6 @@ static int dsi_display_cmd_engine_enable(struct dsi_display *display)
 		}
 	}
 
-	display->cmd_engine_refcount++;
 	goto done;
 error_disable_master:
 	(void)dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl,
@@ -336,14 +320,6 @@ static int dsi_display_cmd_engine_disable(struct dsi_display *display)
 	m_ctrl = &display->ctrl[display->cmd_master_idx];
 	mutex_lock(&m_ctrl->ctrl->ctrl_lock);
 
-	if (display->cmd_engine_refcount == 0) {
-		DSI_ERR("[%s] Invalid refcount\n", display->name);
-		goto done;
-	} else if (display->cmd_engine_refcount > 1) {
-		display->cmd_engine_refcount--;
-		goto done;
-	}
-
 	display_for_each_ctrl(i, display) {
 		ctrl = &display->ctrl[i];
 		if (!ctrl->ctrl || (ctrl == m_ctrl))
@@ -359,15 +335,10 @@ static int dsi_display_cmd_engine_disable(struct dsi_display *display)
 
 	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl,
 			DSI_CTRL_ENGINE_OFF, skip_op);
-	if (rc) {
+	if (rc)
 		DSI_ERR("[%s] disable mcmd engine failed, skip_op:%d rc:%d\n",
 			display->name, skip_op, rc);
-		goto error;
-	}
 
-error:
-	display->cmd_engine_refcount = 0;
-done:
 	mutex_unlock(&m_ctrl->ctrl->ctrl_lock);
 	return rc;
 }
@@ -754,7 +725,6 @@ static void dsi_display_set_cmd_tx_ctrl_flags(struct dsi_display *display,
 	 * 1) queue_cmd_waits is set by connector and
 	 *    - commands are not sent using DSI FIFO memory
 	 *    - commands are not sent in non-embedded mode
-	 *    - not a video mode panel
 	 *    - no explicit msg post_wait_ms is specified
 	 *    - not a read command
 	 * 2) if async override msg flag is present
@@ -762,7 +732,6 @@ static void dsi_display_set_cmd_tx_ctrl_flags(struct dsi_display *display,
 	if (display->queue_cmd_waits)
 		if (!(flags & DSI_CTRL_CMD_FIFO_STORE) &&
 				!(flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) &&
-				!(display->panel->panel_mode == DSI_OP_VIDEO_MODE) &&
 				(cmd->post_wait_ms == 0) &&
 				!(cmd->ctrl_flags & DSI_CTRL_CMD_READ))
 			flags |= DSI_CTRL_CMD_ASYNC_WAIT;
@@ -812,15 +781,22 @@ static int dsi_display_read_status(struct dsi_display_ctrl *ctrl,
 		cmds[i].msg.rx_len = config->status_cmds_rlen[i];
 		cmds[i].ctrl_flags = flags;
 		dsi_display_set_cmd_tx_ctrl_flags(display,&cmds[i]);
-		rc = dsi_ctrl_cmd_transfer(ctrl->ctrl, &cmds[i]);
-		if (rc <= 0) {
-			DSI_ERR("rx cmd transfer failed rc=%d\n", rc);
+
+		rc = dsi_ctrl_transfer_prepare(ctrl->ctrl, cmds[i].ctrl_flags);
+		if (rc) {
+			DSI_ERR("prepare for rx cmd transfer failed rc=%d\n", rc);
 			return rc;
 		}
 
-		memcpy(config->return_buf + start,
-			config->status_buf, lenp[i]);
-		start += lenp[i];
+		rc = dsi_ctrl_cmd_transfer(ctrl->ctrl, &cmds[i]);
+		if (rc <= 0) {
+			DSI_ERR("rx cmd transfer failed rc=%d\n", rc);
+		} else {
+			memcpy(config->return_buf + start,
+				config->status_buf, lenp[i]);
+			start += lenp[i];
+		}
+
+		dsi_ctrl_transfer_unprepare(ctrl->ctrl, cmds[i].ctrl_flags);
 	}
 
 	return rc;
@@ -867,21 +843,15 @@ static int dsi_display_status_reg_read(struct dsi_display *display)
 		}
 	}
 
-	rc = dsi_display_cmd_engine_enable(display);
-	if (rc) {
-		DSI_ERR("cmd engine enable failed\n");
-		return -EPERM;
-	}
-
 	rc = dsi_display_validate_status(m_ctrl, display);
 	if (rc <= 0) {
 		DSI_ERR("[%s] read status failed on master,rc=%d\n",
 			display->name, rc);
-		goto exit;
+		goto done;
 	}
 
 	if (!display->panel->sync_broadcast_en)
-		goto exit;
+		goto done;
 
 	display_for_each_ctrl(i, display) {
 		ctrl = &display->ctrl[i];
@@ -892,11 +862,10 @@ static int dsi_display_status_reg_read(struct dsi_display *display)
 		if (rc <= 0) {
 			DSI_ERR("[%s] read status failed on slave,rc=%d\n",
 				display->name, rc);
-			goto exit;
+			goto done;
 		}
 	}
-exit:
-	dsi_display_cmd_engine_disable(display);
+done:
 	return rc;
 }
@@ -956,8 +925,7 @@ int dsi_display_check_status(struct drm_connector *connector, void *display,
 	struct dsi_display *dsi_display = display;
 	struct dsi_panel *panel;
 	u32 status_mode;
-	int rc = 0x1, ret;
-	u32 mask;
+	int rc = 0x1;
 	int te_rechecks = 1;
 
 	if (!dsi_display || !dsi_display->panel)
@@ -994,15 +962,7 @@ int dsi_display_check_status(struct drm_connector *connector, void *display,
 	    (panel->panel_mode == DSI_OP_VIDEO_MODE))
 		te_rechecks = 0;
 
-	ret = dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_ON);
-	if (ret)
-		goto release_panel_lock;
-
-	/* Mask error interrupts before attempting ESD read */
-	mask = BIT(DSI_FIFO_OVERFLOW) | BIT(DSI_FIFO_UNDERFLOW);
 	dsi_display_set_ctrl_esd_check_flag(dsi_display, true);
-	dsi_display_mask_ctrl_error_interrupts(dsi_display, mask, true);
 
 	if (status_mode == ESD_MODE_REG_READ) {
 		rc = dsi_display_status_reg_read(dsi_display);
@@ -1018,19 +978,13 @@ int dsi_display_check_status(struct drm_connector *connector, void *display,
 	if (rc <= 0 && te_check_override)
 		rc = dsi_display_status_check_te(dsi_display, te_rechecks);
 
-	/* Unmask error interrupts if check passed*/
 	if (rc > 0) {
 		dsi_display_set_ctrl_esd_check_flag(dsi_display, false);
-		dsi_display_mask_ctrl_error_interrupts(dsi_display, mask,
-							false);
 		if (te_check_override && panel->esd_config.esd_enabled == false)
 			rc = dsi_display_status_check_te(dsi_display,
 					te_rechecks);
 	}
 
-	dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
-		DSI_ALL_CLKS, DSI_CLK_OFF);
-
 	/* Handle Panel failures during display disable sequence */
 	if (rc <=0)
 		atomic_set(&panel->esd_recovery_pending, 1);
@@ -1061,7 +1015,7 @@ static int dsi_display_cmd_rx(struct dsi_display *display,
 			      struct dsi_cmd_desc *cmd)
 {
 	struct dsi_display_ctrl *m_ctrl = NULL;
-	u32 mask = 0, flags = 0;
+	u32 flags = 0;
 	int rc = 0;
 
 	if (!display || !display->panel)
@@ -1078,33 +1032,20 @@ static int dsi_display_cmd_rx(struct dsi_display *display,
 		goto release_panel_lock;
 	}
 
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_ON);
-	if (rc)
-		goto release_panel_lock;
-
-	mask = BIT(DSI_FIFO_OVERFLOW) | BIT(DSI_FIFO_UNDERFLOW);
-	dsi_display_mask_ctrl_error_interrupts(display, mask, true);
-	rc = dsi_display_cmd_engine_enable(display);
-	if (rc) {
-		DSI_ERR("cmd engine enable failed rc = %d\n", rc);
-		goto error;
-	}
-
 	flags = DSI_CTRL_CMD_READ;
 
 	cmd->ctrl_flags = flags;
 	dsi_display_set_cmd_tx_ctrl_flags(display, cmd);
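+	/*
+	 * transfer_prepare() votes for clocks/gdsc and enables the cmd engine;
+	 * it is paired with transfer_unprepare() once the read completes.
+	 */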
+	rc = dsi_ctrl_transfer_prepare(m_ctrl->ctrl, cmd->ctrl_flags);
+	if (rc) {
+		DSI_ERR("prepare for rx cmd transfer failed rc = %d\n", rc);
+		goto release_panel_lock;
+	}
 
 	rc = dsi_ctrl_cmd_transfer(m_ctrl->ctrl, cmd);
 	if (rc <= 0)
 		DSI_ERR("rx cmd transfer failed rc = %d\n", rc);
 
+	dsi_ctrl_transfer_unprepare(m_ctrl->ctrl, cmd->ctrl_flags);
-	dsi_display_cmd_engine_disable(display);
-
-error:
-	dsi_display_mask_ctrl_error_interrupts(display, mask, false);
-	dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_OFF);
 release_panel_lock:
 	dsi_panel_release_panel_lock(display->panel);
 	return rc;
@@ -3020,6 +2961,20 @@ static int dsi_display_ctrl_host_disable(struct dsi_display *display)
 	struct dsi_display_ctrl *m_ctrl, *ctrl;
 	bool skip_op = is_skip_op_required(display);
 
+	/*
+	 * This is a defensive check. In reality, as this is called after the
+	 * panel OFF commands, which can never be ASYNC, the controller
+	 * post_tx_queued flag will never be set when this API is called.
+	 */
+	display_for_each_ctrl(i, display) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || !(ctrl->ctrl->post_tx_queued))
+			continue;
+
+		flush_workqueue(display->post_cmd_tx_workq);
+		cancel_work_sync(&ctrl->ctrl->post_cmd_tx_work);
+		ctrl->ctrl->post_tx_queued = false;
+	}
+
 	m_ctrl = &display->ctrl[display->cmd_master_idx];
 	/*
 	 * For platforms where ULPS is controlled by DSI controller block,
@@ -3213,35 +3168,34 @@ static int dsi_display_wake_up(struct dsi_display *display)
 	return 0;
 }
 
-static void dsi_display_mask_overflow(struct dsi_display *display, u32 flags,
-		bool enable)
-{
-	struct dsi_display_ctrl *ctrl;
-	int i;
-
-	if (!(flags & DSI_CTRL_CMD_LAST_COMMAND))
-		return;
-
-	display_for_each_ctrl(i, display) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl)
-			continue;
-		dsi_ctrl_mask_overflow(ctrl->ctrl, enable);
-	}
-}
-
 static int dsi_display_broadcast_cmd(struct dsi_display *display, struct dsi_cmd_desc *cmd)
 {
 	int rc = 0;
 	struct dsi_display_ctrl *ctrl, *m_ctrl;
 	int i;
+	u32 flags = 0;
 
 	/*
 	 * 1. Setup commands in FIFO
	 * 2. Trigger commands
 	 */
 	m_ctrl = &display->ctrl[display->cmd_master_idx];
-	dsi_display_mask_overflow(display, cmd->ctrl_flags, true);
+
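+	/*
+	 * Prepare all controllers for the transfer; only the master is tagged
+	 * with DSI_CTRL_CMD_BROADCAST_MASTER.
+	 */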
+	display_for_each_ctrl(i, display) {
+		ctrl = &display->ctrl[i];
+		flags = cmd->ctrl_flags;
+		if (ctrl == m_ctrl)
+			flags |= DSI_CTRL_CMD_BROADCAST_MASTER;
+
+		rc = dsi_ctrl_transfer_prepare(ctrl->ctrl, flags);
+		if (rc) {
+			DSI_ERR("[%s] prepare for cmd transfer failed,rc=%d\n",
+					display->name, rc);
+			if (ctrl != m_ctrl)
+				dsi_ctrl_transfer_unprepare(m_ctrl->ctrl, flags |
+						DSI_CTRL_CMD_BROADCAST_MASTER);
+			return rc;
+		}
+	}
 
 	cmd->ctrl_flags |= DSI_CTRL_CMD_BROADCAST_MASTER;
 	rc = dsi_ctrl_cmd_transfer(m_ctrl->ctrl, cmd);
@@ -3250,8 +3204,8 @@ static int dsi_display_broadcast_cmd(struct dsi_display *display, struct dsi_cmd
 			display->name, rc);
 		goto error;
 	}
-	cmd->ctrl_flags &= ~DSI_CTRL_CMD_BROADCAST_MASTER;
 
+	cmd->ctrl_flags &= ~DSI_CTRL_CMD_BROADCAST_MASTER;
 	display_for_each_ctrl(i, display) {
 		ctrl = &display->ctrl[i];
 		if (ctrl == m_ctrl)
@@ -3280,7 +3234,14 @@ static int dsi_display_broadcast_cmd(struct dsi_display *display, struct dsi_cmd
 	}
 
 error:
-	dsi_display_mask_overflow(display, cmd->ctrl_flags, false);
+	display_for_each_ctrl(i, display) {
+		ctrl = &display->ctrl[i];
+		flags = cmd->ctrl_flags;
+		if (ctrl == m_ctrl)
+			flags |= DSI_CTRL_CMD_BROADCAST_MASTER;
+
+		dsi_ctrl_transfer_unprepare(ctrl->ctrl, flags);
+	}
+
 	return rc;
 }
 
@@ -3341,7 +3302,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
 int dsi_host_transfer_sub(struct mipi_dsi_host *host, struct dsi_cmd_desc *cmd)
 {
 	struct dsi_display *display;
-	int rc = 0, ret = 0;
+	int rc = 0;
 
 	if (!host || !cmd) {
 		DSI_ERR("Invalid params\n");
@@ -3356,33 +3317,17 @@ int dsi_host_transfer_sub(struct mipi_dsi_host *host, struct dsi_cmd_desc *cmd)
 		return 0;
 	}
 
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_ON);
-	if (rc) {
-		DSI_ERR("[%s] failed to enable all DSI clocks, rc=%d\n",
-			display->name, rc);
-		goto error;
-	}
-
 	rc = dsi_display_wake_up(display);
 	if (rc) {
-		DSI_ERR("[%s] failed to wake up display, rc=%d\n",
-			display->name, rc);
-		goto error_disable_clks;
-	}
-
-	rc = dsi_display_cmd_engine_enable(display);
-	if (rc) {
-		DSI_ERR("[%s] failed to enable cmd engine, rc=%d\n",
-			display->name, rc);
-		goto error_disable_clks;
+		DSI_ERR("[%s] failed to wake up display, rc=%d\n", display->name, rc);
+		goto error;
 	}
 
 	if (display->tx_cmd_buf == NULL) {
 		rc = dsi_host_alloc_cmd_tx_buffer(display);
 		if (rc) {
 			DSI_ERR("failed to allocate cmd tx buffer memory\n");
-			goto error_disable_cmd_engine;
+			goto error;
 		}
 	}
@@ -3392,32 +3337,24 @@ int dsi_host_transfer_sub(struct mipi_dsi_host *host, struct dsi_cmd_desc *cmd)
 		rc = dsi_display_broadcast_cmd(display, cmd);
 		if (rc) {
 			DSI_ERR("[%s] cmd broadcast failed, rc=%d\n", display->name, rc);
-			goto error_disable_cmd_engine;
+			goto error;
 		}
 	} else {
 		int idx = cmd->ctrl;
 
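+		/* Single controller path: prepare, transfer and unprepare in order */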
memory\n"); - goto error_disable_cmd_engine; + goto error; } } @@ -3392,32 +3337,24 @@ int dsi_host_transfer_sub(struct mipi_dsi_host *host, struct dsi_cmd_desc *cmd) rc = dsi_display_broadcast_cmd(display, cmd); if (rc) { DSI_ERR("[%s] cmd broadcast failed, rc=%d\n", display->name, rc); - goto error_disable_cmd_engine; + goto error; } } else { int idx = cmd->ctrl; - rc = dsi_ctrl_cmd_transfer(display->ctrl[idx].ctrl, cmd); + rc = dsi_ctrl_transfer_prepare(display->ctrl[idx].ctrl, cmd->ctrl_flags); if (rc) { - DSI_ERR("[%s] cmd transfer failed, rc=%d\n", - display->name, rc); - goto error_disable_cmd_engine; + DSI_ERR("failed to prepare for command transfer: %d\n", rc); + goto error; } + + rc = dsi_ctrl_cmd_transfer(display->ctrl[idx].ctrl, cmd); + if (rc) + DSI_ERR("[%s] cmd transfer failed, rc=%d\n", display->name, rc); + + dsi_ctrl_transfer_unprepare(display->ctrl[idx].ctrl, cmd->ctrl_flags); } -error_disable_cmd_engine: - ret = dsi_display_cmd_engine_disable(display); - if (ret) { - DSI_ERR("[%s]failed to disable DSI cmd engine, rc=%d\n", - display->name, ret); - } -error_disable_clks: - ret = dsi_display_clk_ctrl(display->dsi_clk_handle, - DSI_ALL_CLKS, DSI_CLK_OFF); - if (ret) { - DSI_ERR("[%s] failed to disable all DSI clocks, rc=%d\n", - display->name, ret); - } error: return rc; } @@ -3627,22 +3564,6 @@ int dsi_pre_clkoff_cb(void *priv, struct dsi_display *display = priv; struct dsi_display_ctrl *ctrl; - - /* - * If Idle Power Collapse occurs immediately after a CMD - * transfer with an asynchronous wait for DMA done, ensure - * that the work queued is scheduled and completed before turning - * off the clocks and disabling interrupts to validate the command - * transfer. - */ - display_for_each_ctrl(i, display) { - ctrl = &display->ctrl[i]; - if (!ctrl->ctrl || !ctrl->ctrl->dma_wait_queued) - continue; - flush_workqueue(display->dma_cmd_workq); - cancel_work_sync(&ctrl->ctrl->dma_cmd_wait); - ctrl->ctrl->dma_wait_queued = false; - } if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF) && (l_type & DSI_LINK_LP_CLK)) { /* @@ -5619,7 +5540,7 @@ static int dsi_display_bind(struct device *dev, goto error_ctrl_deinit; } - display_ctrl->ctrl->dma_cmd_workq = display->dma_cmd_workq; + display_ctrl->ctrl->post_cmd_tx_workq = display->post_cmd_tx_workq; memcpy(&info.c_clks[i], (&display_ctrl->ctrl->clk_info.core_clks), sizeof(struct dsi_core_clk_info)); @@ -5796,7 +5717,7 @@ static void dsi_display_unbind(struct device *dev, DSI_ERR("[%s] failed to deinit phy%d driver, rc=%d\n", display->name, i, rc); - display->ctrl->ctrl->dma_cmd_workq = NULL; + display->ctrl->ctrl->post_cmd_tx_workq = NULL; rc = dsi_ctrl_drv_deinit(display_ctrl->ctrl); if (rc) DSI_ERR("[%s] failed to deinit ctrl%d driver, rc=%d\n", @@ -5913,9 +5834,9 @@ int dsi_display_dev_probe(struct platform_device *pdev) goto end; } - display->dma_cmd_workq = create_singlethread_workqueue( - "dsi_dma_cmd_workq"); - if (!display->dma_cmd_workq) { + display->post_cmd_tx_workq = create_singlethread_workqueue( + "dsi_post_cmd_tx_workq"); + if (!display->post_cmd_tx_workq) { DSI_ERR("failed to create work queue\n"); rc = -EINVAL; goto end; @@ -6023,15 +5944,15 @@ int dsi_display_dev_remove(struct platform_device *pdev) /* decrement ref count */ of_node_put(display->panel_node); - if (display->dma_cmd_workq) { - flush_workqueue(display->dma_cmd_workq); - destroy_workqueue(display->dma_cmd_workq); - display->dma_cmd_workq = NULL; + if (display->post_cmd_tx_workq) { + flush_workqueue(display->post_cmd_tx_workq); + 
+		destroy_workqueue(display->post_cmd_tx_workq);
+		display->post_cmd_tx_workq = NULL;
 
 		display_for_each_ctrl(i, display) {
 			ctrl = &display->ctrl[i];
 			if (!ctrl->ctrl)
 				continue;
-			ctrl->ctrl->dma_cmd_workq = NULL;
+			ctrl->ctrl->post_cmd_tx_workq = NULL;
 		}
 	}
 
@@ -8480,44 +8401,15 @@ int dsi_display_pre_disable(struct dsi_display *display)
 	if (display->config.panel_mode == DSI_OP_CMD_MODE)
 		dsi_panel_switch_cmd_mode_out(display->panel);
 
-		if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
-			/*
-			 * Add unbalanced vote for clock & cmd engine to enable
-			 * async trigger of pre video to cmd mode switch.
-			 */
-			rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-					DSI_ALL_CLKS, DSI_CLK_ON);
-			if (rc) {
-				DSI_ERR("[%s]failed to enable all clocks,rc=%d",
-						display->name, rc);
-				goto exit;
-			}
-
-			rc = dsi_display_cmd_engine_enable(display);
-			if (rc) {
-				DSI_ERR("[%s]failed to enable cmd engine,rc=%d",
-						display->name, rc);
-				goto error_disable_clks;
-			}
-
+		if (display->config.panel_mode == DSI_OP_VIDEO_MODE)
 			dsi_panel_switch_video_mode_out(display->panel);
-		}
 	} else {
 		rc = dsi_panel_pre_disable(display->panel);
 		if (rc)
 			DSI_ERR("[%s] panel pre-disable failed, rc=%d\n",
 				display->name, rc);
 	}
 
-	goto exit;
-
-error_disable_clks:
-	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
-			DSI_ALL_CLKS, DSI_CLK_OFF);
-	if (rc)
-		DSI_ERR("[%s] failed to disable all DSI clocks, rc=%d\n",
-			display->name, rc);
-
-exit:
 	mutex_unlock(&display->display_lock);
 	return rc;
 }
@@ -8736,8 +8628,7 @@ end:
 
 int dsi_display_unprepare(struct dsi_display *display)
 {
-	int rc = 0, i;
-	struct dsi_display_ctrl *ctrl;
+	int rc = 0;
 
 	if (!display) {
 		DSI_ERR("Invalid params\n");
@@ -8758,23 +8649,6 @@ int dsi_display_unprepare(struct dsi_display *display)
 			display->name, rc);
 	}
 
-	/* Remove additional vote added for pre_mode_switch_to_cmd */
-	if (display->poms_pending &&
-			display->config.panel_mode == DSI_OP_VIDEO_MODE) {
-		display_for_each_ctrl(i, display) {
-			ctrl = &display->ctrl[i];
-			if (!ctrl->ctrl || !ctrl->ctrl->dma_wait_queued)
-				continue;
-			flush_workqueue(display->dma_cmd_workq);
-			cancel_work_sync(&ctrl->ctrl->dma_cmd_wait);
-			ctrl->ctrl->dma_wait_queued = false;
-		}
-
-		dsi_display_cmd_engine_disable(display);
-		dsi_display_clk_ctrl(display->dsi_clk_handle,
-				DSI_ALL_CLKS, DSI_CLK_OFF);
-	}
-
 	rc = dsi_display_ctrl_host_disable(display);
 	if (rc)
 		DSI_ERR("[%s] failed to disable DSI host, rc=%d\n",
diff --git a/msm/dsi/dsi_display.h b/msm/dsi/dsi_display.h
index e9445945c2..63a56672c9 100644
--- a/msm/dsi/dsi_display.h
+++ b/msm/dsi/dsi_display.h
@@ -188,8 +188,7 @@ struct dsi_display_ext_bridge {
 * @te_source		   vsync source pin information
 * @clk_gating_config	   Clocks for which clock gating needs to be enabled
 * @queue_cmd_waits	   Indicates if wait for dma commands done has to be queued.
- * @dma_cmd_workq:	   Pointer to the workqueue of DMA command transfer done
- *			   wait sequence.
+ * @post_cmd_tx_workq:	   Pointer to the workqueue of post command transfer work.
 * @is_active:		   status of the display
 * @trusted_vm_env:	   Set to true, it the executing VM is Trusted VM.
 *			   Set to false, otherwise.
@@ -287,7 +286,7 @@ struct dsi_display {
 	u32 te_source;
 	u32 clk_gating_config;
 	bool queue_cmd_waits;
-	struct workqueue_struct *dma_cmd_workq;
+	struct workqueue_struct *post_cmd_tx_workq;
 
 	/* panel id of the display */
 	u64 panel_id;