disp: msm: dsi: handle wait for dma cmd completion

The current implementation triggers the DMA command and then waits
until the command DMA completes and the ISR signals completion. This
change introduces an asynchronous wait after a DCS command has been
triggered. The asynchronous mode is enabled only during pre-kickoff,
so that the commit thread is not blocked.

Change-Id: Iead7b6328883e844147d47ff68dc878943879553
Signed-off-by: Satya Rama Aditya Pinapala <psraditya30@codeaurora.org>
Satya Rama Aditya Pinapala authored on 2019-08-30 10:39:00 -07:00; committed by Gerrit - the friendly Code Review server
parent d236dbd5e0
commit 8bc240b71d
5 changed files with 269 additions and 81 deletions
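
For orientation, the deferred-wait mechanism this patch adds boils down to a standard workqueue/completion pattern. The sketch below is illustrative only and uses invented names (demo_ctrl, demo_kickoff, DEMO_ASYNC_WAIT and friends), not the driver's actual symbols; the real logic is in the dsi_ctrl_dma_cmd_wait_for_done() and dsi_kickoff_msg_tx() changes in the diff that follows.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define DEMO_ASYNC_WAIT 0x1
#define DEMO_TX_TO_MS 200

struct demo_ctrl {
	struct workqueue_struct *wq;     /* single-threaded wait workqueue */
	struct work_struct wait_work;    /* queued "wait for DMA done" item */
	struct completion dma_done;      /* completed from the DMA-done ISR */
	atomic_t irq_trig;               /* set by the ISR; checked before waiting */
};

/* Work item: performs the potentially long wait outside the caller's thread. */
static void demo_wait_for_done(struct work_struct *work)
{
	struct demo_ctrl *ctrl = container_of(work, struct demo_ctrl, wait_work);

	/* The ISR already ran, nothing left to wait for. */
	if (atomic_read(&ctrl->irq_trig))
		return;

	if (!wait_for_completion_timeout(&ctrl->dma_done,
			msecs_to_jiffies(DEMO_TX_TO_MS)))
		pr_err("demo: command transfer timed out\n");
}

/* ISR path: record that the IRQ fired and wake any waiter. */
static void demo_dma_done_isr(struct demo_ctrl *ctrl)
{
	atomic_set(&ctrl->irq_trig, 1);
	complete_all(&ctrl->dma_done);
}

/* Trigger path: queue the wait when asked to, otherwise wait inline. */
static void demo_kickoff(struct demo_ctrl *ctrl, u32 flags)
{
	atomic_set(&ctrl->irq_trig, 0);
	reinit_completion(&ctrl->dma_done);

	/* ... program and trigger the command DMA here ... */

	if (flags & DEMO_ASYNC_WAIT)
		queue_work(ctrl->wq, &ctrl->wait_work);   /* do not block the caller */
	else
		demo_wait_for_done(&ctrl->wait_work);     /* synchronous behaviour */
}

/* One-time setup, mirroring what a probe/bind path would do. */
static int demo_init(struct demo_ctrl *ctrl)
{
	ctrl->wq = create_singlethread_workqueue("demo_dma_cmd_workq");
	if (!ctrl->wq)
		return -ENOMEM;

	INIT_WORK(&ctrl->wait_work, demo_wait_for_done);
	init_completion(&ctrl->dma_done);
	atomic_set(&ctrl->irq_trig, 0);
	return 0;
}

When the async flag is set the caller returns immediately and the single-threaded workqueue serializes the pending waits; before the next transfer the driver flushes or cancels that work, as the dsi_ctrl_flush_cmd_dma_queue() helper in the diff shows.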

@@ -258,6 +258,95 @@ dsi_ctrl_get_aspace(struct dsi_ctrl *dsi_ctrl,
return msm_gem_smmu_address_space_get(dsi_ctrl->drm_dev, domain);
}
static void dsi_ctrl_flush_cmd_dma_queue(struct dsi_ctrl *dsi_ctrl)
{
u32 status;
u32 mask = DSI_CMD_MODE_DMA_DONE;
struct dsi_ctrl_hw_ops dsi_hw_ops = dsi_ctrl->hw.ops;
/*
* If a command is triggered right after another command,
* check if the previous command transfer is completed. If
* transfer is done, cancel any work that has been
* queued. Otherwise wait till the work is scheduled and
* completed before triggering the next command by
* flushing the workqueue.
*/
status = dsi_hw_ops.get_interrupt_status(&dsi_ctrl->hw);
if (atomic_read(&dsi_ctrl->dma_irq_trig)) {
cancel_work_sync(&dsi_ctrl->dma_cmd_wait);
} else if (status & mask) {
atomic_set(&dsi_ctrl->dma_irq_trig, 1);
status |= (DSI_CMD_MODE_DMA_DONE | DSI_BTA_DONE);
dsi_hw_ops.clear_interrupt_status(
&dsi_ctrl->hw,
status);
dsi_ctrl_disable_status_interrupt(dsi_ctrl,
DSI_SINT_CMD_MODE_DMA_DONE);
complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
cancel_work_sync(&dsi_ctrl->dma_cmd_wait);
DSI_CTRL_DEBUG(dsi_ctrl,
"dma_tx done but irq not yet triggered\n");
} else {
flush_workqueue(dsi_ctrl->dma_cmd_workq);
}
}
static void dsi_ctrl_dma_cmd_wait_for_done(struct work_struct *work)
{
int ret = 0;
struct dsi_ctrl *dsi_ctrl = NULL;
u32 status;
u32 mask = DSI_CMD_MODE_DMA_DONE;
struct dsi_ctrl_hw_ops dsi_hw_ops;
dsi_ctrl = container_of(work, struct dsi_ctrl, dma_cmd_wait);
dsi_hw_ops = dsi_ctrl->hw.ops;
SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
/*
* This atomic state will be set if ISR has been triggered,
* so the wait is not needed.
*/
if (atomic_read(&dsi_ctrl->dma_irq_trig))
goto done;
/*
* If IRQ wasn't triggered check interrupt status register for
* transfer done before waiting.
*/
status = dsi_hw_ops.get_interrupt_status(&dsi_ctrl->hw);
if (status & mask) {
status |= (DSI_CMD_MODE_DMA_DONE | DSI_BTA_DONE);
dsi_hw_ops.clear_interrupt_status(&dsi_ctrl->hw,
status);
dsi_ctrl_disable_status_interrupt(dsi_ctrl,
DSI_SINT_CMD_MODE_DMA_DONE);
goto done;
}
ret = wait_for_completion_timeout(
&dsi_ctrl->irq_info.cmd_dma_done,
msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
if (ret == 0) {
status = dsi_hw_ops.get_interrupt_status(&dsi_ctrl->hw);
if (status & mask) {
status |= (DSI_CMD_MODE_DMA_DONE | DSI_BTA_DONE);
dsi_hw_ops.clear_interrupt_status(&dsi_ctrl->hw,
status);
DSI_CTRL_WARN(dsi_ctrl,
"dma_tx done but irq not triggered\n");
} else {
DSI_CTRL_ERR(dsi_ctrl,
"Command transfer failed\n");
}
dsi_ctrl_disable_status_interrupt(dsi_ctrl,
DSI_SINT_CMD_MODE_DMA_DONE);
}
done:
dsi_ctrl->dma_wait_queued = false;
}
static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
enum dsi_ctrl_driver_ops op,
u32 op_state)
@@ -1106,12 +1195,12 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
struct dsi_ctrl_cmd_dma_info *cmd_mem,
u32 flags)
{
-int rc = 0, ret = 0;
u32 hw_flags = 0;
u32 line_no = 0x1;
struct dsi_mode_info *timing;
struct dsi_ctrl_hw_ops dsi_hw_ops = dsi_ctrl->hw.ops;
+SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, flags);
/* check if custom dma scheduling line needed */
if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
(flags & DSI_CTRL_CMD_CUSTOM_DMA_SCHED))
@@ -1156,11 +1245,13 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
dsi_ctrl_wait_for_video_done(dsi_ctrl);
-dsi_ctrl_enable_status_interrupt(dsi_ctrl,
-DSI_SINT_CMD_MODE_DMA_DONE, NULL);
if (dsi_hw_ops.mask_error_intr)
dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
BIT(DSI_FIFO_OVERFLOW), true);
+atomic_set(&dsi_ctrl->dma_irq_trig, 0);
+dsi_ctrl_enable_status_interrupt(dsi_ctrl,
+DSI_SINT_CMD_MODE_DMA_DONE, NULL);
reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
@@ -1180,34 +1271,13 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
cmd,
hw_flags);
}
-ret = wait_for_completion_timeout(
-&dsi_ctrl->irq_info.cmd_dma_done,
-msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
-if (ret == 0) {
-u32 status = dsi_hw_ops.get_interrupt_status(
-&dsi_ctrl->hw);
-u32 mask = DSI_CMD_MODE_DMA_DONE;
-if (status & mask) {
-status |= (DSI_CMD_MODE_DMA_DONE |
-DSI_BTA_DONE);
-dsi_hw_ops.clear_interrupt_status(
-&dsi_ctrl->hw,
-status);
-dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-DSI_SINT_CMD_MODE_DMA_DONE);
-complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
-DSI_CTRL_WARN(dsi_ctrl,
-"dma_tx done but irq not triggered\n");
-} else {
-rc = -ETIMEDOUT;
-dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-DSI_SINT_CMD_MODE_DMA_DONE);
-DSI_CTRL_ERR(dsi_ctrl,
-"Command transfer failed\n");
-}
-}
+if (flags & DSI_CTRL_CMD_ASYNC_WAIT) {
+dsi_ctrl->dma_wait_queued = true;
+queue_work(dsi_ctrl->dma_cmd_workq,
+&dsi_ctrl->dma_cmd_wait);
+} else {
+dsi_ctrl->dma_wait_queued = false;
+dsi_ctrl_dma_cmd_wait_for_done(&dsi_ctrl->dma_cmd_wait);
+}
if (dsi_hw_ops.mask_error_intr && !dsi_ctrl->esd_check_underway)
@@ -1227,6 +1297,20 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
}
}
static u32 dsi_ctrl_validate_msg_flags(const struct mipi_dsi_msg *msg,
u32 flags)
{
/*
* ASYNC command wait mode is not supported for FIFO commands.
* Waiting after a command is transferred cannot be guaranteed
* if DSI_CTRL_CMD_ASYNC_WAIT flag is set.
*/
if ((flags & DSI_CTRL_CMD_FIFO_STORE) ||
msg->wait_ms)
flags &= ~DSI_CTRL_CMD_ASYNC_WAIT;
return flags;
}
static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
const struct mipi_dsi_msg *msg,
u32 flags)
@@ -1252,6 +1336,11 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
goto error;
}
flags = dsi_ctrl_validate_msg_flags(msg, flags);
if (dsi_ctrl->dma_wait_queued)
dsi_ctrl_flush_cmd_dma_queue(dsi_ctrl);
if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
cmd_mem.offset = dsi_ctrl->cmd_buffer_iova;
cmd_mem.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
@@ -1793,6 +1882,9 @@ static int dsi_ctrl_dev_probe(struct platform_device *pdev)
dsi_ctrl->irq_info.irq_num = -1;
dsi_ctrl->irq_info.irq_stat_mask = 0x0;
INIT_WORK(&dsi_ctrl->dma_cmd_wait, dsi_ctrl_dma_cmd_wait_for_done);
atomic_set(&dsi_ctrl->dma_irq_trig, 0);
spin_lock_init(&dsi_ctrl->irq_info.irq_lock);
rc = dsi_ctrl_dts_parse(dsi_ctrl, pdev->dev.of_node);
@@ -1896,6 +1988,7 @@ static int dsi_ctrl_dev_remove(struct platform_device *pdev)
DSI_CTRL_ERR(dsi_ctrl,
"failed to deinitialize clocks, rc=%d\n", rc);
atomic_set(&dsi_ctrl->dma_irq_trig, 0);
mutex_unlock(&dsi_ctrl->ctrl_lock);
mutex_destroy(&dsi_ctrl->ctrl_lock);
@@ -2213,10 +2306,9 @@ exit:
return rc;
}
-int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
+int dsi_ctrl_timing_setup(struct dsi_ctrl *dsi_ctrl)
{
int rc = 0;
if (!dsi_ctrl) {
DSI_CTRL_ERR(dsi_ctrl, "Invalid params\n");
return -EINVAL;
@@ -2224,12 +2316,6 @@ int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
mutex_lock(&dsi_ctrl->ctrl_lock);
-dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
-&dsi_ctrl->host_config.lane_map);
-dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
-&dsi_ctrl->host_config.common_config);
if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
&dsi_ctrl->host_config.common_config,
@@ -2250,8 +2336,29 @@ int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
dsi_ctrl->hw.ops.video_engine_en(&dsi_ctrl->hw, true);
}
mutex_unlock(&dsi_ctrl->ctrl_lock);
return rc;
}
int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
{
int rc = 0;
rc = dsi_ctrl_timing_setup(dsi_ctrl);
if (rc)
return -EINVAL;
mutex_lock(&dsi_ctrl->ctrl_lock);
dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
&dsi_ctrl->host_config.lane_map);
dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
&dsi_ctrl->host_config.common_config);
dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
dsi_ctrl_enable_error_interrupts(dsi_ctrl);
dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, true);
mutex_unlock(&dsi_ctrl->ctrl_lock);
@@ -2489,6 +2596,7 @@ static irqreturn_t dsi_ctrl_isr(int irq, void *ptr)
dsi_ctrl_handle_error_status(dsi_ctrl, errors);
if (status & DSI_CMD_MODE_DMA_DONE) {
atomic_set(&dsi_ctrl->dma_irq_trig, 1);
dsi_ctrl_disable_status_interrupt(dsi_ctrl,
DSI_SINT_CMD_MODE_DMA_DONE);
complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
@@ -2603,6 +2711,7 @@ void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
return;
SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx] == 0) {
@@ -2632,6 +2741,7 @@ void dsi_ctrl_disable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
return;
SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx])
@@ -3070,15 +3180,17 @@ error:
*/
int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
{
-int rc = 0, ret = 0;
-u32 status = 0;
-u32 mask = (DSI_CMD_MODE_DMA_DONE);
+int rc = 0;
+struct dsi_ctrl_hw_ops dsi_hw_ops;
if (!dsi_ctrl) {
DSI_CTRL_ERR(dsi_ctrl, "Invalid params\n");
return -EINVAL;
}
+dsi_hw_ops = dsi_ctrl->hw.ops;
+SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, flags);
/* Dont trigger the command if this is not the last ocmmand */
if (!(flags & DSI_CTRL_CMD_LAST_COMMAND))
return rc;
@@ -3086,52 +3198,37 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
mutex_lock(&dsi_ctrl->ctrl_lock);
if (!(flags & DSI_CTRL_CMD_BROADCAST_MASTER))
-dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
+dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
if ((flags & DSI_CTRL_CMD_BROADCAST) &&
(flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
dsi_ctrl_wait_for_video_done(dsi_ctrl);
+if (dsi_hw_ops.mask_error_intr)
+dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
+BIT(DSI_FIFO_OVERFLOW), true);
+atomic_set(&dsi_ctrl->dma_irq_trig, 0);
dsi_ctrl_enable_status_interrupt(dsi_ctrl,
DSI_SINT_CMD_MODE_DMA_DONE, NULL);
-if (dsi_ctrl->hw.ops.mask_error_intr)
-dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
-BIT(DSI_FIFO_OVERFLOW), true);
reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
/* trigger command */
-dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
+dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
-ret = wait_for_completion_timeout(
-&dsi_ctrl->irq_info.cmd_dma_done,
-msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
-if (ret == 0) {
-status = dsi_ctrl->hw.ops.get_interrupt_status(
-&dsi_ctrl->hw);
-if (status & mask) {
-status |= (DSI_CMD_MODE_DMA_DONE |
-DSI_BTA_DONE);
-dsi_ctrl->hw.ops.clear_interrupt_status(
-&dsi_ctrl->hw,
-status);
-dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-DSI_SINT_CMD_MODE_DMA_DONE);
-complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
-DSI_CTRL_WARN(dsi_ctrl, "dma_tx done but irq not triggered\n");
-} else {
-rc = -ETIMEDOUT;
-dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-DSI_SINT_CMD_MODE_DMA_DONE);
-DSI_CTRL_ERR(dsi_ctrl, "Command transfer failed\n");
-}
-}
+if (flags & DSI_CTRL_CMD_ASYNC_WAIT) {
+dsi_ctrl->dma_wait_queued = true;
+queue_work(dsi_ctrl->dma_cmd_workq,
+&dsi_ctrl->dma_cmd_wait);
+} else {
+dsi_ctrl->dma_wait_queued = false;
+dsi_ctrl_dma_cmd_wait_for_done(&dsi_ctrl->dma_cmd_wait);
+}
-if (dsi_ctrl->hw.ops.mask_error_intr &&
+if (dsi_hw_ops.mask_error_intr &&
!dsi_ctrl->esd_check_underway)
-dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
+dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
BIT(DSI_FIFO_OVERFLOW), false);
if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
-dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
+dsi_hw_ops.soft_reset(&dsi_ctrl->hw);
dsi_ctrl->cmd_len = 0;
}
}

@@ -31,6 +31,8 @@
* @DSI_CTRL_CMD_NON_EMBEDDED_MODE:Transfer cmd packets in non embedded mode.
* @DSI_CTRL_CMD_CUSTOM_DMA_SCHED: Use the dma scheduling line number defined in
* display panel dtsi file instead of default.
* @DSI_CTRL_CMD_ASYNC_WAIT: Command flag to indicate that the wait for done
* for this command is asynchronous and must be queued.
*/
#define DSI_CTRL_CMD_READ 0x1
#define DSI_CTRL_CMD_BROADCAST 0x2
@@ -41,6 +43,7 @@
#define DSI_CTRL_CMD_LAST_COMMAND 0x40
#define DSI_CTRL_CMD_NON_EMBEDDED_MODE 0x80
#define DSI_CTRL_CMD_CUSTOM_DMA_SCHED 0x100
#define DSI_CTRL_CMD_ASYNC_WAIT 0x200
/* DSI embedded mode fifo size
* If the command is greater than 256 bytes it is sent in non-embedded mode.
@@ -217,6 +220,13 @@ struct dsi_ctrl_interrupts {
* @vaddr: CPU virtual address of cmd buffer.
* @secure_mode: Indicates if secure-session is in progress
* @esd_check_underway: Indicates if esd status check is in progress
* @dma_cmd_wait: Work object waiting on DMA command transfer done.
* @dma_cmd_workq: Pointer to the workqueue of DMA command transfer done
* wait sequence.
* @dma_wait_queued: Indicates if any DMA command transfer wait work
* is queued.
* @dma_irq_trig: Atomic state to indicate DMA done IRQ
* triggered.
* @debugfs_root: Root for debugfs entries.
* @misr_enable: Frame MISR enable/disable
* @misr_cache: Cached Frame MISR value
@@ -267,6 +277,10 @@ struct dsi_ctrl {
void *vaddr;
bool secure_mode;
bool esd_check_underway;
struct work_struct dma_cmd_wait;
struct workqueue_struct *dma_cmd_workq;
bool dma_wait_queued;
atomic_t dma_irq_trig;
/* Debug Information */
struct dentry *debugfs_root;
@@ -485,18 +499,30 @@ int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl);
int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable);
/**
-* dsi_ctrl_setup() - Setup DSI host hardware while coming out of idle screen.
+* dsi_ctrl_timing_setup() - Setup DSI host config
* @dsi_ctrl: DSI controller handle.
*
* Initializes DSI controller hardware with host configuration provided by
-* dsi_ctrl_update_host_config(). Initialization can be performed only during
-* DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
-* performed.
+* dsi_ctrl_update_host_config(). This is called while setting up DSI host
+* through dsi_ctrl_setup() and after any ROI change.
*
* Also used to program the video mode timing values.
*
* Return: error code.
*/
int dsi_ctrl_timing_setup(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_setup() - Setup DSI host hardware while coming out of idle screen.
* @dsi_ctrl: DSI controller handle.
*
* Initialization of DSI controller hardware with host configuration and
* enabling required interrupts. Initialization can be performed only during
* DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
* performed.
*
* Return: error code.
*/
int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl);
/**

@@ -2699,6 +2699,12 @@ static int dsi_display_broadcast_cmd(struct dsi_display *display,
flags |= DSI_CTRL_CMD_LAST_COMMAND;
m_flags |= DSI_CTRL_CMD_LAST_COMMAND;
}
if (display->queue_cmd_waits) {
flags |= DSI_CTRL_CMD_ASYNC_WAIT;
m_flags |= DSI_CTRL_CMD_ASYNC_WAIT;
}
/*
* 1. Setup commands in FIFO
* 2. Trigger commands
@@ -2852,9 +2858,13 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
} else {
int ctrl_idx = (msg->flags & MIPI_DSI_MSG_UNICAST) ?
msg->ctrl : 0;
u32 cmd_flags = DSI_CTRL_CMD_FETCH_MEMORY;
if (display->queue_cmd_waits)
cmd_flags |= DSI_CTRL_CMD_ASYNC_WAIT;
rc = dsi_ctrl_cmd_transfer(display->ctrl[ctrl_idx].ctrl, msg,
-DSI_CTRL_CMD_FETCH_MEMORY);
+cmd_flags);
if (rc) {
DSI_ERR("[%s] cmd transfer failed, rc=%d\n",
display->name, rc);
@@ -3151,6 +3161,22 @@ int dsi_pre_clkoff_cb(void *priv,
struct dsi_display *display = priv;
struct dsi_display_ctrl *ctrl;
/*
* If Idle Power Collapse occurs immediately after a CMD
* transfer with an asynchronous wait for DMA done, ensure
* that the work queued is scheduled and completed before turning
* off the clocks and disabling interrupts to validate the command
* transfer.
*/
display_for_each_ctrl(i, display) {
ctrl = &display->ctrl[i];
if (!ctrl->ctrl || !ctrl->ctrl->dma_wait_queued)
continue;
flush_workqueue(display->dma_cmd_workq);
cancel_work_sync(&ctrl->ctrl->dma_cmd_wait);
ctrl->ctrl->dma_wait_queued = false;
}
if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF) &&
(l_type & DSI_LINK_LP_CLK)) {
/*
@@ -4836,6 +4862,7 @@ static int dsi_display_bind(struct device *dev,
goto error_ctrl_deinit;
}
display_ctrl->ctrl->dma_cmd_workq = display->dma_cmd_workq;
memcpy(&info.c_clks[i],
(&display_ctrl->ctrl->clk_info.core_clks),
sizeof(struct dsi_core_clk_info));
@@ -5013,6 +5040,7 @@ static void dsi_display_unbind(struct device *dev,
DSI_ERR("[%s] failed to deinit phy%d driver, rc=%d\n",
display->name, i, rc);
display->ctrl->ctrl->dma_cmd_workq = NULL;
rc = dsi_ctrl_drv_deinit(display_ctrl->ctrl);
if (rc)
DSI_ERR("[%s] failed to deinit ctrl%d driver, rc=%d\n",
@@ -5101,6 +5129,14 @@ int dsi_display_dev_probe(struct platform_device *pdev)
goto end;
}
display->dma_cmd_workq = create_singlethread_workqueue(
"dsi_dma_cmd_workq");
if (!display->dma_cmd_workq) {
DSI_ERR("failed to create work queue\n");
rc = -EINVAL;
goto end;
}
display->display_type = of_get_property(pdev->dev.of_node,
"label", NULL);
if (!display->display_type)
@@ -5164,8 +5200,9 @@ end:
int dsi_display_dev_remove(struct platform_device *pdev)
{
-int rc = 0;
+int rc = 0, i = 0;
struct dsi_display *display;
struct dsi_display_ctrl *ctrl;
if (!pdev) {
DSI_ERR("Invalid device\n");
@@ -5177,6 +5214,18 @@ int dsi_display_dev_remove(struct platform_device *pdev)
/* decrement ref count */
of_node_put(display->panel_node);
if (display->dma_cmd_workq) {
flush_workqueue(display->dma_cmd_workq);
destroy_workqueue(display->dma_cmd_workq);
display->dma_cmd_workq = NULL;
display_for_each_ctrl(i, display) {
ctrl = &display->ctrl[i];
if (!ctrl->ctrl)
continue;
ctrl->ctrl->dma_cmd_workq = NULL;
}
}
(void)_dsi_display_dev_deinit(display);
platform_set_drvdata(pdev, NULL);
@@ -7003,7 +7052,7 @@ static int dsi_display_set_roi(struct dsi_display *display,
}
/* re-program the ctrl with the timing based on the new roi */
-rc = dsi_ctrl_setup(ctrl->ctrl);
+rc = dsi_ctrl_timing_setup(ctrl->ctrl);
if (rc) {
DSI_ERR("dsi_ctrl_setup failed rc %d\n", rc);
return rc;

@@ -182,6 +182,9 @@ struct dsi_display_ext_bridge {
* @esd_trigger field indicating ESD trigger through debugfs
* @te_source vsync source pin information
* @clk_gating_config Clocks for which clock gating needs to be enabled
* @queue_cmd_waits Indicates if wait for dma commands done has to be queued.
* @dma_cmd_workq: Pointer to the workqueue of DMA command transfer done
* wait sequence.
*/
struct dsi_display {
struct platform_device *pdev;
@@ -266,6 +269,8 @@ struct dsi_display {
u32 te_source;
u32 clk_gating_config;
bool queue_cmd_waits;
struct workqueue_struct *dma_cmd_workq;
};
int dsi_display_dev_probe(struct platform_device *pdev);

@@ -759,6 +759,7 @@ int sde_connector_pre_kickoff(struct drm_connector *connector)
struct sde_connector *c_conn;
struct sde_connector_state *c_state;
struct msm_display_kickoff_params params;
struct dsi_display *display;
int rc;
if (!connector) {
@@ -773,6 +774,15 @@ int sde_connector_pre_kickoff(struct drm_connector *connector)
return -EINVAL;
}
/*
* During pre kickoff DCS commands have to have an
* asynchronous wait to avoid an unnecessary stall
* in pre-kickoff. This flag must be reset at the
* end of display pre-kickoff.
*/
display = (struct dsi_display *)c_conn->display;
display->queue_cmd_waits = true;
rc = _sde_connector_update_dirty_properties(connector);
if (rc) {
SDE_EVT32(connector->base.id, SDE_EVTLOG_ERROR);
@@ -789,6 +799,7 @@ int sde_connector_pre_kickoff(struct drm_connector *connector)
rc = c_conn->ops.pre_kickoff(connector, c_conn->display, &params);
display->queue_cmd_waits = false;
end:
return rc;
}