Browse Source

Merge remote-tracking branch 'quic/display-kernel.lnx.4.19' into display-kernel.lnx.1.0

* quic/display-kernel.lnx.4.19:
  disp: msm: dsi: handle wait for dma cmd completion
  disp: msm: dsi: Config panel test pin to input mode when panel off
  disp: msm: adjust smmu detach sequence to include unmapping
  disp: msm: sde: Fix 32-bit compilation issues
  disp: msm: dsi: reject seamless commit with active changed
  disp: msm: update debug dump for ltm block
  disp: msm: remove runtime_pm support from rsc driver
  disp: msm: sde: avoid encoder power-collapse with pending frames
  disp: msm: sde: handle all error cases during sui transitions
  drm/msm/dsi: bypass dsi clock set during changing mode
  disp: msm: sde: remove dspp blocking
  disp: msm: sde: update avr mode config during commit prepare
  disp: msm: sde: add one-shot qsync mode support
  disp: msm: sde: update wr_ptr_success state post wait
  disp: msm: sde: allow qsync support along with VRR

Change-Id: Ib2a2a855a2fa49ed74789ed470ee669b21a95500
Signed-off-by: Alisha Thapaliya <[email protected]>
Alisha Thapaliya 5 years ago
parent
commit
2ee7f3858a

+ 173 - 76
msm/dsi/dsi_ctrl.c

@@ -258,6 +258,95 @@ dsi_ctrl_get_aspace(struct dsi_ctrl *dsi_ctrl,
 	return msm_gem_smmu_address_space_get(dsi_ctrl->drm_dev, domain);
 }
 
+static void dsi_ctrl_flush_cmd_dma_queue(struct dsi_ctrl *dsi_ctrl)
+{
+	u32 status;
+	u32 mask = DSI_CMD_MODE_DMA_DONE;
+	struct dsi_ctrl_hw_ops dsi_hw_ops = dsi_ctrl->hw.ops;
+
+	/*
+	 * If a command is triggered right after another command,
+	 * check if the previous command transfer is completed. If
+	 * transfer is done, cancel any work that has been
+	 * queued. Otherwise wait till the work is scheduled and
+	 * completed before triggering the next command by
+	 * flushing the workqueue.
+	 */
+	status = dsi_hw_ops.get_interrupt_status(&dsi_ctrl->hw);
+	if (atomic_read(&dsi_ctrl->dma_irq_trig)) {
+		cancel_work_sync(&dsi_ctrl->dma_cmd_wait);
+	} else if (status & mask) {
+		atomic_set(&dsi_ctrl->dma_irq_trig, 1);
+		status |= (DSI_CMD_MODE_DMA_DONE | DSI_BTA_DONE);
+		dsi_hw_ops.clear_interrupt_status(
+						&dsi_ctrl->hw,
+						status);
+		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+				DSI_SINT_CMD_MODE_DMA_DONE);
+		complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
+		cancel_work_sync(&dsi_ctrl->dma_cmd_wait);
+		DSI_CTRL_DEBUG(dsi_ctrl,
+				"dma_tx done but irq not yet triggered\n");
+	} else {
+		flush_workqueue(dsi_ctrl->dma_cmd_workq);
+	}
+}
+
+static void dsi_ctrl_dma_cmd_wait_for_done(struct work_struct *work)
+{
+	int ret = 0;
+	struct dsi_ctrl *dsi_ctrl = NULL;
+	u32 status;
+	u32 mask = DSI_CMD_MODE_DMA_DONE;
+	struct dsi_ctrl_hw_ops dsi_hw_ops;
+
+	dsi_ctrl = container_of(work, struct dsi_ctrl, dma_cmd_wait);
+	dsi_hw_ops = dsi_ctrl->hw.ops;
+	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
+
+	/*
+	 * This atomic state will be set if ISR has been triggered,
+	 * so the wait is not needed.
+	 */
+	if (atomic_read(&dsi_ctrl->dma_irq_trig))
+		goto done;
+	/*
+	 * If IRQ wasn't triggered check interrupt status register for
+	 * transfer done before waiting.
+	 */
+	status = dsi_hw_ops.get_interrupt_status(&dsi_ctrl->hw);
+	if (status & mask) {
+		status |= (DSI_CMD_MODE_DMA_DONE | DSI_BTA_DONE);
+		dsi_hw_ops.clear_interrupt_status(&dsi_ctrl->hw,
+				status);
+		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+				DSI_SINT_CMD_MODE_DMA_DONE);
+		goto done;
+	}
+
+	ret = wait_for_completion_timeout(
+			&dsi_ctrl->irq_info.cmd_dma_done,
+			msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
+	if (ret == 0) {
+		status = dsi_hw_ops.get_interrupt_status(&dsi_ctrl->hw);
+		if (status & mask) {
+			status |= (DSI_CMD_MODE_DMA_DONE | DSI_BTA_DONE);
+			dsi_hw_ops.clear_interrupt_status(&dsi_ctrl->hw,
+					status);
+			DSI_CTRL_WARN(dsi_ctrl,
+					"dma_tx done but irq not triggered\n");
+		} else {
+			DSI_CTRL_ERR(dsi_ctrl,
+					"Command transfer failed\n");
+		}
+		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+					DSI_SINT_CMD_MODE_DMA_DONE);
+	}
+
+done:
+	dsi_ctrl->dma_wait_queued = false;
+}
+
 static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
 				enum dsi_ctrl_driver_ops op,
 				u32 op_state)
@@ -847,8 +936,8 @@ static int dsi_ctrl_update_link_freqs(struct dsi_ctrl *dsi_ctrl,
 		bit_rate = config->bit_clk_rate_hz_override * num_of_lanes;
 	} else if (config->panel_mode == DSI_OP_CMD_MODE) {
 		/* Calculate the bit rate needed to match dsi transfer time */
-		bit_rate = mult_frac(min_dsi_clk_hz, frame_time_us,
-				dsi_transfer_time_us);
+		bit_rate = min_dsi_clk_hz * frame_time_us;
+		do_div(bit_rate, dsi_transfer_time_us);
 		bit_rate = bit_rate * num_of_lanes;
 	} else {
 		h_period = DSI_H_TOTAL_DSC(timing);
@@ -1106,12 +1195,12 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
 				struct dsi_ctrl_cmd_dma_info *cmd_mem,
 				u32 flags)
 {
-	int rc = 0, ret = 0;
 	u32 hw_flags = 0;
 	u32 line_no = 0x1;
 	struct dsi_mode_info *timing;
 	struct dsi_ctrl_hw_ops dsi_hw_ops = dsi_ctrl->hw.ops;
 
+	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, flags);
 	/* check if custom dma scheduling line needed */
 	if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
 		(flags & DSI_CTRL_CMD_CUSTOM_DMA_SCHED))
@@ -1156,11 +1245,13 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
 
 	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
 		dsi_ctrl_wait_for_video_done(dsi_ctrl);
-		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
-					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
 		if (dsi_hw_ops.mask_error_intr)
 			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
 					BIT(DSI_FIFO_OVERFLOW), true);
+
+		atomic_set(&dsi_ctrl->dma_irq_trig, 0);
+		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
+					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
 		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
 
 		if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
@@ -1180,34 +1271,13 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
 							      cmd,
 							      hw_flags);
 		}
-
-		ret = wait_for_completion_timeout(
-				&dsi_ctrl->irq_info.cmd_dma_done,
-				msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
-
-		if (ret == 0) {
-			u32 status = dsi_hw_ops.get_interrupt_status(
-								&dsi_ctrl->hw);
-			u32 mask = DSI_CMD_MODE_DMA_DONE;
-
-			if (status & mask) {
-				status |= (DSI_CMD_MODE_DMA_DONE |
-						DSI_BTA_DONE);
-				dsi_hw_ops.clear_interrupt_status(
-								&dsi_ctrl->hw,
-								status);
-				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-						DSI_SINT_CMD_MODE_DMA_DONE);
-				complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
-				DSI_CTRL_WARN(dsi_ctrl,
-					"dma_tx done but irq not triggered\n");
-			} else {
-				rc = -ETIMEDOUT;
-				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-						DSI_SINT_CMD_MODE_DMA_DONE);
-				DSI_CTRL_ERR(dsi_ctrl,
-						"Command transfer failed\n");
-			}
+		if (flags & DSI_CTRL_CMD_ASYNC_WAIT) {
+			dsi_ctrl->dma_wait_queued = true;
+			queue_work(dsi_ctrl->dma_cmd_workq,
+					&dsi_ctrl->dma_cmd_wait);
+		} else {
+			dsi_ctrl->dma_wait_queued = false;
+			dsi_ctrl_dma_cmd_wait_for_done(&dsi_ctrl->dma_cmd_wait);
 		}
 
 		if (dsi_hw_ops.mask_error_intr && !dsi_ctrl->esd_check_underway)
@@ -1227,6 +1297,20 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
 	}
 }
 
+static u32 dsi_ctrl_validate_msg_flags(const struct mipi_dsi_msg *msg,
+				u32 flags)
+{
+	/*
+	 * ASYNC command wait mode is not supported for FIFO commands.
+	 * Waiting after a command is transferred cannot be guaranteed
+	 * if DSI_CTRL_CMD_ASYNC_WAIT flag is set.
+	 */
+	if ((flags & DSI_CTRL_CMD_FIFO_STORE) ||
+			msg->wait_ms)
+		flags &= ~DSI_CTRL_CMD_ASYNC_WAIT;
+	return flags;
+}
+
 static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
 			  const struct mipi_dsi_msg *msg,
 			  u32 flags)
@@ -1252,6 +1336,11 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
 		goto error;
 	}
 
+	flags = dsi_ctrl_validate_msg_flags(msg, flags);
+
+	if (dsi_ctrl->dma_wait_queued)
+		dsi_ctrl_flush_cmd_dma_queue(dsi_ctrl);
+
 	if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
 		cmd_mem.offset = dsi_ctrl->cmd_buffer_iova;
 		cmd_mem.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
@@ -1793,6 +1882,9 @@ static int dsi_ctrl_dev_probe(struct platform_device *pdev)
 	dsi_ctrl->irq_info.irq_num = -1;
 	dsi_ctrl->irq_info.irq_stat_mask = 0x0;
 
+	INIT_WORK(&dsi_ctrl->dma_cmd_wait, dsi_ctrl_dma_cmd_wait_for_done);
+	atomic_set(&dsi_ctrl->dma_irq_trig, 0);
+
 	spin_lock_init(&dsi_ctrl->irq_info.irq_lock);
 
 	rc = dsi_ctrl_dts_parse(dsi_ctrl, pdev->dev.of_node);
@@ -1896,6 +1988,7 @@ static int dsi_ctrl_dev_remove(struct platform_device *pdev)
 		DSI_CTRL_ERR(dsi_ctrl,
 				"failed to deinitialize clocks, rc=%d\n", rc);
 
+	atomic_set(&dsi_ctrl->dma_irq_trig, 0);
 	mutex_unlock(&dsi_ctrl->ctrl_lock);
 
 	mutex_destroy(&dsi_ctrl->ctrl_lock);
@@ -2213,10 +2306,9 @@ exit:
 	return rc;
 }
 
-int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
+int dsi_ctrl_timing_setup(struct dsi_ctrl *dsi_ctrl)
 {
 	int rc = 0;
-
 	if (!dsi_ctrl) {
 		DSI_CTRL_ERR(dsi_ctrl, "Invalid params\n");
 		return -EINVAL;
@@ -2224,12 +2316,6 @@ int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
 
 	mutex_lock(&dsi_ctrl->ctrl_lock);
 
-	dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
-					&dsi_ctrl->host_config.lane_map);
-
-	dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
-				    &dsi_ctrl->host_config.common_config);
-
 	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
 		dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
 					&dsi_ctrl->host_config.common_config,
@@ -2250,8 +2336,29 @@ int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
 		dsi_ctrl->hw.ops.video_engine_en(&dsi_ctrl->hw, true);
 	}
 
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+
+	rc = dsi_ctrl_timing_setup(dsi_ctrl);
+	if (rc)
+		return -EINVAL;
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
+					&dsi_ctrl->host_config.lane_map);
+
+	dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
+				    &dsi_ctrl->host_config.common_config);
+
 	dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
 	dsi_ctrl_enable_error_interrupts(dsi_ctrl);
+
 	dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, true);
 
 	mutex_unlock(&dsi_ctrl->ctrl_lock);
@@ -2489,6 +2596,7 @@ static irqreturn_t dsi_ctrl_isr(int irq, void *ptr)
 		dsi_ctrl_handle_error_status(dsi_ctrl, errors);
 
 	if (status & DSI_CMD_MODE_DMA_DONE) {
+		atomic_set(&dsi_ctrl->dma_irq_trig, 1);
 		dsi_ctrl_disable_status_interrupt(dsi_ctrl,
 					DSI_SINT_CMD_MODE_DMA_DONE);
 		complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
@@ -2603,6 +2711,7 @@ void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
 			intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
 		return;
 
+	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
 	spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
 
 	if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx] == 0) {
@@ -2632,6 +2741,7 @@ void dsi_ctrl_disable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
 			intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
 		return;
 
+	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
 	spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
 
 	if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx])
@@ -3070,15 +3180,17 @@ error:
  */
 int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
 {
-	int rc = 0, ret = 0;
-	u32 status = 0;
-	u32 mask = (DSI_CMD_MODE_DMA_DONE);
+	int rc = 0;
+	struct dsi_ctrl_hw_ops dsi_hw_ops;
 
 	if (!dsi_ctrl) {
 		DSI_CTRL_ERR(dsi_ctrl, "Invalid params\n");
 		return -EINVAL;
 	}
 
+	dsi_hw_ops = dsi_ctrl->hw.ops;
+
+	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, flags);
 	/* Don't trigger the command if this is not the last command */
 	if (!(flags & DSI_CTRL_CMD_LAST_COMMAND))
 		return rc;
@@ -3086,52 +3198,37 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
 	mutex_lock(&dsi_ctrl->ctrl_lock);
 
 	if (!(flags & DSI_CTRL_CMD_BROADCAST_MASTER))
-		dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
+		dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
 
 	if ((flags & DSI_CTRL_CMD_BROADCAST) &&
 		(flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
 		dsi_ctrl_wait_for_video_done(dsi_ctrl);
+		if (dsi_hw_ops.mask_error_intr)
+			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
+					BIT(DSI_FIFO_OVERFLOW), true);
+		atomic_set(&dsi_ctrl->dma_irq_trig, 0);
 		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
 					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
-		if (dsi_ctrl->hw.ops.mask_error_intr)
-			dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
-					BIT(DSI_FIFO_OVERFLOW), true);
 		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
 
 		/* trigger command */
-		dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
-
-		ret = wait_for_completion_timeout(
-				&dsi_ctrl->irq_info.cmd_dma_done,
-				msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
-
-		if (ret == 0) {
-			status = dsi_ctrl->hw.ops.get_interrupt_status(
-								&dsi_ctrl->hw);
-			if (status & mask) {
-				status |= (DSI_CMD_MODE_DMA_DONE |
-						DSI_BTA_DONE);
-				dsi_ctrl->hw.ops.clear_interrupt_status(
-								&dsi_ctrl->hw,
-								status);
-				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-						DSI_SINT_CMD_MODE_DMA_DONE);
-				complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
-				DSI_CTRL_WARN(dsi_ctrl, "dma_tx done but irq not triggered\n");
-			} else {
-				rc = -ETIMEDOUT;
-				dsi_ctrl_disable_status_interrupt(dsi_ctrl,
-						DSI_SINT_CMD_MODE_DMA_DONE);
-				DSI_CTRL_ERR(dsi_ctrl, "Command transfer failed\n");
-			}
+		dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
+		if (flags & DSI_CTRL_CMD_ASYNC_WAIT) {
+			dsi_ctrl->dma_wait_queued = true;
+			queue_work(dsi_ctrl->dma_cmd_workq,
+					&dsi_ctrl->dma_cmd_wait);
+		} else {
+			dsi_ctrl->dma_wait_queued = false;
+			dsi_ctrl_dma_cmd_wait_for_done(&dsi_ctrl->dma_cmd_wait);
 		}
-		if (dsi_ctrl->hw.ops.mask_error_intr &&
+
+		if (dsi_hw_ops.mask_error_intr &&
 				!dsi_ctrl->esd_check_underway)
-			dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
+			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
 					BIT(DSI_FIFO_OVERFLOW), false);
 
 		if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
-			dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
+			dsi_hw_ops.soft_reset(&dsi_ctrl->hw);
 			dsi_ctrl->cmd_len = 0;
 		}
 	}
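
Note: the hunks above replace the inline wait_for_completion_timeout() with a
work item that can either run synchronously or be queued on a dedicated
workqueue. A minimal sketch of that deferred-wait pattern using standard
kernel APIs (my_dev, my_wait_fn and the 200 ms timeout are illustrative
placeholders, not names from this commit):

	#include <linux/workqueue.h>
	#include <linux/completion.h>

	struct my_dev {
		struct workqueue_struct *wq;	/* single-threaded, ordered */
		struct work_struct wait_work;	/* INIT_WORK(..., my_wait_fn) at probe */
		struct completion done;		/* completed from the DMA-done ISR */
		bool wait_queued;
	};

	static void my_wait_fn(struct work_struct *work)
	{
		struct my_dev *d = container_of(work, struct my_dev, wait_work);

		/* runs inline for sync callers, from the workqueue for async ones */
		wait_for_completion_timeout(&d->done, msecs_to_jiffies(200));
		d->wait_queued = false;
	}

	static void my_kickoff(struct my_dev *d, bool async)
	{
		reinit_completion(&d->done);
		/* ... program and trigger the command DMA here ... */
		if (async) {
			d->wait_queued = true;
			queue_work(d->wq, &d->wait_work);	/* wait later */
		} else {
			d->wait_queued = false;
			my_wait_fn(&d->wait_work);		/* wait now */
		}
	}

Before reusing the completion for the next transfer, an async caller must
cancel or flush the pending work, which is what dsi_ctrl_flush_cmd_dma_queue()
above does via cancel_work_sync() and flush_workqueue().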

+ 30 - 4
msm/dsi/dsi_ctrl.h

@@ -31,6 +31,8 @@
  * @DSI_CTRL_CMD_NON_EMBEDDED_MODE:Transfer cmd packets in non embedded mode.
  * @DSI_CTRL_CMD_CUSTOM_DMA_SCHED: Use the dma scheduling line number defined in
  *				   display panel dtsi file instead of default.
+ * @DSI_CTRL_CMD_ASYNC_WAIT: Command flag to indicate that the wait for done
+ *			for this command is asynchronous and must be queued.
  */
 #define DSI_CTRL_CMD_READ             0x1
 #define DSI_CTRL_CMD_BROADCAST        0x2
@@ -41,6 +43,7 @@
 #define DSI_CTRL_CMD_LAST_COMMAND     0x40
 #define DSI_CTRL_CMD_NON_EMBEDDED_MODE 0x80
 #define DSI_CTRL_CMD_CUSTOM_DMA_SCHED  0x100
+#define DSI_CTRL_CMD_ASYNC_WAIT 0x200
 
 /* DSI embedded mode fifo size
  * If the command is greater than 256 bytes it is sent in non-embedded mode.
@@ -217,6 +220,13 @@ struct dsi_ctrl_interrupts {
  * @vaddr:               CPU virtual address of cmd buffer.
  * @secure_mode:         Indicates if secure-session is in progress
  * @esd_check_underway:  Indicates if esd status check is in progress
+ * @dma_cmd_wait:	Work object waiting on DMA command transfer done.
+ * @dma_cmd_workq:	Pointer to the workqueue of DMA command transfer done
+ *				wait sequence.
+ * @dma_wait_queued:	Indicates if any DMA command transfer wait work
+ *				is queued.
+ * @dma_irq_trig:		 Atomic state to indicate DMA done IRQ
+ *				triggered.
  * @debugfs_root:        Root for debugfs entries.
  * @misr_enable:         Frame MISR enable/disable
  * @misr_cache:          Cached Frame MISR value
@@ -267,6 +277,10 @@ struct dsi_ctrl {
 	void *vaddr;
 	bool secure_mode;
 	bool esd_check_underway;
+	struct work_struct dma_cmd_wait;
+	struct workqueue_struct *dma_cmd_workq;
+	bool dma_wait_queued;
+	atomic_t dma_irq_trig;
 
 	/* Debug Information */
 	struct dentry *debugfs_root;
@@ -485,18 +499,30 @@ int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl);
 int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable);
 
 /**
- * dsi_ctrl_setup() - Setup DSI host hardware while coming out of idle screen.
+ * dsi_ctrl_timing_setup() - Setup DSI host config
  * @dsi_ctrl:        DSI controller handle.
  *
  * Initializes DSI controller hardware with host configuration provided by
- * dsi_ctrl_update_host_config(). Initialization can be performed only during
- * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
- * performed.
+ * dsi_ctrl_update_host_config(). This is called while setting up DSI host
+ * through dsi_ctrl_setup() and after any ROI change.
  *
  * Also used to program the video mode timing values.
  *
  * Return: error code.
  */
+int dsi_ctrl_timing_setup(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_setup() - Setup DSI host hardware while coming out of idle screen.
+ * @dsi_ctrl:        DSI controller handle.
+ *
+ * Initialization of DSI controller hardware with host configuration and
+ * enabling required interrupts. Initialization can be performed only during
+ * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
+ * performed.
+ *
+ * Return: error code.
+ */
 int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl);
 
 /**

+ 81 - 14
msm/dsi/dsi_display.c

@@ -2699,6 +2699,12 @@ static int dsi_display_broadcast_cmd(struct dsi_display *display,
 		flags |= DSI_CTRL_CMD_LAST_COMMAND;
 		m_flags |= DSI_CTRL_CMD_LAST_COMMAND;
 	}
+
+	if (display->queue_cmd_waits) {
+		flags |= DSI_CTRL_CMD_ASYNC_WAIT;
+		m_flags |= DSI_CTRL_CMD_ASYNC_WAIT;
+	}
+
 	/*
 	 * 1. Setup commands in FIFO
 	 * 2. Trigger commands
@@ -2852,9 +2858,13 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
 	} else {
 		int ctrl_idx = (msg->flags & MIPI_DSI_MSG_UNICAST) ?
 				msg->ctrl : 0;
+		u32 cmd_flags = DSI_CTRL_CMD_FETCH_MEMORY;
+
+		if (display->queue_cmd_waits)
+			cmd_flags |= DSI_CTRL_CMD_ASYNC_WAIT;
 
 		rc = dsi_ctrl_cmd_transfer(display->ctrl[ctrl_idx].ctrl, msg,
-					  DSI_CTRL_CMD_FETCH_MEMORY);
+				cmd_flags);
 		if (rc) {
 			DSI_ERR("[%s] cmd transfer failed, rc=%d\n",
 			       display->name, rc);
@@ -3151,6 +3161,22 @@ int dsi_pre_clkoff_cb(void *priv,
 	struct dsi_display *display = priv;
 	struct dsi_display_ctrl *ctrl;
 
+
+	/*
+	 * If Idle Power Collapse occurs immediately after a CMD
+	 * transfer with an asynchronous wait for DMA done, ensure
+	 * that the work queued is scheduled and completed before turning
+	 * off the clocks and disabling interrupts to validate the command
+	 * transfer.
+	 */
+	display_for_each_ctrl(i, display) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || !ctrl->ctrl->dma_wait_queued)
+			continue;
+		flush_workqueue(display->dma_cmd_workq);
+		cancel_work_sync(&ctrl->ctrl->dma_cmd_wait);
+		ctrl->ctrl->dma_wait_queued = false;
+	}
 	if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF) &&
 		(l_type & DSI_LINK_LP_CLK)) {
 		/*
@@ -4489,8 +4515,13 @@ static int dsi_display_set_mode_sub(struct dsi_display *display,
 
 	if ((mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) &&
 			(display->panel->panel_mode == DSI_OP_CMD_MODE)) {
+		u64 cur_bitclk = display->panel->cur_mode->timing.clk_rate_hz;
+		u64 to_bitclk = mode->timing.clk_rate_hz;
 		commit_phy_timing = true;
-		atomic_set(&display->clkrate_change_pending, 1);
+
+		/* No need to set clkrate pending flag if clocks are same */
+		if (cur_bitclk != to_bitclk)
+			atomic_set(&display->clkrate_change_pending, 1);
 
 		dsi_display_validate_dms_fps(display->panel->cur_mode, mode);
 	}
@@ -4831,6 +4862,7 @@ static int dsi_display_bind(struct device *dev,
 			goto error_ctrl_deinit;
 		}
 
+		display_ctrl->ctrl->dma_cmd_workq = display->dma_cmd_workq;
 		memcpy(&info.c_clks[i],
 				(&display_ctrl->ctrl->clk_info.core_clks),
 				sizeof(struct dsi_core_clk_info));
@@ -5008,6 +5040,7 @@ static void dsi_display_unbind(struct device *dev,
 			DSI_ERR("[%s] failed to deinit phy%d driver, rc=%d\n",
 			       display->name, i, rc);
 
+		display->ctrl->ctrl->dma_cmd_workq = NULL;
 		rc = dsi_ctrl_drv_deinit(display_ctrl->ctrl);
 		if (rc)
 			DSI_ERR("[%s] failed to deinit ctrl%d driver, rc=%d\n",
@@ -5096,6 +5129,14 @@ int dsi_display_dev_probe(struct platform_device *pdev)
 		goto end;
 	}
 
+	display->dma_cmd_workq = create_singlethread_workqueue(
+			"dsi_dma_cmd_workq");
+	if (!display->dma_cmd_workq)  {
+		DSI_ERR("failed to create work queue\n");
+		rc =  -EINVAL;
+		goto end;
+	}
+
 	display->display_type = of_get_property(pdev->dev.of_node,
 				"label", NULL);
 	if (!display->display_type)
@@ -5159,8 +5200,9 @@ end:
 
 int dsi_display_dev_remove(struct platform_device *pdev)
 {
-	int rc = 0;
+	int rc = 0, i = 0;
 	struct dsi_display *display;
+	struct dsi_display_ctrl *ctrl;
 
 	if (!pdev) {
 		DSI_ERR("Invalid device\n");
@@ -5172,6 +5214,18 @@ int dsi_display_dev_remove(struct platform_device *pdev)
 	/* decrement ref count */
 	of_node_put(display->panel_node);
 
+	if (display->dma_cmd_workq) {
+		flush_workqueue(display->dma_cmd_workq);
+		destroy_workqueue(display->dma_cmd_workq);
+		display->dma_cmd_workq = NULL;
+		display_for_each_ctrl(i, display) {
+			ctrl = &display->ctrl[i];
+			if (!ctrl->ctrl)
+				continue;
+			ctrl->ctrl->dma_cmd_workq = NULL;
+		}
+	}
+
 	(void)_dsi_display_dev_deinit(display);
 
 	platform_set_drvdata(pdev, NULL);
@@ -6998,7 +7052,7 @@ static int dsi_display_set_roi(struct dsi_display *display,
 		}
 
 		/* re-program the ctrl with the timing based on the new roi */
-		rc = dsi_ctrl_setup(ctrl->ctrl);
+		rc = dsi_ctrl_timing_setup(ctrl->ctrl);
 		if (rc) {
 			DSI_ERR("dsi_ctrl_setup failed rc %d\n", rc);
 			return rc;
@@ -7014,21 +7068,11 @@ int dsi_display_pre_kickoff(struct drm_connector *connector,
 {
 	int rc = 0;
 	int i;
-	bool enable;
 
 	/* check and setup MISR */
 	if (display->misr_enable)
 		_dsi_display_setup_misr(display);
 
-	if (params->qsync_update) {
-		enable = (params->qsync_mode > 0) ? true : false;
-		rc = dsi_display_qsync(display, enable);
-		if (rc)
-			DSI_ERR("%s failed to send qsync commands\n",
-				__func__);
-		SDE_EVT32(params->qsync_mode, rc);
-	}
-
 	rc = dsi_display_set_roi(display, params->rois);
 
 	/* dynamic DSI clock setting */
@@ -7109,6 +7153,29 @@ error_out:
 	return rc;
 }
 
+int dsi_display_pre_commit(void *display,
+		struct msm_display_conn_params *params)
+{
+	bool enable = false;
+	int rc = 0;
+
+	if (!display || !params) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	if (params->qsync_update) {
+		enable = (params->qsync_mode > 0) ? true : false;
+		rc = dsi_display_qsync(display, enable);
+		if (rc)
+			pr_err("%s failed to send qsync commands\n",
+				__func__);
+		SDE_EVT32(params->qsync_mode, rc);
+	}
+
+	return rc;
+}
+
 int dsi_display_enable(struct dsi_display *display)
 {
 	int rc = 0;

+ 15 - 0
msm/dsi/dsi_display.h

@@ -182,6 +182,9 @@ struct dsi_display_ext_bridge {
  * @esd_trigger       field indicating ESD trigger through debugfs
  * @te_source         vsync source pin information
  * @clk_gating_config Clocks for which clock gating needs to be enabled
+ * @queue_cmd_waits   Indicates if wait for dma commands done has to be queued.
+ * @dma_cmd_workq:	Pointer to the workqueue of DMA command transfer done
+ *				wait sequence.
  */
 struct dsi_display {
 	struct platform_device *pdev;
@@ -266,6 +269,8 @@ struct dsi_display {
 
 	u32 te_source;
 	u32 clk_gating_config;
+	bool queue_cmd_waits;
+	struct workqueue_struct *dma_cmd_workq;
 };
 
 int dsi_display_dev_probe(struct platform_device *pdev);
@@ -679,6 +684,16 @@ int dsi_display_set_power(struct drm_connector *connector,
 int dsi_display_pre_kickoff(struct drm_connector *connector,
 		struct dsi_display *display,
 		struct msm_display_kickoff_params *params);
+
+/*
+ * dsi_display_pre_commit - program pre commit features
+ * @display: Pointer to private display structure
+ * @params: Parameters for pre commit time programming
+ * Returns: Zero on success
+ */
+int dsi_display_pre_commit(void *display,
+		struct msm_display_conn_params *params);
+
 /**
  * dsi_display_get_dst_format() - get dst_format from DSI display
  * @connector:        Pointer to drm connector structure

+ 29 - 1
msm/dsi/dsi_drm.c

@@ -423,6 +423,15 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
 			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
 	}
 
+	/* Reject seamless transition when active changed */
+	if (crtc_state->active_changed &&
+		((dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR) ||
+		(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK))) {
+		DSI_ERR("seamless upon active changed 0x%x %d\n",
+			dsi_mode.dsi_mode_flags, crtc_state->active_changed);
+		return false;
+	}
+
 	/* convert back to drm mode, propagating the private info & flags */
 	dsi_convert_to_drm_mode(&dsi_mode, adjusted_mode);
 
@@ -911,6 +920,17 @@ int dsi_conn_pre_kickoff(struct drm_connector *connector,
 	return dsi_display_pre_kickoff(connector, display, params);
 }
 
+int dsi_conn_prepare_commit(void *display,
+		struct msm_display_conn_params *params)
+{
+	if (!display || !params) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	return dsi_display_pre_commit(display, params);
+}
+
 void dsi_conn_enable_event(struct drm_connector *connector,
 		uint32_t event_idx, bool enable, void *display)
 {
@@ -925,7 +945,8 @@ void dsi_conn_enable_event(struct drm_connector *connector,
 			event_idx, &event_info, enable);
 }
 
-int dsi_conn_post_kickoff(struct drm_connector *connector)
+int dsi_conn_post_kickoff(struct drm_connector *connector,
+	struct msm_display_conn_params *params)
 {
 	struct drm_encoder *encoder;
 	struct dsi_bridge *c_bridge;
@@ -933,6 +954,7 @@ int dsi_conn_post_kickoff(struct drm_connector *connector)
 	struct dsi_display *display;
 	struct dsi_display_ctrl *m_ctrl, *ctrl;
 	int i, rc = 0;
+	bool enable;
 
 	if (!connector || !connector->state) {
 		DSI_ERR("invalid connector or connector state\n");
@@ -978,6 +1000,12 @@ int dsi_conn_post_kickoff(struct drm_connector *connector)
 	/* ensure dynamic clk switch flag is reset */
 	c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_DYN_CLK;
 
+	if (params->qsync_update) {
+		enable = (params->qsync_mode > 0) ? true : false;
+		display_for_each_ctrl(i, display)
+			dsi_ctrl_setup_avr(display->ctrl[i].ctrl, enable);
+	}
+
 	return 0;
 }
 

+ 13 - 1
msm/dsi/dsi_drm.h

@@ -120,9 +120,11 @@ int dsi_conn_pre_kickoff(struct drm_connector *connector,
 /**
  * dsi_display_post_kickoff - program post kickoff-time features
  * @connector: Pointer to drm connector structure
+ * @params: Parameters for post kickoff programming
  * Returns: Zero on success
  */
-int dsi_conn_post_kickoff(struct drm_connector *connector);
+int dsi_conn_post_kickoff(struct drm_connector *connector,
+		struct msm_display_conn_params *params);
 
 /**
  * dsi_convert_to_drm_mode - Update drm mode with dsi mode information
@@ -134,4 +136,14 @@ void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
 
 u64 dsi_drm_find_bit_clk_rate(void *display,
 			      const struct drm_display_mode *drm_mode);
+
+/**
+ * dsi_conn_prepare_commit - program pre commit time features
+ * @display: Pointer to private display structure
+ * @params: Parameters for pre commit programming
+ * Returns: Zero on success
+ */
+int dsi_conn_prepare_commit(void *display,
+		struct msm_display_conn_params *params);
+
 #endif /* _DSI_DRM_H_ */

+ 34 - 18
msm/dsi/dsi_panel.c

@@ -499,6 +499,13 @@ static int dsi_panel_power_off(struct dsi_panel *panel)
 	if (gpio_is_valid(panel->reset_config.lcd_mode_sel_gpio))
 		gpio_set_value(panel->reset_config.lcd_mode_sel_gpio, 0);
 
+	if (gpio_is_valid(panel->panel_test_gpio)) {
+		rc = gpio_direction_input(panel->panel_test_gpio);
+		if (rc)
+			DSI_WARN("set dir for panel test gpio failed rc=%d\n",
+				 rc);
+	}
+
 	rc = dsi_panel_set_pinctrl_state(panel, false);
 	if (rc) {
 		DSI_ERR("[%s] failed set pinctrl state, rc=%d\n", panel->name,
@@ -2492,6 +2499,7 @@ static int dsi_panel_parse_phy_timing(struct dsi_display_mode *mode,
 	u32 len, i;
 	int rc = 0;
 	struct dsi_display_mode_priv_info *priv_info;
+	u64 pixel_clk_khz;
 
 	if (!mode || !mode->priv_info)
 		return -EINVAL;
@@ -2520,9 +2528,11 @@ static int dsi_panel_parse_phy_timing(struct dsi_display_mode *mode,
 		 *  function dsi_panel_calc_dsi_transfer_time( )
 		 *  as we set it based on dsi clock or mdp transfer time.
 		 */
-		mode->pixel_clk_khz = (DSI_H_TOTAL_DSC(&mode->timing) *
+		pixel_clk_khz = (DSI_H_TOTAL_DSC(&mode->timing) *
 				DSI_V_TOTAL(&mode->timing) *
-				mode->timing.refresh_rate) / 1000;
+				mode->timing.refresh_rate);
+		do_div(pixel_clk_khz, 1000);
+		mode->pixel_clk_khz = pixel_clk_khz;
 	}
 
 	return rc;
@@ -3257,13 +3267,14 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
 	if (rc)
 		DSI_ERR("failed to parse dfps configuration, rc=%d\n", rc);
 
-	if (!(panel->dfps_caps.dfps_support)) {
-		/* qsync and dfps are mutually exclusive features */
-		rc = dsi_panel_parse_qsync_caps(panel, of_node);
-		if (rc)
-			DSI_DEBUG("failed to parse qsync features, rc=%d\n",
-					rc);
-	}
+	rc = dsi_panel_parse_qsync_caps(panel, of_node);
+	if (rc)
+		DSI_DEBUG("failed to parse qsync features, rc=%d\n", rc);
+
+	/* allow qsync support only if DFPS is with VFP approach */
+	if ((panel->dfps_caps.dfps_support) &&
+	    !(panel->dfps_caps.type == DSI_DFPS_IMMEDIATE_VFP))
+		panel->qsync_min_fps = 0;
 
 	rc = dsi_panel_parse_dyn_clk_caps(panel);
 	if (rc)
@@ -3570,7 +3581,8 @@ void dsi_panel_calc_dsi_transfer_time(struct dsi_host_common_cfg *config,
 		struct dsi_display_mode *mode, u32 frame_threshold_us)
 {
 	u32 frame_time_us,nslices;
-	u64 min_bitclk_hz, total_active_pixels, bits_per_line, pclk_rate_hz;
+	u64 min_bitclk_hz, total_active_pixels, bits_per_line, pclk_rate_hz,
+		dsi_transfer_time_us, pixel_clk_khz;
 	struct msm_display_dsc_info *dsc = mode->timing.dsc;
 	struct dsi_mode_info *timing = &mode->timing;
 	struct dsi_display_mode *display_mode;
@@ -3605,15 +3617,18 @@ void dsi_panel_calc_dsi_transfer_time(struct dsi_host_common_cfg *config,
 					* timing->v_active));
 		/* calculate the actual bitclk needed to transfer the frame */
 		min_bitclk_hz = (total_active_pixels * (timing->refresh_rate) *
-				(config->bpp)) / (config->num_data_lanes);
+				(config->bpp));
+		do_div(min_bitclk_hz, config->num_data_lanes);
 	}
 
 	timing->min_dsi_clk_hz = min_bitclk_hz;
 
 	if (timing->clk_rate_hz) {
 		/* adjust the transfer time proportionately for bit clk*/
-		timing->dsi_transfer_time_us = mult_frac(frame_time_us,
-				min_bitclk_hz, timing->clk_rate_hz);
+		dsi_transfer_time_us = frame_time_us * min_bitclk_hz;
+		do_div(dsi_transfer_time_us, timing->clk_rate_hz);
+		timing->dsi_transfer_time_us = dsi_transfer_time_us;
+
 	} else if (mode->priv_info->mdp_transfer_time_us) {
 		timing->dsi_transfer_time_us =
 			mode->priv_info->mdp_transfer_time_us;
@@ -3655,13 +3670,14 @@ void dsi_panel_calc_dsi_transfer_time(struct dsi_host_common_cfg *config,
 	}
 
 	/* Calculate pclk_khz to update modeinfo */
-	pclk_rate_hz = mult_frac(min_bitclk_hz, frame_time_us,
-			timing->dsi_transfer_time_us);
+	pclk_rate_hz =  min_bitclk_hz * frame_time_us;
+	do_div(pclk_rate_hz, timing->dsi_transfer_time_us);
 
-	display_mode->pixel_clk_khz = mult_frac(pclk_rate_hz,
-			config->num_data_lanes, config->bpp);
+	pixel_clk_khz = pclk_rate_hz * config->num_data_lanes;
+	do_div(pixel_clk_khz, config->bpp);
+	display_mode->pixel_clk_khz = pixel_clk_khz;
 
-	do_div(display_mode->pixel_clk_khz, 1000);
+	display_mode->pixel_clk_khz =  display_mode->pixel_clk_khz / 1000;
 }
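
Note: the mult_frac() replacements above follow the pattern required for
64-bit math on 32-bit kernels (the "Fix 32-bit compilation issues" change in
this merge): form the 64-bit product explicitly, then divide with do_div(),
which divides its u64 argument in place by a 32-bit divisor and returns the
remainder. A minimal illustration with made-up numbers:

	#include <asm/div64.h>

	u64 rate = 297000000ULL * 1001;	/* 64-bit intermediate product */
	u32 rem = do_div(rate, 1000);	/* now rate /= 1000; rem = old rate % 1000 */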
 
 

+ 3 - 3
msm/dsi/dsi_phy_timing_v4_0.c

@@ -26,7 +26,7 @@ int32_t dsi_phy_hw_v4_0_calc_clk_zero(s64 rec_temp1, s64 mult)
 	s64 rec_temp2, rec_temp3;
 
 	rec_temp2 = rec_temp1;
-	rec_temp3 = roundup(div_s64(rec_temp2, 8), mult);
+	rec_temp3 = roundup64(div_s64(rec_temp2, 8), mult);
 	return (div_s64(rec_temp3, mult) - 1);
 }
 
@@ -37,7 +37,7 @@ int32_t dsi_phy_hw_v4_0_calc_clk_trail_rec_min(s64 temp_mul,
 
 	rec_temp1 = temp_mul;
 	rec_temp2 = div_s64(rec_temp1, 8);
-	rec_temp3 = roundup(rec_temp2, mult);
+	rec_temp3 = roundup64(rec_temp2, mult);
 	return (div_s64(rec_temp3, mult) - 1);
 }
 
@@ -53,7 +53,7 @@ int32_t dsi_phy_hw_v4_0_calc_hs_zero(s64 temp1, s64 mult)
 {
 	s64 rec_temp2, rec_min;
 
-	rec_temp2 = roundup((temp1 / 8), mult);
+	rec_temp2 = roundup64((temp1 / 8), mult);
 	rec_min = rec_temp2 - (1 * mult);
 	return div_s64(rec_min, mult);
 }
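
Note: roundup() open-codes a division, which fails to link for 64-bit
operands on 32-bit targets, hence the switch to roundup64() here. A
functionally equivalent sketch built on the generic math64 helpers
(my_roundup64 is illustrative, not this tree's definition):

	#include <linux/math64.h>

	static inline u64 my_roundup64(u64 x, u64 y)
	{
		/* round x up to the next multiple of y without a raw 64-bit '/' */
		return div64_u64(x + y - 1, y) * y;
	}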

+ 8 - 2
msm/msm_drv.h

@@ -553,12 +553,18 @@ struct msm_roi_list {
 /**
  * struct - msm_display_kickoff_params - info for display features at kickoff
  * @rois: Regions of interest structure for mapping CRTC to Connector output
- * @qsync_mode: Qsync mode, where 0: disabled 1: continuous mode
- * @qsync_update: Qsync settings were changed/updated
  */
 struct msm_display_kickoff_params {
 	struct msm_roi_list *rois;
 	struct drm_msm_ext_hdr_metadata *hdr_meta;
+};
+
+/**
+ * struct - msm_display_conn_params - info of dpu display features
+ * @qsync_mode: Qsync mode, where 0: disabled 1: continuous mode 2: oneshot
+ * @qsync_update: Qsync settings were changed/updated
+ */
+struct msm_display_conn_params {
 	uint32_t qsync_mode;
 	bool qsync_update;
 };
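
Note: the qsync fields move out of the kickoff-time bundle into the new
msm_display_conn_params, which now flows through prepare_commit
(dsi_display_pre_commit() above) and post_kickoff rather than pre_kickoff.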

+ 2 - 1
msm/msm_smmu.c

@@ -94,6 +94,7 @@ static void msm_smmu_detach(struct msm_mmu *mmu, const char * const *names,
 		return;
 
 	pm_runtime_get_sync(mmu->dev);
+	msm_dma_unmap_all_for_dev(client->dev);
 	iommu_detach_device(client->domain, client->dev);
 	pm_runtime_put_sync(mmu->dev);
 
@@ -451,7 +452,7 @@ static int msm_smmu_probe(struct platform_device *pdev)
 		client->dev->dma_parms = devm_kzalloc(client->dev,
 				sizeof(*client->dev->dma_parms), GFP_KERNEL);
 	dma_set_max_seg_size(client->dev, DMA_BIT_MASK(32));
-	dma_set_seg_boundary(client->dev, DMA_BIT_MASK(64));
+	dma_set_seg_boundary(client->dev, (unsigned long)DMA_BIT_MASK(64));
 
 	iommu_set_fault_handler(client->domain,
 			msm_smmu_fault_handler, (void *)client);
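
Note: dma_set_seg_boundary() takes an unsigned long mask, while
DMA_BIT_MASK(64) expands to a 64-bit constant, so on a 32-bit build the
implicit narrowing can warn; the explicit cast documents that the truncation
is intended, consistent with the other 32-bit compilation fixes in this merge.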

+ 111 - 15
msm/sde/sde_connector.c

@@ -58,6 +58,7 @@ static const struct drm_prop_enum_list e_power_mode[] = {
 static const struct drm_prop_enum_list e_qsync_mode[] = {
 	{SDE_RM_QSYNC_DISABLED,	"none"},
 	{SDE_RM_QSYNC_CONTINUOUS_MODE,	"continuous"},
+	{SDE_RM_QSYNC_ONE_SHOT_MODE,	"one_shot"},
 };
 static const struct drm_prop_enum_list e_frame_trigger_mode[] = {
 	{FRAME_DONE_WAIT_DEFAULT, "default"},
@@ -621,21 +622,51 @@ void sde_connector_set_colorspace(struct sde_connector *c_conn)
 
 void sde_connector_set_qsync_params(struct drm_connector *connector)
 {
-	struct sde_connector *c_conn = to_sde_connector(connector);
-	u32 qsync_propval;
+	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state;
+	u32 qsync_propval = 0;
+	bool prop_dirty;
 
 	if (!connector)
 		return;
 
+	c_conn = to_sde_connector(connector);
+	c_state = to_sde_connector_state(connector->state);
 	c_conn->qsync_updated = false;
-	qsync_propval = sde_connector_get_property(c_conn->base.state,
-			CONNECTOR_PROP_QSYNC_MODE);
 
-	if (qsync_propval != c_conn->qsync_mode) {
-		SDE_DEBUG("updated qsync mode %d -> %d\n", c_conn->qsync_mode,
-				qsync_propval);
-		c_conn->qsync_updated = true;
-		c_conn->qsync_mode = qsync_propval;
+	prop_dirty = msm_property_is_dirty(&c_conn->property_info,
+					&c_state->property_state,
+					CONNECTOR_PROP_QSYNC_MODE);
+	if (prop_dirty) {
+		qsync_propval = sde_connector_get_property(c_conn->base.state,
+						CONNECTOR_PROP_QSYNC_MODE);
+		if (qsync_propval != c_conn->qsync_mode) {
+			SDE_DEBUG("updated qsync mode %d -> %d\n",
+					c_conn->qsync_mode, qsync_propval);
+			c_conn->qsync_updated = true;
+			c_conn->qsync_mode = qsync_propval;
+		}
+	}
+}
+
+void sde_connector_complete_qsync_commit(struct drm_connector *conn,
+				struct msm_display_conn_params *params)
+{
+	struct sde_connector *c_conn;
+
+	if (!conn || !params) {
+		SDE_ERROR("invalid params\n");
+		return;
+	}
+
+	c_conn = to_sde_connector(conn);
+
+	if (c_conn && c_conn->qsync_updated &&
+		(c_conn->qsync_mode == SDE_RM_QSYNC_ONE_SHOT_MODE)) {
+		/* Reset qsync states if mode is one shot */
+		params->qsync_mode = c_conn->qsync_mode = 0;
+		params->qsync_update = true;
+		SDE_EVT32(conn->base.id, c_conn->qsync_mode);
 	}
 }
 
@@ -728,6 +759,7 @@ int sde_connector_pre_kickoff(struct drm_connector *connector)
 	struct sde_connector *c_conn;
 	struct sde_connector_state *c_state;
 	struct msm_display_kickoff_params params;
+	struct dsi_display *display;
 	int rc;
 
 	if (!connector) {
@@ -742,6 +774,15 @@ int sde_connector_pre_kickoff(struct drm_connector *connector)
 		return -EINVAL;
 	}
 
+	/*
+	 * During pre kickoff DCS commands have to have an
+	 * asynchronous wait to avoid an unnecessary stall
+	 * in pre-kickoff. This flag must be reset at the
+	 * end of display pre-kickoff.
+	 */
+	display = (struct dsi_display *)c_conn->display;
+	display->queue_cmd_waits = true;
+
 	rc = _sde_connector_update_dirty_properties(connector);
 	if (rc) {
 		SDE_EVT32(connector->base.id, SDE_EVTLOG_ERROR);
@@ -753,19 +794,50 @@ int sde_connector_pre_kickoff(struct drm_connector *connector)
 
 	params.rois = &c_state->rois;
 	params.hdr_meta = &c_state->hdr_meta;
-	params.qsync_update = false;
+
+	SDE_EVT32_VERBOSE(connector->base.id);
+
+	rc = c_conn->ops.pre_kickoff(connector, c_conn->display, &params);
+
+	display->queue_cmd_waits = false;
+end:
+	return rc;
+}
+
+int sde_connector_prepare_commit(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state;
+	struct msm_display_conn_params params;
+	int rc;
+
+	if (!connector) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(connector);
+	c_state = to_sde_connector_state(connector->state);
+	if (!c_conn->display) {
+		SDE_ERROR("invalid connector display\n");
+		return -EINVAL;
+	}
+
+	if (!c_conn->ops.prepare_commit)
+		return 0;
+
+	memset(&params, 0, sizeof(params));
 
 	if (c_conn->qsync_updated) {
 		params.qsync_mode = c_conn->qsync_mode;
 		params.qsync_update = true;
-		SDE_EVT32(connector->base.id, params.qsync_mode);
 	}
 
-	SDE_EVT32_VERBOSE(connector->base.id);
+	rc = c_conn->ops.prepare_commit(c_conn->display, &params);
 
-	rc = c_conn->ops.pre_kickoff(connector, c_conn->display, &params);
+	SDE_EVT32(connector->base.id, params.qsync_mode,
+		  params.qsync_update, rc);
 
-end:
 	return rc;
 }
 
@@ -1376,6 +1448,10 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
 		if (rc)
 			SDE_ERROR_CONN(c_conn, "cannot set hdr info %d\n", rc);
 		break;
+	case CONNECTOR_PROP_QSYNC_MODE:
+		msm_property_set_dirty(&c_conn->property_info,
+				&c_state->property_state, idx);
+		break;
 	default:
 		break;
 	}
@@ -2027,13 +2103,33 @@ static int sde_connector_atomic_check(struct drm_connector *connector,
 		struct drm_connector_state *new_conn_state)
 {
 	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state;
+	bool qsync_dirty = false, has_modeset = false;
 
 	if (!connector) {
 		SDE_ERROR("invalid connector\n");
-		return 0;
+		return -EINVAL;
+	}
+
+	if (!new_conn_state) {
+		SDE_ERROR("invalid connector state\n");
+		return -EINVAL;
 	}
 
 	c_conn = to_sde_connector(connector);
+	c_state = to_sde_connector_state(new_conn_state);
+
+	has_modeset = sde_crtc_atomic_check_has_modeset(new_conn_state->state,
+						new_conn_state->crtc);
+	qsync_dirty = msm_property_is_dirty(&c_conn->property_info,
+					&c_state->property_state,
+					CONNECTOR_PROP_QSYNC_MODE);
+
+	SDE_DEBUG("has_modeset %d qsync_dirty %d\n", has_modeset, qsync_dirty);
+	if (has_modeset && qsync_dirty) {
+		SDE_ERROR("invalid qsync update during modeset\n");
+		return -EINVAL;
+	}
 
 	if (c_conn->ops.atomic_check)
 		return c_conn->ops.atomic_check(connector,

+ 31 - 1
msm/sde/sde_connector.h

@@ -229,9 +229,12 @@ struct sde_connector_ops {
 	/**
 	 * post_kickoff - display to program post kickoff-time features
 	 * @connector: Pointer to drm connector structure
+	 * @params: Parameter bundle of connector-stored information for
+	 *	post kickoff programming into the display
 	 * Returns: Zero on success
 	 */
-	int (*post_kickoff)(struct drm_connector *connector);
+	int (*post_kickoff)(struct drm_connector *connector,
+		struct msm_display_conn_params *params);
 
 	/**
 	 * post_open - calls connector to process post open functionalities
@@ -325,6 +328,16 @@ struct sde_connector_ops {
 	 * Returns: zero for success, negative for failure
 	 */
 	int (*get_default_lms)(void *display, u32 *num_lm);
+
+	/**
+	 * prepare_commit - trigger display to program pre-commit time features
+	 * @display: Pointer to private display structure
+	 * @params: Parameter bundle of connector-stored information for
+	 *	pre commit time programming into the display
+	 * Returns: Zero on success
+	 */
+	int (*prepare_commit)(void *display,
+		struct msm_display_conn_params *params);
 };
 
 /**
@@ -732,6 +745,16 @@ int sde_connector_get_dpms(struct drm_connector *connector);
  */
 void sde_connector_set_qsync_params(struct drm_connector *connector);
 
+/**
+ * sde_connector_complete_qsync_commit - callback signalling completion
+ *			of qsync, if modified for the current commit
+ * @conn   - Pointer to drm connector object
+ * @params - Parameter bundle of connector-stored information for
+ *	post kickoff programming into the display
+ */
+void sde_connector_complete_qsync_commit(struct drm_connector *conn,
+			struct msm_display_conn_params *params);
+
 /**
 * sde_connector_get_dyn_hdr_meta - returns pointer to connector state's dynamic
 *				   HDR metadata info
@@ -801,6 +824,13 @@ int sde_connector_register_custom_event(struct sde_kms *kms,
  */
 int sde_connector_pre_kickoff(struct drm_connector *connector);
 
+/**
+ * sde_connector_prepare_commit - trigger commit time feature programming
+ * @connector: Pointer to drm connector object
+ * Returns: Zero on success
+ */
+int sde_connector_prepare_commit(struct drm_connector *connector);
+
 /**
  * sde_connector_needs_offset - adjust the output fence offset based on
  *                              display type

+ 3 - 2
msm/sde/sde_crtc.c

@@ -276,7 +276,7 @@ static ssize_t measured_fps_show(struct device *device,
 {
 	struct drm_crtc *crtc;
 	struct sde_crtc *sde_crtc;
-	unsigned int fps_int, fps_decimal;
+	uint64_t fps_int, fps_decimal;
 	u64 fps = 0, frame_count = 0;
 	ktime_t current_time;
 	int i = 0, current_time_index;
@@ -353,7 +353,7 @@ static ssize_t measured_fps_show(struct device *device,
 		}
 	}
 
-	fps_int = (unsigned int) sde_crtc->fps_info.measured_fps;
+	fps_int = (uint64_t) sde_crtc->fps_info.measured_fps;
 	fps_decimal = do_div(fps_int, 10);
 	return scnprintf(buf, PAGE_SIZE,
 	"fps: %llu.%llu duration:%d frame_count:%lld\n", fps_int, fps_decimal,
@@ -1783,6 +1783,7 @@ int sde_crtc_get_secure_transition_ops(struct drm_crtc *crtc,
 
 	smmu_state = &sde_kms->smmu_state;
 	smmu_state->prev_state = smmu_state->state;
+	smmu_state->prev_secure_level = smmu_state->secure_level;
 
 	sde_crtc = to_sde_crtc(crtc);
 	secure_level = sde_crtc_get_secure_level(crtc, crtc->state);

+ 19 - 0
msm/sde/sde_crtc.h

@@ -714,6 +714,25 @@ static inline int sde_crtc_get_secure_level(struct drm_crtc *crtc,
 			CRTC_PROP_SECURITY_LEVEL);
 }
 
+/** sde_crtc_atomic_check_has_modeset - checks if the new_crtc_state in the
+ *	drm_atomic_state has a modeset
+ * @state : pointer to drm_atomic_state
+ * @crtc : Pointer to drm crtc structure
+ * Returns true if crtc has modeset
+ */
+static inline bool sde_crtc_atomic_check_has_modeset(
+	struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+	struct drm_crtc_state *crtc_state;
+
+	if (!state || !crtc)
+		return false;
+
+	crtc_state = drm_atomic_get_new_crtc_state(state,
+					crtc);
+	return (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state));
+}
+
 /**
  * sde_crtc_get_secure_transition - determines the operations to be
  * performed before transitioning to secure state

+ 18 - 8
msm/sde/sde_encoder.c

@@ -2581,8 +2581,8 @@ static int _sde_encoder_rc_idle(struct drm_encoder *drm_enc,
 		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
 				SDE_EVTLOG_ERROR);
 		goto end;
-	} else if (sde_crtc_frame_pending(sde_enc->crtc) > 1) {
-		SDE_ERROR_ENC(sde_enc, "skip idle entry");
+	} else if (sde_crtc_frame_pending(sde_enc->crtc)) {
+		SDE_DEBUG_ENC(sde_enc, "skip idle entry");
 		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
 			sde_crtc_frame_pending(sde_enc->crtc),
 			SDE_EVTLOG_ERROR);
@@ -4703,11 +4703,6 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 	SDE_DEBUG_ENC(sde_enc, "\n");
 	SDE_EVT32(DRMID(drm_enc));
 
-	/* update the qsync parameters for the current frame */
-	if (sde_enc->cur_master)
-		sde_connector_set_qsync_params(
-				sde_enc->cur_master->connector);
-
 	is_cmd_mode = sde_encoder_check_curr_mode(drm_enc,
 				MSM_DISPLAY_CMD_MODE);
 	if (sde_enc->cur_master && sde_enc->cur_master->connector
@@ -4960,7 +4955,7 @@ void sde_encoder_prepare_commit(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc;
 	struct sde_encoder_phys *phys;
-	int i;
+	int i, rc = 0;
 	struct sde_hw_ctl *ctl;
 
 	if (!drm_enc) {
@@ -4969,6 +4964,11 @@ void sde_encoder_prepare_commit(struct drm_encoder *drm_enc)
 	}
 	sde_enc = to_sde_encoder_virt(drm_enc);
 
+	/* update the qsync parameters for the current frame */
+	if (sde_enc->cur_master)
+		sde_connector_set_qsync_params(
+				sde_enc->cur_master->connector);
+
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		phys = sde_enc->phys_encs[i];
 		if (phys && phys->ops.prepare_commit)
@@ -4986,6 +4986,16 @@ void sde_encoder_prepare_commit(struct drm_encoder *drm_enc)
 				ctl->ops.clear_pending_flush(ctl);
 		}
 	}
+
+	if (sde_enc->cur_master && sde_enc->cur_master->connector) {
+		rc = sde_connector_prepare_commit(
+				  sde_enc->cur_master->connector);
+		if (rc)
+			SDE_ERROR_ENC(sde_enc,
+				      "prepare commit failed conn %d rc %d\n",
+				      sde_enc->cur_master->connector->base.id,
+				      rc);
+	}
 }
 
 void sde_encoder_helper_setup_misr(struct sde_encoder_phys *phys_enc,

+ 3 - 0
msm/sde/sde_encoder_phys_cmd.c

@@ -1433,6 +1433,8 @@ static bool _sde_encoder_phys_cmd_needs_vsync_change(
 static int _sde_encoder_phys_cmd_wait_for_wr_ptr(
 		struct sde_encoder_phys *phys_enc)
 {
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
 	struct sde_encoder_wait_info wait_info = {0};
 	int ret;
 	bool frame_pending = true;
@@ -1463,6 +1465,7 @@ static int _sde_encoder_phys_cmd_wait_for_wr_ptr(
 		ret = frame_pending ? ret : 0;
 	}
 
+	cmd_enc->wr_ptr_wait_success = (ret == 0) ? true : false;
 	return ret;
 }
 

+ 16 - 4
msm/sde/sde_encoder_phys_vid.c

@@ -332,7 +332,7 @@ static void _sde_encoder_phys_vid_setup_avr(
 			return;
 		}
 
-		if (qsync_min_fps >= default_fps) {
+		if (qsync_min_fps > default_fps) {
 			SDE_ERROR_VIDENC(vid_enc,
 				"qsync fps %d must be less than default %d\n",
 				qsync_min_fps, default_fps);
@@ -977,9 +977,6 @@ static int sde_encoder_phys_vid_prepare_for_kickoff(
 		vid_enc->error_count = 0;
 	}
 
-	if (sde_connector_is_qsync_updated(phys_enc->connector))
-		_sde_encoder_phys_vid_avr_ctrl(phys_enc);
-
 	return rc;
 }
 
@@ -1124,6 +1121,20 @@ static void sde_encoder_phys_vid_handle_post_kickoff(
 	}
 }
 
+static void sde_encoder_phys_vid_prepare_for_commit(
+		struct sde_encoder_phys *phys_enc)
+{
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder parameters\n");
+		return;
+	}
+
+	if (sde_connector_is_qsync_updated(phys_enc->connector))
+		_sde_encoder_phys_vid_avr_ctrl(phys_enc);
+
+}
+
 static void sde_encoder_phys_vid_irq_control(struct sde_encoder_phys *phys_enc,
 		bool enable)
 {
@@ -1258,6 +1269,7 @@ static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
 	ops->get_wr_line_count = sde_encoder_phys_vid_get_line_count;
 	ops->wait_dma_trigger = sde_encoder_phys_vid_wait_dma_trigger;
 	ops->wait_for_active = sde_encoder_phys_vid_wait_for_active;
+	ops->prepare_commit = sde_encoder_phys_vid_prepare_for_commit;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_vid_init(

+ 7 - 6
msm/sde/sde_hw_intf.c

@@ -83,9 +83,6 @@
 #define INTF_TEAR_AUTOREFRESH_CONFIG    0x2B4
 #define INTF_TEAR_TEAR_DETECT_CTRL      0x2B8
 
-#define AVR_CONTINUOUS_MODE   1
-#define AVR_ONE_SHOT_MODE     2
-
 static struct sde_intf_cfg *_intf_offset(enum sde_intf intf,
 		struct sde_mdss_cfg *m,
 		void __iomem *addr,
@@ -129,7 +126,7 @@ static int sde_hw_intf_avr_setup(struct sde_hw_intf *ctx,
 	u32 min_fps, default_fps, diff_fps;
 	u32 vsync_period_slow;
 	u32 avr_vtotal;
-	u32 add_porches;
+	u32 add_porches = 0;
 
 	if (!ctx || !params || !avr_params) {
 		SDE_ERROR("invalid input parameter(s)\n");
@@ -146,7 +143,10 @@ static int sde_hw_intf_avr_setup(struct sde_hw_intf *ctx,
 	vsync_period = params->vsync_pulse_width +
 			params->v_back_porch + params->height +
 			params->v_front_porch;
-	add_porches = mult_frac(vsync_period, diff_fps, min_fps);
+
+	if (diff_fps)
+		add_porches = mult_frac(vsync_period, diff_fps, min_fps);
+
 	vsync_period_slow = vsync_period + add_porches;
 	avr_vtotal = vsync_period_slow * hsync_period;
 
@@ -168,7 +168,8 @@ static void sde_hw_intf_avr_ctrl(struct sde_hw_intf *ctx,
 	c = &ctx->hw;
 	if (avr_params->avr_mode) {
 		avr_ctrl = BIT(0);
-		avr_mode = (avr_params->avr_mode == AVR_ONE_SHOT_MODE) ?
+		avr_mode =
+		(avr_params->avr_mode == SDE_RM_QSYNC_ONE_SHOT_MODE) ?
 			(BIT(0) | BIT(8)) : 0x0;
 	}
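
Worked example for the porch-stretch math above (illustrative numbers): with
default_fps = 60 and min_fps = 48, diff_fps = 12, so add_porches =
vsync_period * 12 / 48 = vsync_period / 4. The stretched vsync_period_slow is
then 1.25x nominal, i.e. 60 / 1.25 = 48 Hz, the panel's Qsync floor. The new
diff_fps guard keeps add_porches at 0 when the two rates are equal.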
 

+ 105 - 46
msm/sde/sde_kms.c

@@ -46,6 +46,7 @@
 #include "sde_plane.h"
 #include "sde_crtc.h"
 #include "sde_reg_dma.h"
+#include "sde_connector.h"
 
 #include <soc/qcom/scm.h>
 #include "soc/qcom/secure_buffer.h"
@@ -334,88 +335,118 @@ static int _sde_kms_scm_call(struct sde_kms *sde_kms, int vmid)
 
 static int _sde_kms_detach_all_cb(struct sde_kms *sde_kms, u32 vmid)
 {
-	u32 ret = 0;
+	u32 ret;
 
 	if (atomic_inc_return(&sde_kms->detach_all_cb) > 1)
-		goto end;
+		return 0;
 
 	/* detach_all_contexts */
 	ret = sde_kms_mmu_detach(sde_kms, false);
 	if (ret) {
 		SDE_ERROR("failed to detach all cb ret:%d\n", ret);
-		goto end;
+		goto mmu_error;
 	}
 
 	ret = _sde_kms_scm_call(sde_kms, vmid);
-	if (ret)
-		goto end;
+	if (ret) {
+		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
+		goto scm_error;
+	}
 
-end:
+	return 0;
+
+scm_error:
+	sde_kms_mmu_attach(sde_kms, false);
+mmu_error:
+	atomic_dec(&sde_kms->detach_all_cb);
 	return ret;
 }
 
-static int _sde_kms_attach_all_cb(struct sde_kms *sde_kms, int vmid)
+static int _sde_kms_attach_all_cb(struct sde_kms *sde_kms, u32 vmid,
+		u32 old_vmid)
 {
-	u32 ret = 0;
+	u32 ret;
 
 	if (atomic_dec_return(&sde_kms->detach_all_cb) != 0)
-		goto end;
+		return 0;
 
 	ret = _sde_kms_scm_call(sde_kms, vmid);
-	if (ret)
-		goto end;
+	if (ret) {
+		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
+		goto scm_error;
+	}
 
 	/* attach_all_contexts */
 	ret = sde_kms_mmu_attach(sde_kms, false);
 	if (ret) {
 		SDE_ERROR("failed to attach all cb ret:%d\n", ret);
-		goto end;
+		goto mmu_error;
 	}
 
-end:
+	return 0;
+
+mmu_error:
+	_sde_kms_scm_call(sde_kms, old_vmid);
+scm_error:
+	atomic_inc(&sde_kms->detach_all_cb);
 	return ret;
 }
 
 static int _sde_kms_detach_sec_cb(struct sde_kms *sde_kms, int vmid)
 {
-	u32 ret = 0;
+	u32 ret;
 
 	if (atomic_inc_return(&sde_kms->detach_sec_cb) > 1)
-		goto end;
+		return 0;
 
 	/* detach secure_context */
 	ret = sde_kms_mmu_detach(sde_kms, true);
 	if (ret) {
 		SDE_ERROR("failed to detach sec cb ret:%d\n", ret);
-		goto end;
+		goto mmu_error;
 	}
 
 	ret = _sde_kms_scm_call(sde_kms, vmid);
-	if (ret)
-		goto end;
+	if (ret) {
+		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
+		goto scm_error;
+	}
 
-end:
+	return 0;
+
+scm_error:
+	sde_kms_mmu_attach(sde_kms, true);
+mmu_error:
+	atomic_dec(&sde_kms->detach_sec_cb);
 	return ret;
 }
 
-static int _sde_kms_attach_sec_cb(struct sde_kms *sde_kms, int vmid)
+static int _sde_kms_attach_sec_cb(struct sde_kms *sde_kms, u32 vmid,
+		u32 old_vmid)
 {
-	u32 ret = 0;
+	u32 ret;
 
 	if (atomic_dec_return(&sde_kms->detach_sec_cb) != 0)
-		goto end;
+		return 0;
 
 	ret = _sde_kms_scm_call(sde_kms, vmid);
-	if (ret)
-		goto end;
+	if (ret) {
+		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
+		goto scm_error;
+	}
 
 	ret = sde_kms_mmu_attach(sde_kms, true);
 	if (ret) {
 		SDE_ERROR("failed to attach sec cb ret:%d\n", ret);
-		goto end;
+		goto mmu_error;
 	}
 
-end:
+	return 0;
+
+mmu_error:
+	_sde_kms_scm_call(sde_kms, old_vmid);
+scm_error:
+	atomic_inc(&sde_kms->detach_sec_cb);
 	return ret;
 }
 
@@ -435,6 +466,7 @@ static int _sde_kms_sui_misr_ctrl(struct sde_kms *sde_kms,
 
 		ret = _sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, true);
 		if (ret) {
+			sde_crtc_misr_setup(crtc, false, 0);
 			pm_runtime_put_sync(sde_kms->dev->dev);
 			return ret;
 		}
@@ -473,8 +505,10 @@ static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
 	/* enable sui misr if requested, before the transition */
 	if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ) {
 		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, true);
-		if (ret)
+		if (ret) {
+			smmu_state->sui_misr_state = NONE;
 			goto end;
+		}
 	}
 
 	mutex_lock(&sde_kms->secure_transition_lock);
@@ -486,7 +520,8 @@ static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
 		break;
 
 	case ATTACH_ALL_REQ:
-		ret = _sde_kms_attach_all_cb(sde_kms, VMID_CP_PIXEL);
+		ret = _sde_kms_attach_all_cb(sde_kms, VMID_CP_PIXEL,
+				VMID_CP_SEC_DISPLAY);
 		if (!ret) {
 			smmu_state->state = ATTACHED;
 			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
@@ -503,7 +538,9 @@ static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
 		break;
 
 	case ATTACH_SEC_REQ:
-		ret = _sde_kms_attach_sec_cb(sde_kms, VMID_CP_PIXEL);
+		vmid = (smmu_state->secure_level == SDE_DRM_SEC_ONLY) ?
+				VMID_CP_SEC_DISPLAY : VMID_CP_CAMERA_PREVIEW;
+		ret = _sde_kms_attach_sec_cb(sde_kms, VMID_CP_PIXEL, vmid);
 		if (!ret) {
 			smmu_state->state = ATTACHED;
 			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
@@ -527,29 +564,34 @@ static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
 	}
 
 end:
-	smmu_state->sui_misr_state = NONE;
-	smmu_state->transition_type = NONE;
 	smmu_state->transition_error = false;
 
-	/*
-	 * If switch failed, toggling secure_level is enough since
-	 * there are only two secure levels - secure/non-secure
-	 */
 	if (ret) {
 		smmu_state->transition_error = true;
+		SDE_ERROR(
+		  "crtc%d: req_state %d, new_state %d, sec_lvl %d, ret %d\n",
+			DRMID(crtc), old_smmu_state, smmu_state->state,
+			smmu_state->secure_level, ret);
+
 		smmu_state->state = smmu_state->prev_state;
-		smmu_state->secure_level = !smmu_state->secure_level;
+		smmu_state->secure_level = smmu_state->prev_secure_level;
+
+		if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ)
+			_sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
 	}
 
-	SDE_DEBUG(
-		"crtc %d: old_state %d, req_state %d, new_state %d, sec_lvl %d, ret %d\n",
-			DRMID(crtc), smmu_state->prev_state, old_smmu_state,
-			smmu_state->state, smmu_state->secure_level, ret);
-	SDE_EVT32(DRMID(crtc), smmu_state->prev_state,
-			smmu_state->state, smmu_state->transition_type,
-			smmu_state->transition_error, smmu_state->secure_level,
+	SDE_DEBUG("crtc %d: req_state %d, new_state %d, sec_lvl %d, ret %d\n",
+			DRMID(crtc), old_smmu_state, smmu_state->state,
+			smmu_state->secure_level, ret);
+	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->prev_state,
+			smmu_state->transition_type,
+			smmu_state->transition_error,
+			smmu_state->secure_level, smmu_state->prev_secure_level,
 			smmu_state->sui_misr_state, ret, SDE_EVTLOG_FUNC_EXIT);
 
+	smmu_state->sui_misr_state = NONE;
+	smmu_state->transition_type = NONE;
+
 	return ret;
 }
 
@@ -565,6 +607,7 @@ static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
 	struct drm_device *dev = sde_kms->dev;
 	int i, ops = 0, ret = 0;
 	bool old_valid_fb = false;
+	struct sde_kms_smmu_state_data *smmu_state = &sde_kms->smmu_state;
 
 	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
 		if (!crtc->state || !crtc->state->active)
@@ -601,8 +644,10 @@ static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
 			return ops;
 		}
 
-		if (!ops)
+		if (!ops) {
+			smmu_state->transition_error = false;
 			goto no_ops;
+		}
 
 		SDE_DEBUG("%d:secure operations(%x) started on state:%pK\n",
 				crtc->base.id, ops, crtc->state);
@@ -948,6 +993,7 @@ static void sde_kms_complete_commit(struct msm_kms *kms,
 	struct drm_crtc_state *old_crtc_state;
 	struct drm_connector *connector;
 	struct drm_connector_state *old_conn_state;
+	struct msm_display_conn_params params;
 	int i, rc = 0;
 
 	if (!kms || !old_state)
@@ -980,7 +1026,12 @@ static void sde_kms_complete_commit(struct msm_kms *kms,
 		c_conn = to_sde_connector(connector);
 		if (!c_conn->ops.post_kickoff)
 			continue;
-		rc = c_conn->ops.post_kickoff(connector);
+
+		memset(&params, 0, sizeof(params));
+
+		sde_connector_complete_qsync_commit(connector, &params);
+
+		rc = c_conn->ops.post_kickoff(connector, &params);
 		if (rc) {
 			pr_err("Connector Post kickoff failed rc=%d\n",
 					 rc);
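
post_kickoff now receives a params structure that is zeroed and then filled by the qsync-commit helper before the connector callback runs. A hedged sketch of this out-params hand-off; conn_params, complete_qsync_commit, and run_post_kickoff are illustrative names, not driver API:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* hypothetical, trimmed stand-in for msm_display_conn_params */
    struct conn_params {
        unsigned int qsync_mode;
        bool qsync_update;
    };

    struct connector_ops {
        int (*post_kickoff)(struct conn_params *params);
    };

    /* publish the qsync state decided during commit prepare */
    static void complete_qsync_commit(struct conn_params *p)
    {
        p->qsync_mode = 1;
        p->qsync_update = true;
    }

    static int run_post_kickoff(const struct connector_ops *ops)
    {
        struct conn_params params;

        memset(&params, 0, sizeof(params));  /* clean slate per connector */
        complete_qsync_commit(&params);
        return ops->post_kickoff ? ops->post_kickoff(&params) : 0;
    }

    static int dump(struct conn_params *p)
    {
        printf("qsync mode %u update %d\n", p->qsync_mode, p->qsync_update);
        return 0;
    }

    int main(void)
    {
        struct connector_ops ops = { .post_kickoff = dump };
        return run_post_kickoff(&ops);
    }
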
@@ -2641,7 +2692,6 @@ static int sde_kms_pm_suspend(struct device *dev)
 
 	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
 	SDE_EVT32(0);
-	pm_runtime_put_noidle(dev);
 
 	/* disable hot-plug polling */
 	drm_kms_helper_poll_disable(ddev);
@@ -2750,6 +2800,15 @@ unlock:
 	}
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
+
+	/*
+	 * The pm runtime framework avoids repeated runtime_suspend calls by
+	 * checking runtime_status, but this call helps when a pm_suspend
+	 * races with a doze_suspend/power_off commit: it drops the extra
+	 * vote held across suspend and adds it back below, allowing power
+	 * collapse during pm_suspend.
+	 */
+	pm_runtime_put_sync(dev);
 	pm_runtime_get_noresume(dev);
 
 	return ret;
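
The put_sync/get_noresume pair added above can be modelled with a plain counter: the synchronous put lets the usage count reach zero so the device may power-collapse, and get_noresume immediately re-takes the reference without waking the device, leaving the count balanced for system resume. A toy model, counter values illustrative only:

    #include <stdio.h>

    /* toy model of the runtime-PM usage counter across pm_suspend */
    static int usage_count = 1;   /* vote still held when suspend runs */

    static void put_sync(void)
    {
        /* models pm_runtime_put_sync(): may suspend synchronously */
        if (--usage_count == 0)
            printf("count 0 -> runtime suspend, power collapse allowed\n");
    }

    static void get_noresume(void)
    {
        /* models pm_runtime_get_noresume(): re-take the reference
         * without waking the device, keeping the counter balanced */
        usage_count++;
    }

    int main(void)
    {
        put_sync();        /* drop the vote a racing commit left behind */
        get_noresume();    /* restore the count system resume expects */
        return 0;
    }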

+ 2 - 0
msm/sde/sde_kms.h

@@ -189,6 +189,7 @@ enum frame_trigger_mode_type {
  * @state: current state of smmu context banks
  * @prev_state: previous state of smmu context banks
  * @secure_level: secure level cached from crtc
+ * @prev_secure_level: previous secure level
  * @transition_type: transition request type
  * @transition_error: whether there is error while transitioning the state
  */
@@ -196,6 +197,7 @@ struct sde_kms_smmu_state_data {
 	uint32_t state;
 	uint32_t prev_state;
 	uint32_t secure_level;
+	uint32_t prev_secure_level;
 	uint32_t transition_type;
 	uint32_t transition_error;
 	uint32_t sui_misr_state;

+ 4 - 11
msm/sde/sde_rm.c

@@ -754,7 +754,7 @@ static bool _sde_rm_check_lm(
 		struct sde_rm_hw_blk **ds,
 		struct sde_rm_hw_blk **pp)
 {
-	bool is_valid_dspp, is_valid_ds, ret;
+	bool is_valid_dspp, is_valid_ds, ret = true;
 
 	is_valid_dspp = (lm_cfg->dspp != DSPP_MAX) ? true : false;
 	is_valid_ds = (lm_cfg->ds != DS_MAX) ? true : false;
@@ -771,8 +771,6 @@ static bool _sde_rm_check_lm(
 		ret = is_valid_dspp;
 	else if (RM_RQ_DS(reqs))
 		ret = is_valid_ds;
-	else
-		ret = !(is_valid_dspp || is_valid_ds);
 
 	if (!ret) {
 		SDE_DEBUG(
@@ -1496,17 +1494,12 @@ static int _sde_rm_make_lm_rsvp(struct sde_rm *rm, struct sde_rm_rsvp *rsvp,
 		if (splash_display->lm_cnt != reqs->topology->num_lm)
 			SDE_DEBUG("Configured splash LMs != needed LM cnt\n");
 	}
+
 	/*
-	 * Assign LMs and blocks whose usage is tied to them: DSPP & Pingpong.
-	 * Do assignment preferring to give away low-resource mixers first:
-	 * - Check mixers without DSPPs
-	 * - Only then allow to grab from mixers with DSPP capability
+	 * Assign LMs and blocks whose usage is tied to them:
+	 * DSPP & Pingpong.
 	 */
 	ret = _sde_rm_reserve_lms(rm, rsvp, reqs, hw_ids);
-	if (ret && !RM_RQ_DSPP(reqs)) {
-		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
-		ret = _sde_rm_reserve_lms(rm, rsvp, reqs, hw_ids);
-	}
 
 	return ret;
 }
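
With the retry removed, _sde_rm_check_lm defaults to accepting a mixer and rejects only when an explicit DSPP or DS requirement is unmet; previously, DSPP-capable mixers were refused for plain requests and only granted on a second pass. A small sketch of the before/after predicate, with hypothetical flag names:

    #include <stdbool.h>
    #include <stdio.h>

    static bool check_lm(bool needs_dspp, bool needs_ds,
                         bool has_dspp, bool has_ds)
    {
        bool ret = true;            /* default: any mixer is acceptable */

        if (needs_dspp)
            ret = has_dspp;         /* must provide a DSPP */
        else if (needs_ds)
            ret = has_ds;           /* must provide a dest scaler */
        /* the removed branch was: else ret = !(has_dspp || has_ds);
         * which blocked DSPP-capable mixers from plain requests */
        return ret;
    }

    int main(void)
    {
        /* a plain request can now use a DSPP-capable mixer directly */
        printf("%d\n", check_lm(false, false, true, false));  /* 1 */
        return 0;
    }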

+ 3 - 0
msm/sde/sde_rm.h

@@ -67,10 +67,13 @@ enum sde_rm_topology_control {
  *                              disable state.
  * @SDE_RM_QSYNC_CONTINUOUS_MODE: If set, Qsync is enabled in continuous
  *                              mode.
+ * @SDE_RM_QSYNC_ONE_SHOT_MODE: If set, Qsync is enabled in one-shot mode.
+ *
  */
 enum sde_rm_qsync_modes {
 	SDE_RM_QSYNC_DISABLED,
 	SDE_RM_QSYNC_CONTINUOUS_MODE,
+	SDE_RM_QSYNC_ONE_SHOT_MODE
 };
 
 /**
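
A consumer of this enum would typically dispatch over the three modes; a minimal sketch with hypothetical names (qsync_mode_name is not a driver function):

    #include <stdio.h>

    enum qsync_modes { QSYNC_DISABLED, QSYNC_CONTINUOUS_MODE,
                       QSYNC_ONE_SHOT_MODE };

    /* hypothetical helper: map the requested mode to a description */
    static const char *qsync_mode_name(enum qsync_modes m)
    {
        switch (m) {
        case QSYNC_CONTINUOUS_MODE: return "continuous";
        case QSYNC_ONE_SHOT_MODE:   return "one-shot";  /* new here */
        case QSYNC_DISABLED:
        default:                    return "disabled";
        }
    }

    int main(void)
    {
        printf("%s\n", qsync_mode_name(QSYNC_ONE_SHOT_MODE));
        return 0;
    }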

+ 55 - 18
msm/sde_dbg.c

@@ -45,6 +45,8 @@
 #define DBGBUS_PERIPH	0x418
 
 #define TEST_MASK(id, tp)	((id << 4) | (tp << 1) | BIT(0))
+#define TEST_EXT_MASK(id, tp)	(((tp >> 3) << 24) | (id << 4) \
+		| ((tp & 0x7) << 1) | BIT(0))
 
 /* following offsets are with respect to MDP VBIF base for DBG BUS access */
 #define MMSS_VBIF_CLKON			0x4
@@ -271,6 +273,13 @@ static void _sde_debug_bus_ppb0_dump(void __iomem *mem_base,
 			entry->wr_addr, entry->block_id, entry->test_id, val);
 }
 
+static void _sde_debug_bus_ltm_dump(void __iomem *mem_base,
+		struct sde_debug_bus_entry *entry, u32 val)
+{
+	dev_info(sde_dbg_base.dev, "ltm 0x%x %d %d 0x%x\n",
+			entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
 static void _sde_debug_bus_ppb1_dump(void __iomem *mem_base,
 		struct sde_debug_bus_entry *entry, u32 val)
 {
@@ -1997,23 +2006,47 @@ static struct sde_debug_bus_entry dbg_bus_sde_kona[] = {
 	{ DBGBUS_DSPP, 9, 0},
 
 	/* ltm */
-	{ DBGBUS_DSPP, 45, 0},
-	{ DBGBUS_DSPP, 45, 1},
-	{ DBGBUS_DSPP, 45, 2},
-	{ DBGBUS_DSPP, 45, 3},
-	{ DBGBUS_DSPP, 45, 4},
-	{ DBGBUS_DSPP, 45, 5},
-	{ DBGBUS_DSPP, 45, 6},
-	{ DBGBUS_DSPP, 45, 7},
-
-	{ DBGBUS_DSPP, 46, 0},
-	{ DBGBUS_DSPP, 46, 1},
-	{ DBGBUS_DSPP, 46, 2},
-	{ DBGBUS_DSPP, 46, 3},
-	{ DBGBUS_DSPP, 46, 4},
-	{ DBGBUS_DSPP, 46, 5},
-	{ DBGBUS_DSPP, 46, 6},
-	{ DBGBUS_DSPP, 46, 7},
+	{ DBGBUS_DSPP, 45, 0, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 1, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 2, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 3, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 4, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 5, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 6, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 7, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 8, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 9, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 10, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 11, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 12, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 13, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 14, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 15, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 16, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 17, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 18, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 45, 31, _sde_debug_bus_ltm_dump},
+
+	{ DBGBUS_DSPP, 46, 0, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 1, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 2, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 3, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 4, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 5, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 6, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 7, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 8, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 9, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 10, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 11, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 12, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 13, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 14, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 15, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 16, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 17, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 18, _sde_debug_bus_ltm_dump},
+	{ DBGBUS_DSPP, 46, 31, _sde_debug_bus_ltm_dump},
 
 	/* blend */
 	/* LM0 */
@@ -3216,7 +3249,11 @@ static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus)
 
 	for (i = 0; i < bus->cmn.entries_size; i++) {
 		head = bus->entries + i;
-		writel_relaxed(TEST_MASK(head->block_id, head->test_id),
+		if (head->test_id > 0x7)
+			writel_relaxed(TEST_EXT_MASK(head->block_id,
+				head->test_id), mem_base + head->wr_addr);
+		else
+			writel_relaxed(TEST_MASK(head->block_id, head->test_id),
 				mem_base + head->wr_addr);
 		wmb(); /* make sure test bits were written */
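
TEST_EXT_MASK keeps the original layout for the low bits (test-point bits [2:0] at bits [3:1], enable at bit 0) and moves the test point's high bits to bit 24 and up, which is what lets the new LTM entries select test points up to 31; the dump loop above picks the extended encoding only for test_id > 0x7, so existing entries are unchanged. A standalone check of the packing:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)                 (1u << (n))
    #define TEST_MASK(id, tp)      (((id) << 4) | ((tp) << 1) | BIT(0))
    #define TEST_EXT_MASK(id, tp)  ((((tp) >> 3) << 24) | ((id) << 4) | \
                                    (((tp) & 0x7) << 1) | BIT(0))

    int main(void)
    {
        uint32_t id = 45, tp = 31;   /* LTM block, extended test point */

        /* tp[4:3] land at bits [25:24], tp[2:0] at [3:1], bit 0 enables */
        printf("0x%08x\n", TEST_EXT_MASK(id, tp));  /* 0x030002df */

        /* for test points <= 7 both encodings agree */
        printf("%d\n", TEST_MASK(id, 5) == TEST_EXT_MASK(id, 5));  /* 1 */
        return 0;
    }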
 

+ 2 - 2
msm/sde_power_handle.h

@@ -14,8 +14,8 @@
 #define SDE_POWER_HANDLE_ENABLE_NRT_BUS_IB_QUOTA	0
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
 
-#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA	3000000000
-#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA	3000000000
+#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA	3000000000ULL
+#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA	3000000000ULL
 
 #include <linux/sde_io_util.h>
 #include <soc/qcom/cx_ipeak.h>
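
On the ULL suffixes here (and in the dsi_pll_7nm.c comparisons later in this change): unsuffixed, 3000000000 takes the first signed integer type that fits, which varies between ABIs; the suffix pins the constants to unsigned long long so assignments to and comparisons against u64 values behave identically on 32- and 64-bit builds. A standalone illustration:

    #include <stdio.h>

    int main(void)
    {
        /* unsuffixed, the constant is signed (long on LP64, long long
         * on ILP32); the suffix makes it unsigned long long on both */
        printf("%zu %zu\n", sizeof(3000000000), sizeof(3000000000ULL));

        unsigned long long ib_quota = 3000000000ULL;  /* bytes/sec vote */
        printf("%llu\n", ib_quota * 2);  /* 64-bit math, no overflow */
        return 0;
    }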

+ 88 - 86
msm/sde_rsc.c

@@ -15,7 +15,6 @@
 #include <linux/mutex.h>
 #include <linux/of_platform.h>
 #include <linux/module.h>
-#include <linux/pm_runtime.h>
 #include <linux/msm-bus.h>
 
 #include <soc/qcom/rpmh.h>
@@ -399,6 +398,86 @@ static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
 	return ret;
 }
 
+static int sde_rsc_resource_disable(struct sde_rsc_priv *rsc)
+{
+	struct dss_module_power *mp;
+	u32 reg_bus_hdl;
+
+	if (!rsc) {
+		pr_err("invalid drv data\n");
+		return -EINVAL;
+	}
+
+	if (atomic_read(&rsc->resource_refcount) == 0) {
+		pr_err("%pS: invalid rsc resource disable call\n",
+			__builtin_return_address(0));
+		return -EINVAL;
+	}
+
+	if (atomic_dec_return(&rsc->resource_refcount) != 0)
+		return 0;
+
+	mp = &rsc->phandle.mp;
+	msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+	reg_bus_hdl = rsc->phandle.reg_bus_hdl;
+	if (reg_bus_hdl)
+		msm_bus_scale_client_update_request(reg_bus_hdl,
+				VOTE_INDEX_DISABLE);
+	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, false);
+
+	return 0;
+}
+
+static int sde_rsc_resource_enable(struct sde_rsc_priv *rsc)
+{
+	struct dss_module_power *mp;
+	int rc = 0;
+	u32 reg_bus_hdl;
+
+	if (!rsc) {
+		pr_err("invalid drv data\n");
+		return -EINVAL;
+	}
+
+	if (atomic_inc_return(&rsc->resource_refcount) != 1)
+		return 0;
+
+	mp = &rsc->phandle.mp;
+	rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, true);
+	if (rc) {
+		pr_err("failed to enable vregs rc=%d\n", rc);
+		goto end;
+	}
+
+	reg_bus_hdl = rsc->phandle.reg_bus_hdl;
+	if (reg_bus_hdl) {
+		rc = msm_bus_scale_client_update_request(reg_bus_hdl,
+				VOTE_INDEX_LOW);
+		if (rc) {
+			pr_err("failed to set reg bus vote rc=%d\n", rc);
+			goto reg_bus_hdl_err;
+		}
+	}
+
+	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+	if (rc) {
+		pr_err("clock enable failed rc:%d\n", rc);
+		goto clk_err;
+	}
+
+	return rc;
+
+clk_err:
+	if (reg_bus_hdl)
+		msm_bus_scale_client_update_request(reg_bus_hdl,
+				VOTE_INDEX_DISABLE);
+reg_bus_hdl_err:
+	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, false);
+end:
+	atomic_dec(&rsc->resource_refcount);
+	return rc;
+}
+
 static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
 	struct sde_rsc_cmd_config *config,
 	struct sde_rsc_client *caller_client,
@@ -824,7 +903,7 @@ int sde_rsc_client_state_update(struct sde_rsc_client *caller_client,
 		caller_client->name, state);
 
 	if (rsc->current_state == SDE_RSC_IDLE_STATE)
-		pm_runtime_get_sync(rsc->dev);
+		sde_rsc_resource_enable(rsc);
 
 	switch (state) {
 	case SDE_RSC_IDLE_STATE:
@@ -882,7 +961,7 @@ int sde_rsc_client_state_update(struct sde_rsc_client *caller_client,
 
 clk_disable:
 	if (rsc->current_state == SDE_RSC_IDLE_STATE)
-		pm_runtime_put_sync(rsc->dev);
+		sde_rsc_resource_disable(rsc);
 end:
 	mutex_unlock(&rsc->client_lock);
 	return rc;
@@ -959,7 +1038,7 @@ int sde_rsc_client_trigger_vote(struct sde_rsc_client *caller_client,
 		rsc->bw_config.ib_vote[i] = rsc->bw_config.new_ib_vote[i];
 	}
 
-	rc = pm_runtime_get_sync(rsc->dev);
+	rc = sde_rsc_resource_enable(rsc);
 	if (rc < 0)
 		goto clk_enable_fail;
 
@@ -990,7 +1069,7 @@ int sde_rsc_client_trigger_vote(struct sde_rsc_client *caller_client,
 		rsc->hw_ops.tcs_use_ok(rsc);
 
 end:
-	pm_runtime_put_sync(rsc->dev);
+	sde_rsc_resource_disable(rsc);
 clk_enable_fail:
 	mutex_unlock(&rsc->client_lock);
 
@@ -1299,7 +1378,7 @@ static void sde_rsc_deinit(struct platform_device *pdev,
 	if (!rsc)
 		return;
 
-	pm_runtime_put_sync(rsc->dev);
+	sde_rsc_resource_disable(rsc);
 	if (rsc->sw_fs_enabled)
 		regulator_disable(rsc->fs);
 	if (rsc->fs)
@@ -1314,82 +1393,6 @@ static void sde_rsc_deinit(struct platform_device *pdev,
 	kfree(rsc);
 }
 
-#ifdef CONFIG_PM
-static int sde_rsc_runtime_suspend(struct device *dev)
-{
-	struct sde_rsc_priv *rsc = dev_get_drvdata(dev);
-	struct dss_module_power *mp;
-	u32 reg_bus_hdl;
-
-	if (!rsc) {
-		pr_err("invalid drv data\n");
-		return -EINVAL;
-	}
-
-	mp = &rsc->phandle.mp;
-	msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
-	reg_bus_hdl = rsc->phandle.reg_bus_hdl;
-	if (reg_bus_hdl)
-		msm_bus_scale_client_update_request(reg_bus_hdl,
-				VOTE_INDEX_DISABLE);
-	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, false);
-
-	return 0;
-}
-
-static int sde_rsc_runtime_resume(struct device *dev)
-{
-	struct sde_rsc_priv *rsc = dev_get_drvdata(dev);
-	struct dss_module_power *mp;
-	int rc = 0;
-	u32 reg_bus_hdl;
-
-	if (!rsc) {
-		pr_err("invalid drv data\n");
-		return -EINVAL;
-	}
-
-	mp = &rsc->phandle.mp;
-	rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, true);
-	if (rc) {
-		pr_err("failed to enable vregs rc=%d\n", rc);
-		goto end;
-	}
-
-	reg_bus_hdl = rsc->phandle.reg_bus_hdl;
-	if (reg_bus_hdl) {
-		rc = msm_bus_scale_client_update_request(reg_bus_hdl,
-				VOTE_INDEX_LOW);
-		if (rc) {
-			pr_err("failed to set reg bus vote rc=%d\n", rc);
-			goto reg_bus_hdl_err;
-		}
-	}
-
-	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
-	if (rc) {
-		pr_err("clock enable failed rc:%d\n", rc);
-		goto clk_err;
-	}
-
-	return rc;
-
-clk_err:
-	if (reg_bus_hdl)
-		msm_bus_scale_client_update_request(reg_bus_hdl,
-				VOTE_INDEX_DISABLE);
-reg_bus_hdl_err:
-	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, false);
-end:
-	return rc;
-}
-#endif
-
-static const struct dev_pm_ops sde_rsc_pm_ops = {
-	SET_RUNTIME_PM_OPS(sde_rsc_runtime_suspend,
-				sde_rsc_runtime_resume, NULL)
-};
-
 /**
  * sde_rsc_bind - bind rsc device with controlling device
  * @dev:        Pointer to base of platform device
@@ -1556,8 +1559,7 @@ static int sde_rsc_probe(struct platform_device *pdev)
 
 	rsc->sw_fs_enabled = true;
 
-	pm_runtime_enable(rsc->dev);
-	ret = pm_runtime_get_sync(rsc->dev);
+	ret = sde_rsc_resource_enable(rsc);
 	if (ret < 0) {
 		pr_err("failed to enable sde rsc power resources rc:%d\n", ret);
 		goto sde_rsc_fail;
@@ -1566,12 +1568,13 @@ static int sde_rsc_probe(struct platform_device *pdev)
 	if (sde_rsc_timer_calculate(rsc, NULL, SDE_RSC_IDLE_STATE))
 		goto sde_rsc_fail;
 
-	pm_runtime_put_sync(rsc->dev);
+	sde_rsc_resource_disable(rsc);
 
 	INIT_LIST_HEAD(&rsc->client_list);
 	INIT_LIST_HEAD(&rsc->event_list);
 	mutex_init(&rsc->client_lock);
 	init_waitqueue_head(&rsc->rsc_vsync_waitq);
+	atomic_set(&rsc->resource_refcount, 0);
 
 	pr_info("sde rsc index:%d probed successfully\n",
 				SDE_RSC_INDEX + counter);
@@ -1644,7 +1647,6 @@ static struct platform_driver sde_rsc_platform_driver = {
 		.name   = "sde_rsc",
 		.of_match_table = dt_match,
 		.suppress_bind_attrs = true,
-		.pm     = &sde_rsc_pm_ops,
 	},
 };
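
The two helpers added above replace runtime PM with an explicit refcount: only the 0-to-1 enable powers vregs, the reg-bus vote, and clocks (in that order), only the 1-to-0 disable tears them down in reverse, and a failed enable unwinds partial setup before dropping the count. A compact userspace model of the counting, using C11 atomics in place of the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int refcount;   /* zero-initialized */

    static int resource_enable(void)
    {
        /* only the first caller actually powers things up */
        if (atomic_fetch_add(&refcount, 1) != 0)
            return 0;

        printf("vreg on -> reg bus vote -> clocks on\n");
        /* on any failure: undo in reverse order, then drop the count */
        return 0;
    }

    static int resource_disable(void)
    {
        if (atomic_load(&refcount) == 0) {
            fprintf(stderr, "unbalanced disable\n");
            return -1;
        }
        /* only the last caller powers things down */
        if (atomic_fetch_sub(&refcount, 1) == 1)
            printf("clocks off -> reg bus unvote -> vreg off\n");
        return 0;
    }

    int main(void)
    {
        resource_enable();
        resource_enable();    /* nested: no-op */
        resource_disable();   /* nested: no-op */
        resource_disable();   /* last: powers down */
        return 0;
    }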
 

+ 2 - 0
msm/sde_rsc_priv.h

@@ -185,6 +185,7 @@ struct sde_rsc_bw_config {
  * rsc_vsync_waitq:   Queue to wait for the vsync.
  * bw_config:		check sde_rsc_bw_config structure description.
  * dev:			rsc device node
+ * resource_refcount:	Tracks the rsc resource enable refcount.
  */
 struct sde_rsc_priv {
 	u32 version;
@@ -225,6 +226,7 @@ struct sde_rsc_priv {
 
 	struct sde_rsc_bw_config bw_config;
 	struct device *dev;
+	atomic_t resource_refcount;
 };
 
 /**

+ 2 - 2
pll/dsi_pll.h

@@ -23,8 +23,8 @@ struct lpfr_cfg {
 struct dsi_pll_vco_clk {
 	struct clk_hw	hw;
 	unsigned long	ref_clk_rate;
-	unsigned long	min_rate;
-	unsigned long	max_rate;
+	u64	min_rate;
+	u64	max_rate;
 	u32		pll_en_seq_cnt;
 	struct lpfr_cfg *lpfr_lut;
 	u32		lpfr_lut_size;

+ 3 - 2
pll/dsi_pll_14nm_util.c

@@ -1062,10 +1062,11 @@ long pll_vco_round_rate_14nm(struct clk_hw *hw, unsigned long rate,
 						unsigned long *parent_rate)
 {
 	unsigned long rrate = rate;
-	u32 div;
+	u64 div;
 	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 
-	div = vco->min_rate / rate;
+	div = vco->min_rate;
+	do_div(div, rate);
 	if (div > 15) {
 		/* rate < 86.67 Mhz */
 		pr_err("rate=%lu NOT supportted\n", rate);
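
The do_div() conversion is the actual 32-bit fix: a plain '/' on a u64 would emit a call to libgcc's __aeabi_uldivmod, which the kernel does not link, so 64-bit division goes through do_div(), which divides in place and returns the remainder. A userspace approximation; the macro imitates the kernel helper with a GCC statement expression, and the 1.3 GHz minimum rate is an assumed example value:

    #include <stdint.h>
    #include <stdio.h>

    /* userspace stand-in for the kernel's do_div(): divides n (u64) by
     * base (u32) in place, evaluates to the remainder */
    #define do_div(n, base) ({            \
        uint32_t __rem = (n) % (base);    \
        (n) /= (base);                    \
        __rem;                            \
    })

    int main(void)
    {
        uint64_t div = 1300000000ULL;     /* e.g. vco->min_rate */
        unsigned long rate = 86670000;    /* requested rate */

        do_div(div, (uint32_t)rate);      /* div now holds min_rate/rate */
        printf("div=%llu (must be <= 15 for a supported rate)\n",
               (unsigned long long)div);
        return 0;
    }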

+ 6 - 6
pll/dsi_pll_7nm.c

@@ -576,11 +576,11 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll,
 		break;
 	case MDSS_DSI_PLL_7NM_V4_1:
 	default:
-		if (pll_freq <= 1000000000)
+		if (pll_freq <= 1000000000ULL)
 			regs->pll_clock_inverters = 0xA0;
-		else if (pll_freq <= 2500000000)
+		else if (pll_freq <= 2500000000ULL)
 			regs->pll_clock_inverters = 0x20;
-		else if (pll_freq <= 3020000000)
+		else if (pll_freq <= 3020000000ULL)
 			regs->pll_clock_inverters = 0x00;
 		else
 			regs->pll_clock_inverters = 0x40;
@@ -680,16 +680,16 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll,
 		break;
 	case MDSS_DSI_PLL_7NM_V4_1:
 	default:
-		if (vco_rate < 3100000000)
+		if (vco_rate < 3100000000ULL)
 			MDSS_PLL_REG_W(pll_base,
 					PLL_ANALOG_CONTROLS_FIVE_1, 0x01);
 		else
 			MDSS_PLL_REG_W(pll_base,
 					PLL_ANALOG_CONTROLS_FIVE_1, 0x03);
 
-		if (vco_rate < 1520000000)
+		if (vco_rate < 1520000000ULL)
 			MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x08);
-		else if (vco_rate < 2990000000)
+		else if (vco_rate < 2990000000ULL)
 			MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x01);
 		else
 			MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x00);

+ 1 - 1
rotator/sde_rotator_smmu.c

@@ -617,7 +617,7 @@ int sde_smmu_probe(struct platform_device *pdev)
 				sizeof(*dev->dma_parms), GFP_KERNEL);
 
 	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
-	dma_set_seg_boundary(dev, DMA_BIT_MASK(64));
+	dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));
 
 	iommu_set_fault_handler(sde_smmu->rot_domain,
 			sde_smmu_fault_handler, (void *)sde_smmu);
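
The cast in this last hunk is another 32-bit fix: dma_set_seg_boundary() takes an unsigned long, while DMA_BIT_MASK(64) expands to a 64-bit all-ones value, so on 32-bit targets the implicit narrowing draws a truncation warning; the cast makes the intended narrowing explicit. A small illustration with local stand-ins for the kernel helpers:

    #include <stdio.h>

    /* same shape as the kernel macro */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    static void set_seg_boundary(unsigned long mask)
    {
        printf("boundary mask: %#lx\n", mask);
    }

    int main(void)
    {
        /* the explicit cast documents the narrowing on 32-bit targets,
         * where unsigned long is 32 bits and the mask becomes 0xffffffff */
        set_seg_boundary((unsigned long)DMA_BIT_MASK(64));
        return 0;
    }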