
Merge "disp: msm: dsi: DSI PHY V4 support of dynamic clock switch"

qctecmdr 6 years ago
parent
commit
acf755573d

+ 19 - 0
msm/dsi/dsi_catalog.c

@@ -62,6 +62,8 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
 		dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle;
 	ctrl->ops.setup_avr = dsi_ctrl_hw_cmn_setup_avr;
 	ctrl->ops.set_continuous_clk = dsi_ctrl_hw_cmn_set_continuous_clk;
+	ctrl->ops.wait4dynamic_refresh_done =
+		dsi_ctrl_hw_cmn_wait4dynamic_refresh_done;
 
 	switch (version) {
 	case DSI_CTRL_VERSION_1_4:
@@ -215,6 +217,14 @@ static void dsi_catalog_phy_3_0_init(struct dsi_phy_hw *phy)
 	phy->ops.clamp_ctrl = dsi_phy_hw_v3_0_clamp_ctrl;
 	phy->ops.phy_lane_reset = dsi_phy_hw_v3_0_lane_reset;
 	phy->ops.toggle_resync_fifo = dsi_phy_hw_v3_0_toggle_resync_fifo;
+	phy->ops.dyn_refresh_ops.dyn_refresh_config =
+		dsi_phy_hw_v3_0_dyn_refresh_config;
+	phy->ops.dyn_refresh_ops.dyn_refresh_pipe_delay =
+		dsi_phy_hw_v3_0_dyn_refresh_pipe_delay;
+	phy->ops.dyn_refresh_ops.dyn_refresh_helper =
+		dsi_phy_hw_v3_0_dyn_refresh_helper;
+	phy->ops.dyn_refresh_ops.cache_phy_timings =
+		dsi_phy_hw_v3_0_cache_phy_timings;
 }
 
 /**
@@ -242,6 +252,15 @@ static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy)
 	phy->ops.phy_lane_reset = dsi_phy_hw_v4_0_lane_reset;
 	phy->ops.toggle_resync_fifo = dsi_phy_hw_v4_0_toggle_resync_fifo;
 	phy->ops.reset_clk_en_sel = dsi_phy_hw_v4_0_reset_clk_en_sel;
+
+	phy->ops.dyn_refresh_ops.dyn_refresh_config =
+		dsi_phy_hw_v4_0_dyn_refresh_config;
+	phy->ops.dyn_refresh_ops.dyn_refresh_pipe_delay =
+		dsi_phy_hw_v4_0_dyn_refresh_pipe_delay;
+	phy->ops.dyn_refresh_ops.dyn_refresh_helper =
+		dsi_phy_hw_v4_0_dyn_refresh_helper;
+	phy->ops.dyn_refresh_ops.cache_phy_timings =
+		dsi_phy_hw_v4_0_cache_phy_timings;
 }
 
 /**

+ 24 - 3
msm/dsi/dsi_catalog.h

@@ -57,15 +57,17 @@ int dsi_phy_timing_calc_init(struct dsi_phy_hw *phy,
  * @mode:       DSI mode information.
  * @host:       DSI host configuration.
  * @timing:     DSI phy lane configurations.
+ * @use_mode_bit_clk: Boolean to indicate whether to recalculate bit clk.
  *
  * This function setups the catalog information in the dsi_phy_hw object.
  *
  * return: error code for failure and 0 for success.
  */
 int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
-					    struct dsi_mode_info *mode,
-	struct dsi_host_common_cfg *host,
-	struct dsi_phy_per_lane_cfgs *timing);
+				       struct dsi_mode_info *mode,
+				       struct dsi_host_common_cfg *host,
+				       struct dsi_phy_per_lane_cfgs *timing,
+				       bool use_mode_bit_clk);
 
 /* Definitions for 14nm PHY hardware driver */
 void dsi_phy_hw_v2_0_regulator_enable(struct dsi_phy_hw *phy,
@@ -238,4 +240,23 @@ void dsi_ctrl_hw_22_config_clk_gating(struct dsi_ctrl_hw *ctrl, bool enable,
 
 void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable);
 
+/* dynamic refresh specific functions */
+void dsi_phy_hw_v3_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset);
+void dsi_phy_hw_v3_0_dyn_refresh_config(struct dsi_phy_hw *phy,
+				struct dsi_phy_cfg *cfg, bool is_master);
+void dsi_phy_hw_v3_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
+					    struct dsi_dyn_clk_delay *delay);
+
+int dsi_ctrl_hw_cmn_wait4dynamic_refresh_done(struct dsi_ctrl_hw *ctrl);
+int dsi_phy_hw_v3_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
+				      u32 *dst, u32 size);
+
+void dsi_phy_hw_v4_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset);
+void dsi_phy_hw_v4_0_dyn_refresh_config(struct dsi_phy_hw *phy,
+				struct dsi_phy_cfg *cfg, bool is_master);
+void dsi_phy_hw_v4_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
+					    struct dsi_dyn_clk_delay *delay);
+
+int dsi_phy_hw_v4_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
+				      u32 *dst, u32 size);
 #endif /* _DSI_CATALOG_H_ */

+ 14 - 0
msm/dsi/dsi_clk.h

@@ -307,4 +307,18 @@ int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, u32 index);
  */
 int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
 			  struct dsi_clk_link_set *child);
+
+/**
+ * dsi_clk_prepare_enable() - prepare and enable dsi src clocks
+ * @clk:       list of src clocks.
+ *
+ * @return:	Zero on success and err no on failure
+ */
+int dsi_clk_prepare_enable(struct dsi_clk_link_set *clk);
+
+/**
+ * dsi_clk_disable_unprepare() - disable and unprepare dsi src clocks
+ * @clk:       list of src clocks.
+ */
+void dsi_clk_disable_unprepare(struct dsi_clk_link_set *clk);
 #endif /* _DSI_CLK_H_ */

+ 51 - 6
msm/dsi/dsi_clk_manager.c

@@ -105,8 +105,9 @@ int dsi_clk_set_link_frequencies(void *client, struct link_clk_freq freq,
 
 /**
  * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock
- * @clks:      DSI link clock information.
- * @pixel_clk: Pixel clock rate in KHz.
+ * @clks:	DSI link clock information.
+ * @pixel_clk:	Pixel clock rate in KHz.
+ * @index:	Index of the DSI controller.
  *
  * return: error code in case of failure or 0 for success.
  */
@@ -128,9 +129,9 @@ int dsi_clk_set_pixel_clk_rate(void *client, u64 pixel_clk, u32 index)
 
 /**
  * dsi_clk_set_byte_clk_rate() - set frequency for byte clock
- * @client:       DSI clock client pointer.
- * @byte_clk: Pixel clock rate in Hz.
- * @index:      Index of the DSI controller.
+ * @client:	DSI clock client pointer.
+ * @byte_clk:	Byte clock rate in Hz.
+ * @index:	Index of the DSI controller.
  * return: error code in case of failure or 0 for success.
  */
 int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, u32 index)
@@ -138,6 +139,7 @@ int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, u32 index)
 	int rc = 0;
 	struct dsi_clk_client_info *c = client;
 	struct dsi_clk_mngr *mngr;
+	u64 byte_intf_rate;
 
 	mngr = c->mngr;
 	rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_clk, byte_clk);
@@ -146,8 +148,16 @@ int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, u32 index)
 	else
 		mngr->link_clks[index].freq.byte_clk_rate = byte_clk;
 
-	return rc;
+	if (mngr->link_clks[index].hs_clks.byte_intf_clk) {
+		byte_intf_rate = mngr->link_clks[index].freq.byte_clk_rate / 2;
+		rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_intf_clk,
+				  byte_intf_rate);
+		if (rc)
+			pr_err("failed to set byte intf clk rate, rc=%d\n",
+			       rc);
+	}
 
+	return rc;
 }
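
(For reference, byte_intf_clk runs at half the byte clock, as the division above shows;
with an illustrative 62.5 MHz byte clock: byte_intf_rate = 62.5 MHz / 2 = 31.25 MHz.)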
 
 /**
@@ -175,6 +185,41 @@ error:
 	return rc;
 }
 
+/**
+ * dsi_clk_prepare_enable() - prepare and enable dsi src clocks
+ * @clk:       list of src clocks.
+ *
+ * @return:	Zero on success and err no on failure.
+ */
+int dsi_clk_prepare_enable(struct dsi_clk_link_set *clk)
+{
+	int rc;
+
+	rc = clk_prepare_enable(clk->byte_clk);
+	if (rc) {
+		pr_err("failed to enable byte src clk %d\n", rc);
+		return rc;
+	}
+
+	rc = clk_prepare_enable(clk->pixel_clk);
+	if (rc) {
+		pr_err("failed to enable pixel src clk %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * dsi_clk_disable_unprepare() - disable and unprepare dsi src clocks
+ * @clk:       list of src clocks.
+ */
+void dsi_clk_disable_unprepare(struct dsi_clk_link_set *clk)
+{
+	clk_disable_unprepare(clk->pixel_clk);
+	clk_disable_unprepare(clk->byte_clk);
+}
+
 int dsi_core_clk_start(struct dsi_core_clks *c_clks)
 {
 	int rc = 0;

+ 27 - 1
msm/dsi/dsi_ctrl.c

@@ -2887,7 +2887,12 @@ int dsi_ctrl_update_host_config(struct dsi_ctrl *ctrl,
 		goto error;
 	}
 
-	if (!(flags & (DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR))) {
+	if (!(flags & (DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR |
+		       DSI_MODE_FLAG_DYN_CLK))) {
+		/*
+		 * for the dynamic clk switch case, link frequencies are
+		 * updated in dsi_display_dynamic_clk_switch().
+		 */
 		rc = dsi_ctrl_update_link_freqs(ctrl, config, clk_handle,
 				mode);
 		if (rc) {
@@ -3603,6 +3608,27 @@ void dsi_ctrl_irq_update(struct dsi_ctrl *dsi_ctrl, bool enable)
 	mutex_unlock(&dsi_ctrl->ctrl_lock);
 }
 
+/**
+ * dsi_ctrl_wait4dynamic_refresh_done() - Poll for dynamic refresh
+ *				done interrupt.
+ * @ctrl:                  DSI controller handle.
+ */
+int dsi_ctrl_wait4dynamic_refresh_done(struct dsi_ctrl *ctrl)
+{
+	int rc = 0;
+
+	if (!ctrl)
+		return 0;
+
+	mutex_lock(&ctrl->ctrl_lock);
+
+	if (ctrl->hw.ops.wait4dynamic_refresh_done)
+		rc = ctrl->hw.ops.wait4dynamic_refresh_done(&ctrl->hw);
+
+	mutex_unlock(&ctrl->ctrl_lock);
+	return rc;
+}
+
 /**
  * dsi_ctrl_drv_register() - register platform driver for dsi controller
  */

+ 7 - 0
msm/dsi/dsi_ctrl.h

@@ -796,4 +796,11 @@ int dsi_ctrl_pixel_format_to_bpp(enum dsi_pixel_format dst_format);
  * @enable:			   variable to control continuous clock.
  */
 void dsi_ctrl_set_continuous_clk(struct dsi_ctrl *dsi_ctrl, bool enable);
+
+/**
+ * dsi_ctrl_wait4dynamic_refresh_done() - Poll for dynamic refresh done
+ *					interrupt.
+ * @ctrl:                          DSI controller handle.
+ */
+int dsi_ctrl_wait4dynamic_refresh_done(struct dsi_ctrl *ctrl);
 #endif /* _DSI_CTRL_H_ */

+ 6 - 0
msm/dsi/dsi_ctrl_hw.h

@@ -817,6 +817,12 @@ struct dsi_ctrl_hw_ops {
 	 * @enable:	  Bool to control continuous clock request.
 	 */
 	void (*set_continuous_clk)(struct dsi_ctrl_hw *ctrl, bool enable);
+
+	/**
+	 * hw.ops.wait4dynamic_refresh_done() - Wait for dynamic refresh done
+	 * @ctrl:         Pointer to the controller host hardware.
+	 */
+	int (*wait4dynamic_refresh_done)(struct dsi_ctrl_hw *ctrl);
 };
 
 /*

+ 29 - 0
msm/dsi/dsi_ctrl_hw_cmn.c

@@ -1472,6 +1472,13 @@ void dsi_ctrl_hw_cmn_mask_error_intr(struct dsi_ctrl_hw *ctrl, u32 idx, bool en)
 		}
 	}
 
+	if (idx & BIT(DSI_PLL_UNLOCK_ERR)) {
+		if (en)
+			reg |= BIT(28);
+		else
+			reg &= ~BIT(28);
+	}
+
 	DSI_W32(ctrl, 0x10c, reg);
 	wmb(); /* ensure error is masked */
 }
@@ -1538,3 +1545,25 @@ void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable)
 	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
 	wmb(); /* make sure request is set */
 }
+
+int dsi_ctrl_hw_cmn_wait4dynamic_refresh_done(struct dsi_ctrl_hw *ctrl)
+{
+	int rc;
+	u32 const sleep_us = 1000;
+	u32 const timeout_us = 84000; /* approximately 5 vsyncs */
+	u32 reg = 0, dyn_refresh_done = BIT(28);
+
+	rc = readl_poll_timeout(ctrl->base + DSI_INT_CTRL, reg,
+				(reg & dyn_refresh_done), sleep_us, timeout_us);
+	if (rc) {
+		pr_err("wait4dynamic refresh timed out, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* ack dynamic refresh done status */
+	reg = DSI_R32(ctrl, DSI_INT_CTRL);
+	reg |= dyn_refresh_done;
+	DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+	return 0;
+}
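
(For reference, the 84 ms poll timeout above roughly equals five vsync periods on an
assumed 60 Hz panel: 5 x (1 s / 60) ~= 5 x 16.67 ms ~= 83.3 ms ~= 84000 us.)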

+ 0 - 37
msm/dsi/dsi_ctrl_reg.h

@@ -129,44 +129,7 @@
 #define DSI_SCRATCH_REGISTER_1                     (0x01F8)
 #define DSI_SCRATCH_REGISTER_2                     (0x01FC)
 #define DSI_DYNAMIC_REFRESH_CTRL                   (0x0200)
-#define DSI_DYNAMIC_REFRESH_PIPE_DELAY             (0x0204)
-#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2            (0x0208)
-#define DSI_DYNAMIC_REFRESH_PLL_DELAY              (0x020C)
 #define DSI_DYNAMIC_REFRESH_STATUS                 (0x0210)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL0              (0x0214)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL1              (0x0218)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL2              (0x021C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL3              (0x0220)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL4              (0x0224)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL5              (0x0228)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL6              (0x022C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL7              (0x0230)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL8              (0x0234)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL9              (0x0238)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL10             (0x023C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL11             (0x0240)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL12             (0x0244)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL13             (0x0248)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL14             (0x024C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL15             (0x0250)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL16             (0x0254)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL17             (0x0258)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL18             (0x025C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL19             (0x0260)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL20             (0x0264)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL21             (0x0268)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL22             (0x026C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL23             (0x0270)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL24             (0x0274)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL25             (0x0278)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL26             (0x027C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL27             (0x0280)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL28             (0x0284)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL29             (0x0288)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL30             (0x028C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL31             (0x0290)
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR         (0x0294)
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2        (0x0298)
 #define DSI_VIDEO_COMPRESSION_MODE_CTRL            (0x02A0)
 #define DSI_VIDEO_COMPRESSION_MODE_CTRL2           (0x02A4)
 #define DSI_COMMAND_COMPRESSION_MODE_CTRL          (0x02A8)

+ 40 - 0
msm/dsi/dsi_defs.h

@@ -87,6 +87,7 @@ enum dsi_op_mode {
  *                     New timing values are sent from DAL.
  * @DSI_MODE_FLAG_POMS:
  *         Seamless transition is dynamic panel operating mode switch
+ * @DSI_MODE_FLAG_DYN_CLK: Seamless transition is dynamic clock change
  */
 enum dsi_mode_flags {
 	DSI_MODE_FLAG_SEAMLESS			= BIT(0),
@@ -95,6 +96,7 @@ enum dsi_mode_flags {
 	DSI_MODE_FLAG_DMS			= BIT(3),
 	DSI_MODE_FLAG_VRR			= BIT(4),
 	DSI_MODE_FLAG_POMS			= BIT(5),
+	DSI_MODE_FLAG_DYN_CLK			= BIT(6),
 };
 
 /**
@@ -660,12 +662,50 @@ struct dsi_event_cb_info {
  * @DSI_FIFO_OVERFLOW:     DSI FIFO Overflow error
  * @DSI_FIFO_UNDERFLOW:    DSI FIFO Underflow error
  * @DSI_LP_Rx_TIMEOUT:     DSI LP/RX Timeout error
+ * @DSI_PLL_UNLOCK_ERR:	   DSI PLL unlock error
  */
 enum dsi_error_status {
 	DSI_FIFO_OVERFLOW = 1,
 	DSI_FIFO_UNDERFLOW,
 	DSI_LP_Rx_TIMEOUT,
+	DSI_PLL_UNLOCK_ERR,
 	DSI_ERR_INTR_ALL,
 };
 
+/* structure containing the delays required for dynamic clk */
+struct dsi_dyn_clk_delay {
+	u32 pipe_delay;
+	u32 pipe_delay2;
+	u32 pll_delay;
+};
+
+/* dynamic refresh control bits */
+enum dsi_dyn_clk_control_bits {
+	DYN_REFRESH_INTF_SEL = 1,
+	DYN_REFRESH_SYNC_MODE,
+	DYN_REFRESH_SW_TRIGGER,
+	DYN_REFRESH_SWI_CTRL,
+};
+
+/* convert dsi pixel format into bits per pixel */
+static inline int dsi_pixel_format_to_bpp(enum dsi_pixel_format fmt)
+{
+	switch (fmt) {
+	case DSI_PIXEL_FORMAT_RGB888:
+	case DSI_PIXEL_FORMAT_MAX:
+		return 24;
+	case DSI_PIXEL_FORMAT_RGB666:
+	case DSI_PIXEL_FORMAT_RGB666_LOOSE:
+		return 18;
+	case DSI_PIXEL_FORMAT_RGB565:
+		return 16;
+	case DSI_PIXEL_FORMAT_RGB111:
+		return 3;
+	case DSI_PIXEL_FORMAT_RGB332:
+		return 8;
+	case DSI_PIXEL_FORMAT_RGB444:
+		return 12;
+	}
+	return 24;
+}
 #endif /* _DSI_DEFS_H_ */

+ 540 - 320
msm/dsi/dsi_display.c

@@ -1620,14 +1620,12 @@ static int dsi_display_debugfs_deinit(struct dsi_display *display)
 static void adjust_timing_by_ctrl_count(const struct dsi_display *display,
 					struct dsi_display_mode *mode)
 {
-	if (display->ctrl_count > 1) {
-		mode->timing.h_active /= display->ctrl_count;
-		mode->timing.h_front_porch /= display->ctrl_count;
-		mode->timing.h_sync_width /= display->ctrl_count;
-		mode->timing.h_back_porch /= display->ctrl_count;
-		mode->timing.h_skew /= display->ctrl_count;
-		mode->pixel_clk_khz /= display->ctrl_count;
-	}
+	mode->timing.h_active /= display->ctrl_count;
+	mode->timing.h_front_porch /= display->ctrl_count;
+	mode->timing.h_sync_width /= display->ctrl_count;
+	mode->timing.h_back_porch /= display->ctrl_count;
+	mode->timing.h_skew /= display->ctrl_count;
+	mode->pixel_clk_khz /= display->ctrl_count;
 }
 
 static int dsi_display_is_ulps_req_valid(struct dsi_display *display,
@@ -2248,7 +2246,7 @@ static int dsi_display_set_clk_src(struct dsi_display *display)
 	m_ctrl = &display->ctrl[display->clk_master_idx];
 
 	rc = dsi_ctrl_set_clock_source(m_ctrl->ctrl,
-		   &display->clock_info.src_clks);
+		   &display->clock_info.mux_clks);
 	if (rc) {
 		pr_err("[%s] failed to set source clocks for master, rc=%d\n",
 			   display->name, rc);
@@ -2262,7 +2260,7 @@ static int dsi_display_set_clk_src(struct dsi_display *display)
 			continue;
 
 		rc = dsi_ctrl_set_clock_source(ctrl->ctrl,
-			   &display->clock_info.src_clks);
+			   &display->clock_info.mux_clks);
 		if (rc) {
 			pr_err("[%s] failed to set source clocks, rc=%d\n",
 				   display->name, rc);
@@ -2933,6 +2931,7 @@ static int dsi_display_clocks_init(struct dsi_display *display)
 	struct dsi_clk_link_set *src = &display->clock_info.src_clks;
 	struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
 	struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
+	struct dsi_dyn_clk_caps *dyn_clk_caps = &(display->panel->dyn_clk_caps);
 	char *dsi_clock_name;
 
 	if (!strcmp(display->display_type, "primary"))
@@ -2955,7 +2954,35 @@ static int dsi_display_clocks_init(struct dsi_display *display)
 			rc = PTR_ERR(dsi_clk);
 
 			pr_err("failed to get %s, rc=%d\n", clk_name, rc);
-			goto error;
+
+			if (dsi_display_check_prefix(mux_byte, clk_name)) {
+				mux->byte_clk = NULL;
+				goto error;
+			}
+			if (dsi_display_check_prefix(mux_pixel, clk_name)) {
+				mux->pixel_clk = NULL;
+				goto error;
+			}
+
+			if (dyn_clk_caps->dyn_clk_support &&
+				(display->panel->panel_mode ==
+					 DSI_OP_VIDEO_MODE)) {
+
+				if (dsi_display_check_prefix(src_byte,
+							clk_name))
+					src->byte_clk = NULL;
+				if (dsi_display_check_prefix(src_pixel,
+							clk_name))
+					src->pixel_clk = NULL;
+				if (dsi_display_check_prefix(shadow_byte,
+							clk_name))
+					shadow->byte_clk = NULL;
+				if (dsi_display_check_prefix(shadow_pixel,
+							clk_name))
+					shadow->pixel_clk = NULL;
+
+				dyn_clk_caps->dyn_clk_support = false;
+			}
 		}
 
 		if (dsi_display_check_prefix(src_byte, clk_name)) {
@@ -3739,6 +3766,338 @@ static bool dsi_display_is_seamless_dfps_possible(
 	return true;
 }
 
+static int dsi_display_update_dsi_bitrate(struct dsi_display *display,
+					  u32 bit_clk_rate)
+{
+	int rc = 0;
+	int i;
+
+	pr_debug("%s:bit rate:%d\n", __func__, bit_clk_rate);
+	if (!display->panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	if (bit_clk_rate == 0) {
+		pr_err("Invalid bit clock rate\n");
+		return -EINVAL;
+	}
+
+	display->config.bit_clk_rate_hz = bit_clk_rate;
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		struct dsi_display_ctrl *dsi_disp_ctrl = &display->ctrl[i];
+		struct dsi_ctrl *ctrl = dsi_disp_ctrl->ctrl;
+		u32 num_of_lanes = 0, bpp;
+		u64 bit_rate, pclk_rate, bit_rate_per_lane, byte_clk_rate;
+		struct dsi_host_common_cfg *host_cfg;
+
+		mutex_lock(&ctrl->ctrl_lock);
+
+		host_cfg = &display->panel->host_config;
+		if (host_cfg->data_lanes & DSI_DATA_LANE_0)
+			num_of_lanes++;
+		if (host_cfg->data_lanes & DSI_DATA_LANE_1)
+			num_of_lanes++;
+		if (host_cfg->data_lanes & DSI_DATA_LANE_2)
+			num_of_lanes++;
+		if (host_cfg->data_lanes & DSI_DATA_LANE_3)
+			num_of_lanes++;
+
+		if (num_of_lanes == 0) {
+			pr_err("Invalid lane count\n");
+			rc = -EINVAL;
+			goto error;
+		}
+
+		bpp = dsi_pixel_format_to_bpp(host_cfg->dst_format);
+
+		bit_rate = display->config.bit_clk_rate_hz * num_of_lanes;
+		bit_rate_per_lane = bit_rate;
+		do_div(bit_rate_per_lane, num_of_lanes);
+		pclk_rate = bit_rate;
+		do_div(pclk_rate, bpp);
+		byte_clk_rate = bit_rate_per_lane;
+		do_div(byte_clk_rate, 8);
+		pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
+			 bit_rate, bit_rate_per_lane);
+		pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
+			  byte_clk_rate, pclk_rate);
+
+		ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
+		ctrl->clk_freq.pix_clk_rate = pclk_rate;
+		rc = dsi_clk_set_link_frequencies(display->dsi_clk_handle,
+			ctrl->clk_freq, ctrl->cell_index);
+		if (rc) {
+			pr_err("Failed to update link frequencies\n");
+			goto error;
+		}
+
+		ctrl->host_config.bit_clk_rate_hz = bit_clk_rate;
+error:
+		mutex_unlock(&ctrl->ctrl_lock);
+
+		/* TODO: recover ctrl->clk_freq in case of failure */
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
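
As a sanity check on the clock derivation above (hypothetical values, assuming a
500 MHz per-lane bit clock, 4 data lanes and RGB888 at 24 bpp):

	bit_rate          = 500 MHz * 4 lanes = 2.0 GHz
	bit_rate_per_lane = 2.0 GHz / 4       = 500 MHz
	pclk_rate         = 2.0 GHz / 24      ~= 83.3 MHz
	byte_clk_rate     = 500 MHz / 8       = 62.5 MHz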
+
+static void _dsi_display_calc_pipe_delay(struct dsi_display *display,
+				    struct dsi_dyn_clk_delay *delay,
+				    struct dsi_display_mode *mode)
+{
+	u32 esc_clk_rate_hz;
+	u32 pclk_to_esc_ratio, byte_to_esc_ratio, hr_bit_to_esc_ratio;
+	u32 hsync_period = 0;
+	struct dsi_display_ctrl *m_ctrl;
+	struct dsi_ctrl *dsi_ctrl;
+	struct dsi_phy_cfg *cfg;
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+	dsi_ctrl = m_ctrl->ctrl;
+
+	cfg = &(m_ctrl->phy->cfg);
+
+	esc_clk_rate_hz = dsi_ctrl->clk_freq.esc_clk_rate * 1000;
+	pclk_to_esc_ratio = ((dsi_ctrl->clk_freq.pix_clk_rate * 1000) /
+			     esc_clk_rate_hz);
+	byte_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 1000) /
+			     esc_clk_rate_hz);
+	hr_bit_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 4 * 1000) /
+					esc_clk_rate_hz);
+
+	hsync_period = DSI_H_TOTAL_DSC(&mode->timing);
+	delay->pipe_delay = (hsync_period + 1) / pclk_to_esc_ratio;
+	if (!display->panel->video_config.eof_bllp_lp11_en)
+		delay->pipe_delay += (17 / pclk_to_esc_ratio) +
+			((21 + (display->config.common_config.t_clk_pre + 1) +
+			  (display->config.common_config.t_clk_post + 1)) /
+			 byte_to_esc_ratio) +
+			((((cfg->timing.lane_v3[8] >> 1) + 1) +
+			((cfg->timing.lane_v3[6] >> 1) + 1) +
+			((cfg->timing.lane_v3[3] * 4) +
+			 (cfg->timing.lane_v3[5] >> 1) + 1) +
+			((cfg->timing.lane_v3[7] >> 1) + 1) +
+			((cfg->timing.lane_v3[1] >> 1) + 1) +
+			((cfg->timing.lane_v3[4] >> 1) + 1)) /
+			 hr_bit_to_esc_ratio);
+
+	delay->pipe_delay2 = 0;
+	if (display->panel->host_config.force_hs_clk_lane)
+		delay->pipe_delay2 = (6 / byte_to_esc_ratio) +
+			((((cfg->timing.lane_v3[1] >> 1) + 1) +
+			  ((cfg->timing.lane_v3[4] >> 1) + 1)) /
+			 hr_bit_to_esc_ratio);
+
+	/* 130 us pll delay recommended by h/w doc */
+	delay->pll_delay = ((130 * esc_clk_rate_hz) / 1000000) * 2;
+}
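
A worked example of the pll_delay term (assuming a typical 19.2 MHz escape clock,
not taken from the patch): (130 us * 19.2 MHz) * 2 = 2496 * 2 = 4992 escape-clock cycles.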
+
+static int _dsi_display_dyn_update_clks(struct dsi_display *display,
+					struct link_clk_freq *bkp_freq)
+{
+	int rc = 0, i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+
+	dsi_clk_prepare_enable(&display->clock_info.src_clks);
+
+	rc = dsi_clk_update_parent(&display->clock_info.shadow_clks,
+			      &display->clock_info.mux_clks);
+	if (rc) {
+		pr_err("failed to update mux parent to shadow clks\n");
+		goto exit;
+	}
+
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+		rc = dsi_clk_set_byte_clk_rate(display->dsi_clk_handle,
+				   ctrl->ctrl->clk_freq.byte_clk_rate, i);
+		if (rc) {
+			pr_err("failed to set byte rate for index:%d\n", i);
+			goto recover_byte_clk;
+		}
+		rc = dsi_clk_set_pixel_clk_rate(display->dsi_clk_handle,
+				   ctrl->ctrl->clk_freq.pix_clk_rate, i);
+		if (rc) {
+			pr_err("failed to set pix rate for index:%d\n", i);
+			goto recover_pix_clk;
+		}
+	}
+
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (ctrl == m_ctrl)
+			continue;
+		dsi_phy_dynamic_refresh_trigger(ctrl->phy, false);
+	}
+	dsi_phy_dynamic_refresh_trigger(m_ctrl->phy, true);
+
+	/* wait for dynamic refresh done */
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		rc = dsi_ctrl_wait4dynamic_refresh_done(ctrl->ctrl);
+		if (rc) {
+			pr_err("wait4dynamic refresh failed for dsi:%d\n", i);
+			goto recover_pix_clk;
+		} else {
+			pr_info("dynamic refresh done on dsi: %s\n",
+				i ? "slave" : "master");
+		}
+	}
+
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		dsi_phy_dynamic_refresh_clear(ctrl->phy);
+	}
+
+	rc = dsi_clk_update_parent(&display->clock_info.src_clks,
+			      &display->clock_info.mux_clks);
+	if (rc)
+		pr_err("could not switch back to src clks %d\n", rc);
+
+	dsi_clk_disable_unprepare(&display->clock_info.src_clks);
+
+	return rc;
+
+recover_pix_clk:
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+		dsi_clk_set_pixel_clk_rate(display->dsi_clk_handle,
+					   bkp_freq->pix_clk_rate, i);
+	}
+
+recover_byte_clk:
+	for (i = 0; (i < display->ctrl_count) &&
+	     (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl)
+			continue;
+		dsi_clk_set_byte_clk_rate(display->dsi_clk_handle,
+					  bkp_freq->byte_clk_rate, i);
+	}
+
+exit:
+	dsi_clk_disable_unprepare(&display->clock_info.src_clks);
+
+	return rc;
+}
+
+static int dsi_display_dynamic_clk_switch_vid(struct dsi_display *display,
+					  struct dsi_display_mode *mode)
+{
+	int rc = 0, mask, i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+	struct dsi_dyn_clk_delay delay;
+	struct link_clk_freq bkp_freq;
+
+	dsi_panel_acquire_panel_lock(display->panel);
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+
+	dsi_display_clk_ctrl(display->dsi_clk_handle, DSI_ALL_CLKS, DSI_CLK_ON);
+
+	/* mask PLL unlock, FIFO overflow and underflow errors */
+	mask = BIT(DSI_PLL_UNLOCK_ERR) | BIT(DSI_FIFO_UNDERFLOW) |
+		BIT(DSI_FIFO_OVERFLOW);
+	dsi_display_mask_ctrl_error_interrupts(display, mask, true);
+
+	/* update the phy timings based on new mode */
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		dsi_phy_update_phy_timings(ctrl->phy, &display->config);
+	}
+
+	/* back up existing rates to handle failure case */
+	bkp_freq.byte_clk_rate = m_ctrl->ctrl->clk_freq.byte_clk_rate;
+	bkp_freq.pix_clk_rate = m_ctrl->ctrl->clk_freq.pix_clk_rate;
+	bkp_freq.esc_clk_rate = m_ctrl->ctrl->clk_freq.esc_clk_rate;
+
+	rc = dsi_display_update_dsi_bitrate(display, mode->timing.clk_rate_hz);
+	if (rc) {
+		pr_err("failed to set link frequencies, rc=%d\n", rc);
+		goto exit;
+	}
+
+	/* calculate pipe delays */
+	_dsi_display_calc_pipe_delay(display, &delay, mode);
+
+	/* configure dynamic refresh ctrl registers */
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->phy)
+			continue;
+		if (ctrl == m_ctrl)
+			dsi_phy_config_dynamic_refresh(ctrl->phy, &delay, true);
+		else
+			dsi_phy_config_dynamic_refresh(ctrl->phy, &delay,
+						       false);
+	}
+
+	rc = _dsi_display_dyn_update_clks(display, &bkp_freq);
+
+exit:
+	dsi_display_mask_ctrl_error_interrupts(display, mask, false);
+
+	dsi_display_clk_ctrl(display->dsi_clk_handle, DSI_ALL_CLKS,
+			     DSI_CLK_OFF);
+
+	/* store newly calculated phy timings in mode private info */
+	dsi_phy_dyn_refresh_cache_phy_timings(m_ctrl->phy,
+					      mode->priv_info->phy_timing_val,
+					      mode->priv_info->phy_timing_len);
+
+	dsi_panel_release_panel_lock(display->panel);
+
+	return rc;
+}
+
+static int dsi_display_dynamic_clk_configure_cmd(struct dsi_display *display,
+		int clk_rate)
+{
+	int rc = 0;
+
+	if (clk_rate <= 0) {
+		pr_err("%s: bitrate should be greater than 0\n", __func__);
+		return -EINVAL;
+	}
+
+	if (clk_rate == display->cached_clk_rate) {
+		pr_info("%s: ignore duplicated DSI clk setting\n", __func__);
+		return rc;
+	}
+
+	display->cached_clk_rate = clk_rate;
+
+	rc = dsi_display_update_dsi_bitrate(display, clk_rate);
+	if (!rc) {
+		pr_info("%s: bit clk is ready to be configured to '%d'\n",
+				__func__, clk_rate);
+		atomic_set(&display->clkrate_change_pending, 1);
+	} else {
+		pr_err("%s: Failed to prepare to configure '%d'. rc = %d\n",
+				__func__, clk_rate, rc);
+		/* Caching clock failed, so don't go on doing so. */
+		atomic_set(&display->clkrate_change_pending, 0);
+		display->cached_clk_rate = 0;
+	}
+
+	return rc;
+}
+
 static int dsi_display_dfps_update(struct dsi_display *display,
 				   struct dsi_display_mode *dsi_mode)
 {
@@ -3972,7 +4331,7 @@ static int dsi_display_set_mode_sub(struct dsi_display *display,
 				    struct dsi_display_mode *mode,
 				    u32 flags)
 {
-	int rc = 0;
+	int rc = 0, clk_rate = 0;
 	int i;
 	struct dsi_display_ctrl *ctrl;
 	struct dsi_display_mode_priv_info *priv_info;
@@ -4008,6 +4367,26 @@ static int dsi_display_set_mode_sub(struct dsi_display *display,
 					display->name, rc);
 			goto error;
 		}
+	} else if (mode->dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK) {
+		if (display->panel->panel_mode == DSI_OP_VIDEO_MODE) {
+			rc = dsi_display_dynamic_clk_switch_vid(display, mode);
+			if (rc)
+				pr_err("dynamic clk change failed %d\n", rc);
+			/*
+			 * skip rest of the operations since
+			 * dsi_display_dynamic_clk_switch_vid() already takes
+			 * care of them.
+			 */
+			return rc;
+		} else if (display->panel->panel_mode == DSI_OP_CMD_MODE) {
+			clk_rate = mode->timing.clk_rate_hz;
+			rc = dsi_display_dynamic_clk_configure_cmd(display,
+					clk_rate);
+			if (rc) {
+				pr_err("Failed to configure dynamic clk\n");
+				return rc;
+			}
+		}
 	}
 
 	display_for_each_ctrl(i, display) {
@@ -4230,208 +4609,6 @@ static int dsi_display_force_update_dsi_clk(struct dsi_display *display)
 	return rc;
 }
 
-static int dsi_display_request_update_dsi_bitrate(struct dsi_display *display,
-					u32 bit_clk_rate)
-{
-	int rc = 0;
-	int i;
-
-	pr_debug("%s:bit rate:%d\n", __func__, bit_clk_rate);
-	if (!display->panel) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	if (bit_clk_rate == 0) {
-		pr_err("Invalid bit clock rate\n");
-		return -EINVAL;
-	}
-
-	display->config.bit_clk_rate_hz_override = bit_clk_rate;
-
-	display_for_each_ctrl(i, display) {
-		struct dsi_display_ctrl *dsi_disp_ctrl = &display->ctrl[i];
-		struct dsi_ctrl *ctrl = dsi_disp_ctrl->ctrl;
-		u32 num_of_lanes = 0;
-		u32 bpp = 3;
-		u64 bit_rate, pclk_rate, bit_rate_per_lane, byte_clk_rate;
-		struct dsi_host_common_cfg *host_cfg;
-
-		mutex_lock(&ctrl->ctrl_lock);
-
-		host_cfg = &display->panel->host_config;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_0)
-			num_of_lanes++;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_1)
-			num_of_lanes++;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_2)
-			num_of_lanes++;
-		if (host_cfg->data_lanes & DSI_DATA_LANE_3)
-			num_of_lanes++;
-
-		if (num_of_lanes == 0) {
-			pr_err("Invalid lane count\n");
-			rc = -EINVAL;
-			goto error;
-		}
-
-		bit_rate = display->config.bit_clk_rate_hz_override *
-						num_of_lanes;
-		bit_rate_per_lane = bit_rate;
-		do_div(bit_rate_per_lane, num_of_lanes);
-		pclk_rate = bit_rate;
-		do_div(pclk_rate, (8 * bpp));
-		byte_clk_rate = bit_rate_per_lane;
-		do_div(byte_clk_rate, 8);
-		pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
-			 bit_rate, bit_rate_per_lane);
-		pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
-			  byte_clk_rate, pclk_rate);
-
-		ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
-		ctrl->clk_freq.pix_clk_rate = pclk_rate;
-		rc = dsi_clk_set_link_frequencies(display->dsi_clk_handle,
-			ctrl->clk_freq, ctrl->cell_index);
-		if (rc) {
-			pr_err("Failed to update link frequencies\n");
-			goto error;
-		}
-
-		ctrl->host_config.bit_clk_rate_hz_override = bit_clk_rate;
-error:
-		mutex_unlock(&ctrl->ctrl_lock);
-
-		/* TODO: recover ctrl->clk_freq in case of failure */
-		if (rc)
-			return rc;
-	}
-
-	return 0;
-}
-
-static ssize_t dynamic_dsi_clock_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
-{
-	int rc = 0;
-	struct dsi_display *display;
-	struct dsi_display_ctrl *m_ctrl;
-	struct dsi_ctrl *ctrl;
-
-	display = dev_get_drvdata(dev);
-	if (!display) {
-		pr_err("Invalid display\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&display->display_lock);
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-	ctrl = m_ctrl->ctrl;
-	if (ctrl)
-		display->cached_clk_rate = ctrl->clk_freq.byte_clk_rate
-					     * 8;
-
-	rc = snprintf(buf, PAGE_SIZE, "%d\n", display->cached_clk_rate);
-	pr_debug("%s: read dsi clk rate %d\n", __func__,
-		display->cached_clk_rate);
-
-	mutex_unlock(&display->display_lock);
-
-	return rc;
-}
-
-static ssize_t dynamic_dsi_clock_store(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t count)
-{
-	int rc = 0;
-	int clk_rate;
-	struct dsi_display *display;
-
-	display = dev_get_drvdata(dev);
-	if (!display) {
-		pr_err("Invalid display\n");
-		return -EINVAL;
-	}
-
-	rc = kstrtoint(buf, DSI_CLOCK_BITRATE_RADIX, &clk_rate);
-	if (rc) {
-		pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
-		return rc;
-	}
-
-	if (clk_rate <= 0) {
-		pr_err("%s: bitrate should be greater than 0\n", __func__);
-		return -EINVAL;
-	}
-
-	if (clk_rate == display->cached_clk_rate) {
-		pr_info("%s: ignore duplicated DSI clk setting\n", __func__);
-		return count;
-	}
-
-	pr_info("%s: bitrate param value: '%d'\n", __func__, clk_rate);
-
-	mutex_lock(&display->display_lock);
-
-	display->cached_clk_rate = clk_rate;
-	rc = dsi_display_request_update_dsi_bitrate(display, clk_rate);
-	if (!rc) {
-		pr_info("%s: bit clk is ready to be configured to '%d'\n",
-			__func__, clk_rate);
-	} else {
-		pr_err("%s: Failed to prepare to configure '%d'. rc = %d\n",
-			__func__, clk_rate, rc);
-		/*Caching clock failed, so don't go on doing so.*/
-		atomic_set(&display->clkrate_change_pending, 0);
-		display->cached_clk_rate = 0;
-
-		mutex_unlock(&display->display_lock);
-
-		return rc;
-	}
-	atomic_set(&display->clkrate_change_pending, 1);
-
-	mutex_unlock(&display->display_lock);
-
-	return count;
-
-}
-
-static DEVICE_ATTR_RW(dynamic_dsi_clock);
-
-static struct attribute *dynamic_dsi_clock_fs_attrs[] = {
-	&dev_attr_dynamic_dsi_clock.attr,
-	NULL,
-};
-static struct attribute_group dynamic_dsi_clock_fs_attrs_group = {
-	.attrs = dynamic_dsi_clock_fs_attrs,
-};
-
-static int dsi_display_sysfs_init(struct dsi_display *display)
-{
-	int rc = 0;
-	struct device *dev = &display->pdev->dev;
-
-	if (display->panel->panel_mode == DSI_OP_CMD_MODE)
-		rc = sysfs_create_group(&dev->kobj,
-			&dynamic_dsi_clock_fs_attrs_group);
-
-	return rc;
-
-}
-
-static int dsi_display_sysfs_deinit(struct dsi_display *display)
-{
-	struct device *dev = &display->pdev->dev;
-
-	if (display->panel->panel_mode == DSI_OP_CMD_MODE)
-		sysfs_remove_group(&dev->kobj,
-			&dynamic_dsi_clock_fs_attrs_group);
-
-	return 0;
-
-}
-
 /**
  * dsi_display_bind - bind dsi device with controlling device
  * @dev:        Pointer to base of platform device
@@ -4483,12 +4660,6 @@ static int dsi_display_bind(struct device *dev,
 	atomic_set(&display->clkrate_change_pending, 0);
 	display->cached_clk_rate = 0;
 
-	rc = dsi_display_sysfs_init(display);
-	if (rc) {
-		pr_err("[%s] sysfs init failed, rc=%d\n", display->name, rc);
-		goto error;
-	}
-
 	memset(&info, 0x0, sizeof(info));
 
 	display_for_each_ctrl(i, display) {
@@ -4634,7 +4805,6 @@ error_ctrl_deinit:
 		(void)dsi_phy_drv_deinit(display_ctrl->phy);
 		(void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
 	}
-	(void)dsi_display_sysfs_deinit(display);
 	(void)dsi_display_debugfs_deinit(display);
 error:
 	mutex_unlock(&display->display_lock);
@@ -4694,7 +4864,6 @@ static void dsi_display_unbind(struct device *dev,
 	}
 
 	atomic_set(&display->clkrate_change_pending, 0);
-	(void)dsi_display_sysfs_deinit(display);
 	(void)dsi_display_debugfs_deinit(display);
 
 	mutex_unlock(&display->display_lock);
@@ -5453,7 +5622,8 @@ static int dsi_display_get_mode_count_no_lock(struct dsi_display *display,
 			u32 *count)
 {
 	struct dsi_dfps_capabilities dfps_caps;
-	int num_dfps_rates, rc = 0;
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
+	int num_dfps_rates, num_bit_clks, rc = 0;
 
 	if (!display || !display->panel) {
 		pr_err("invalid display:%d panel:%d\n", display != NULL,
@@ -5470,12 +5640,16 @@ static int dsi_display_get_mode_count_no_lock(struct dsi_display *display,
 		return rc;
 	}
 
-	num_dfps_rates = !dfps_caps.dfps_support ? 1 :
-			dfps_caps.max_refresh_rate -
-			dfps_caps.min_refresh_rate + 1;
+	num_dfps_rates = !dfps_caps.dfps_support ? 1 : dfps_caps.dfps_list_len;
+
+	dyn_clk_caps = &(display->panel->dyn_clk_caps);
 
-	/* Inflate num_of_modes by fps in dfps */
-	*count = display->panel->num_timing_nodes * num_dfps_rates;
+	num_bit_clks = !dyn_clk_caps->dyn_clk_support ? 1 :
+					dyn_clk_caps->bit_clk_list_len;
+
+	/* Inflate num_of_modes by dfps rates and dynamic bit clk rates */
+	*count = display->panel->num_timing_nodes *
+				num_dfps_rates * num_bit_clks;
 
 	return 0;
 }
@@ -5498,6 +5672,73 @@ int dsi_display_get_mode_count(struct dsi_display *display,
 	return 0;
 }
 
+static void _dsi_display_populate_bit_clks(struct dsi_display *display,
+					   int start, int end, u32 *mode_idx)
+{
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
+	struct dsi_display_mode *src, *dst;
+	struct dsi_host_common_cfg *cfg;
+	int i, j, total_modes, bpp, lanes = 0;
+
+	if (!display || !mode_idx)
+		return;
+
+	dyn_clk_caps = &(display->panel->dyn_clk_caps);
+	if (!dyn_clk_caps->dyn_clk_support)
+		return;
+
+	cfg = &(display->panel->host_config);
+	bpp = dsi_pixel_format_to_bpp(cfg->dst_format);
+
+	if (cfg->data_lanes & DSI_DATA_LANE_0)
+		lanes++;
+	if (cfg->data_lanes & DSI_DATA_LANE_1)
+		lanes++;
+	if (cfg->data_lanes & DSI_DATA_LANE_2)
+		lanes++;
+	if (cfg->data_lanes & DSI_DATA_LANE_3)
+		lanes++;
+
+	dsi_display_get_mode_count_no_lock(display, &total_modes);
+
+	for (i = start; i < end; i++) {
+		src = &display->modes[i];
+		if (!src)
+			return;
+		/*
+		 * TODO: currently the first bit rate in the list is set
+		 * as the preferred rate. Ideally this should be based on
+		 * user or device tree preference.
+		 */
+		src->timing.clk_rate_hz = dyn_clk_caps->bit_clk_list[0];
+		src->pixel_clk_khz =
+			div_u64(src->timing.clk_rate_hz * lanes, bpp);
+		src->pixel_clk_khz /= 1000;
+		src->pixel_clk_khz *= display->ctrl_count;
+	}
+
+	for (i = 1; i < dyn_clk_caps->bit_clk_list_len; i++) {
+		if (*mode_idx >= total_modes)
+			return;
+		for (j = start; j < end; j++) {
+			src = &display->modes[j];
+			dst = &display->modes[*mode_idx];
+
+			if (!src || !dst) {
+				pr_err("invalid mode index\n");
+				return;
+			}
+			memcpy(dst, src, sizeof(struct dsi_display_mode));
+			dst->timing.clk_rate_hz = dyn_clk_caps->bit_clk_list[i];
+			dst->pixel_clk_khz =
+				div_u64(dst->timing.clk_rate_hz * lanes, bpp);
+			dst->pixel_clk_khz /= 1000;
+			dst->pixel_clk_khz *= display->ctrl_count;
+			(*mode_idx)++;
+		}
+	}
+}
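
To illustrate the per-mode pixel clock math above (hypothetical values: 900 MHz
bit clock, 4 lanes, 24 bpp, dual-DSI):

	pixel_clk_khz = ((900 MHz * 4) / 24) / 1000 * 2 = 150000 kHz * 2 = 300000 kHz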
+
 void dsi_display_put_mode(struct dsi_display *display,
 	struct dsi_display_mode *mode)
 {
@@ -5510,7 +5751,8 @@ int dsi_display_get_modes(struct dsi_display *display,
 	struct dsi_dfps_capabilities dfps_caps;
 	u32 num_dfps_rates, panel_mode_count, total_mode_count;
 	u32 mode_idx, array_idx = 0;
-	int i, rc = -EINVAL;
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
+	int i, start, end, rc = -EINVAL;
 
 	if (!display || !out_modes) {
 		pr_err("Invalid params\n");
@@ -5542,9 +5784,9 @@ int dsi_display_get_modes(struct dsi_display *display,
 		goto error;
 	}
 
-	num_dfps_rates = !dfps_caps.dfps_support ? 1 :
-			dfps_caps.max_refresh_rate -
-			dfps_caps.min_refresh_rate + 1;
+	dyn_clk_caps = &(display->panel->dyn_clk_caps);
+
+	num_dfps_rates = !dfps_caps.dfps_support ? 1 : dfps_caps.dfps_list_len;
 
 	panel_mode_count = display->panel->num_timing_nodes;
 
@@ -5582,14 +5824,14 @@ int dsi_display_get_modes(struct dsi_display *display,
 				panel_mode.timing.dsi_transfer_time_us;
 		}
 
-		if (display->ctrl_count > 1) { /* TODO: remove if */
-			panel_mode.timing.h_active *= display->ctrl_count;
-			panel_mode.timing.h_front_porch *= display->ctrl_count;
-			panel_mode.timing.h_sync_width *= display->ctrl_count;
-			panel_mode.timing.h_back_porch *= display->ctrl_count;
-			panel_mode.timing.h_skew *= display->ctrl_count;
-			panel_mode.pixel_clk_khz *= display->ctrl_count;
-		}
+		panel_mode.timing.h_active *= display->ctrl_count;
+		panel_mode.timing.h_front_porch *= display->ctrl_count;
+		panel_mode.timing.h_sync_width *= display->ctrl_count;
+		panel_mode.timing.h_back_porch *= display->ctrl_count;
+		panel_mode.timing.h_skew *= display->ctrl_count;
+		panel_mode.pixel_clk_khz *= display->ctrl_count;
+
+		start = array_idx;
 
 		for (i = 0; i < num_dfps_rates; i++) {
 			struct dsi_display_mode *sub_mode =
@@ -5603,24 +5845,23 @@ int dsi_display_get_modes(struct dsi_display *display,
 			}
 
 			memcpy(sub_mode, &panel_mode, sizeof(panel_mode));
+			array_idx++;
 
-			if (dfps_caps.dfps_support) {
-				curr_refresh_rate =
-					sub_mode->timing.refresh_rate;
-				sub_mode->timing.refresh_rate =
-					dfps_caps.min_refresh_rate +
-					(i % num_dfps_rates);
+			if (!dfps_caps.dfps_support)
+				continue;
 
-				dsi_display_get_dfps_timing(display,
-					sub_mode, curr_refresh_rate);
+			curr_refresh_rate = sub_mode->timing.refresh_rate;
+			sub_mode->timing.refresh_rate = dfps_caps.dfps_list[i];
 
-				sub_mode->pixel_clk_khz =
-					(DSI_H_TOTAL_DSC(&sub_mode->timing) *
-					DSI_V_TOTAL(&sub_mode->timing) *
-					sub_mode->timing.refresh_rate) / 1000;
-			}
-			array_idx++;
+			dsi_display_get_dfps_timing(display, sub_mode,
+					curr_refresh_rate);
 		}
+		end = array_idx;
+		/*
+		 * if dynamic clk switch is supported then update all the bit
+		 * clk rates.
+		 */
+		_dsi_display_populate_bit_clks(display, start, end, &array_idx);
 	}
 
 exit:
@@ -5744,7 +5985,8 @@ int dsi_display_find_mode(struct dsi_display *display,
 		if (cmp->timing.v_active == m->timing.v_active &&
 			cmp->timing.h_active == m->timing.h_active &&
 			cmp->timing.refresh_rate == m->timing.refresh_rate &&
-			cmp->panel_mode == m->panel_mode) {
+			cmp->panel_mode == m->panel_mode &&
+			cmp->pixel_clk_khz == m->pixel_clk_khz) {
 			*out_mode = m;
 			rc = 0;
 			break;
@@ -5753,9 +5995,10 @@ int dsi_display_find_mode(struct dsi_display *display,
 	mutex_unlock(&display->display_lock);
 
 	if (!*out_mode) {
-		pr_err("[%s] failed to find mode for v_active %u h_active %u rate %u\n",
+		pr_err("[%s] failed to find mode for v_active %u h_active %u fps %u pclk %u\n",
 				display->name, cmp->timing.v_active,
-				cmp->timing.h_active, cmp->timing.refresh_rate);
+				cmp->timing.h_active, cmp->timing.refresh_rate,
+				cmp->pixel_clk_khz);
 		rc = -ENOENT;
 	}
 
@@ -5763,7 +6006,7 @@ int dsi_display_find_mode(struct dsi_display *display,
 }
 
 /**
- * dsi_display_validate_mode_vrr() - Validate if varaible refresh case.
+ * dsi_display_validate_mode_change() - Validate variable refresh/dyn clk case.
  * @display:     DSI display handle.
  * @cur_dsi_mode:   Current DSI mode.
  * @mode:        Mode value structure to be validated.
@@ -5771,16 +6014,15 @@ int dsi_display_find_mode(struct dsi_display *display,
  *               is change in fps but vactive and hactive are same.
  * Return: error code.
  */
-int dsi_display_validate_mode_vrr(struct dsi_display *display,
-			struct dsi_display_mode *cur_dsi_mode,
-			struct dsi_display_mode *mode)
+int dsi_display_validate_mode_change(struct dsi_display *display,
+			struct dsi_display_mode *cur_mode,
+			struct dsi_display_mode *adj_mode)
 {
 	int rc = 0;
-	struct dsi_display_mode adj_mode, cur_mode;
 	struct dsi_dfps_capabilities dfps_caps;
-	u32 curr_refresh_rate;
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
 
-	if (!display || !mode) {
+	if (!display || !adj_mode) {
 		pr_err("Invalid params\n");
 		return -EINVAL;
 	}
@@ -5792,65 +6034,43 @@ int dsi_display_validate_mode_vrr(struct dsi_display *display,
 
 	mutex_lock(&display->display_lock);
 
-	adj_mode = *mode;
-	cur_mode = *cur_dsi_mode;
-
-	if ((cur_mode.timing.refresh_rate != adj_mode.timing.refresh_rate) &&
-		(cur_mode.timing.v_active == adj_mode.timing.v_active) &&
-		(cur_mode.timing.h_active == adj_mode.timing.h_active)) {
-
-		curr_refresh_rate = cur_mode.timing.refresh_rate;
-		rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
-		if (rc) {
-			pr_err("[%s] failed to get dfps caps from panel\n",
-					display->name);
-			goto error;
-		}
-
-		cur_mode.timing.refresh_rate =
-			adj_mode.timing.refresh_rate;
-
-		rc = dsi_display_get_dfps_timing(display,
-			&cur_mode, curr_refresh_rate);
-		if (rc) {
-			pr_err("[%s] seamless vrr not possible rc=%d\n",
-			display->name, rc);
-			goto error;
+	if ((cur_mode->timing.v_active == adj_mode->timing.v_active) &&
+	    (cur_mode->timing.h_active == adj_mode->timing.h_active)) {
+		/* dfps change use case */
+		if (cur_mode->timing.refresh_rate !=
+		    adj_mode->timing.refresh_rate) {
+			dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
+			if (!dfps_caps.dfps_support) {
+				pr_err("invalid mode dfps not supported\n");
+				rc = -ENOTSUPP;
+				goto error;
+			}
+			pr_debug("Mode switch is seamless variable refresh\n");
+			adj_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
+			SDE_EVT32(cur_mode->timing.refresh_rate,
+				  adj_mode->timing.refresh_rate,
+				  cur_mode->timing.h_front_porch,
+				  adj_mode->timing.h_front_porch);
 		}
-		switch (dfps_caps.type) {
-		/*
-		 * Ignore any round off factors in porch calculation.
-		 * Worse case is set to 5.
-		 */
-		case DSI_DFPS_IMMEDIATE_VFP:
-			if (abs(DSI_V_TOTAL(&cur_mode.timing) -
-				DSI_V_TOTAL(&adj_mode.timing)) > 5)
-				pr_err("Mismatch vfp fps:%d new:%d given:%d\n",
-				adj_mode.timing.refresh_rate,
-				cur_mode.timing.v_front_porch,
-				adj_mode.timing.v_front_porch);
-			break;
 
-		case DSI_DFPS_IMMEDIATE_HFP:
-			if (abs(DSI_H_TOTAL_DSC(&cur_mode.timing) -
-				DSI_H_TOTAL_DSC(&adj_mode.timing)) > 5)
-				pr_err("Mismatch hfp fps:%d new:%d given:%d\n",
-				adj_mode.timing.refresh_rate,
-				cur_mode.timing.h_front_porch,
-				adj_mode.timing.h_front_porch);
-			break;
-
-		default:
-			pr_err("Unsupported DFPS mode %d\n",
-				dfps_caps.type);
-			rc = -ENOTSUPP;
+		/* dynamic clk change use case */
+		if (cur_mode->pixel_clk_khz != adj_mode->pixel_clk_khz) {
+			dyn_clk_caps = &(display->panel->dyn_clk_caps);
+			if (!dyn_clk_caps->dyn_clk_support) {
+				pr_err("dyn clk change not supported\n");
+				rc = -ENOTSUPP;
+				goto error;
+			}
+			if (adj_mode->dsi_mode_flags & DSI_MODE_FLAG_VRR) {
+				pr_err("dfps and dyn clk not supported in same commit\n");
+				rc = -ENOTSUPP;
+				goto error;
+			}
+			pr_debug("dynamic clk change detected\n");
+			adj_mode->dsi_mode_flags |= DSI_MODE_FLAG_DYN_CLK;
+			SDE_EVT32(cur_mode->pixel_clk_khz,
+				  adj_mode->pixel_clk_khz);
 		}
-
-		pr_debug("Mode switch is seamless variable refresh\n");
-		mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
-		SDE_EVT32(curr_refresh_rate, adj_mode.timing.refresh_rate,
-				cur_mode.timing.h_front_porch,
-				adj_mode.timing.h_front_porch);
 	}
 
 error:

+ 4 - 3
msm/dsi/dsi_display.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2019, The Linux Foundation.All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _DSI_DISPLAY_H_
@@ -410,13 +410,14 @@ int dsi_display_validate_mode(struct dsi_display *display,
 			      u32 flags);
 
 /**
- * dsi_display_validate_mode_vrr() - validates mode if variable refresh case
+ * dsi_display_validate_mode_change() - validates mode for variable refresh
+ *				or dynamic clk change case
  * @display:             Handle to display.
  * @mode:                Mode to be validated..
  *
  * Return: 0 if  error code.
  */
-int dsi_display_validate_mode_vrr(struct dsi_display *display,
+int dsi_display_validate_mode_change(struct dsi_display *display,
 			struct dsi_display_mode *cur_dsi_mode,
 			struct dsi_display_mode *mode);
 

+ 64 - 7
msm/dsi/dsi_drm.c

@@ -73,6 +73,8 @@ static void convert_to_dsi_mode(const struct drm_display_mode *drm_mode,
 		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
 	if (msm_is_mode_seamless_poms(drm_mode))
 		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_POMS;
+	if (msm_is_mode_seamless_dyn_clk(drm_mode))
+		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DYN_CLK;
 
 	dsi_mode->timing.h_sync_polarity =
 			!!(drm_mode->flags & DRM_MODE_FLAG_PHSYNC);
@@ -88,6 +90,8 @@ static void convert_to_dsi_mode(const struct drm_display_mode *drm_mode,
 void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
 				struct drm_display_mode *drm_mode)
 {
+	bool video_mode = (dsi_mode->panel_mode == DSI_OP_VIDEO_MODE);
+
 	memset(drm_mode, 0, sizeof(*drm_mode));
 
 	drm_mode->hdisplay = dsi_mode->timing.h_active;
@@ -122,6 +126,8 @@ void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
 		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_VRR;
 	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_POMS)
 		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_POMS;
+	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK)
+		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYN_CLK;
 
 	if (dsi_mode->timing.h_sync_polarity)
 		drm_mode->flags |= DRM_MODE_FLAG_PHSYNC;
@@ -133,7 +139,11 @@ void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
 	if (dsi_mode->panel_mode == DSI_OP_CMD_MODE)
 		drm_mode->flags |= DRM_MODE_FLAG_CMD_MODE_PANEL;
 
-	drm_mode_set_name(drm_mode);
+	/* set mode name */
+	snprintf(drm_mode->name, DRM_DISPLAY_MODE_LEN, "%dx%dx%dx%d%s",
+			drm_mode->hdisplay, drm_mode->vdisplay,
+			drm_mode->vrefresh, drm_mode->clock,
+			video_mode ? "vid" : "cmd");
 }
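
With the naming scheme above, an illustrative 1080x2340 video-mode timing at 60 Hz
with a 300000 kHz pixel clock is exposed as "1080x2340x60x300000vid".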
 
 static int dsi_bridge_attach(struct drm_bridge *bridge)
@@ -178,7 +188,8 @@ static void dsi_bridge_pre_enable(struct drm_bridge *bridge)
 	}
 
 	if (c_bridge->dsi_mode.dsi_mode_flags &
-		(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR)) {
+		(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR |
+		 DSI_MODE_FLAG_DYN_CLK)) {
 		pr_debug("[%d] seamless pre-enable\n", c_bridge->id);
 		return;
 	}
@@ -220,7 +231,8 @@ static void dsi_bridge_enable(struct drm_bridge *bridge)
 	}
 
 	if (c_bridge->dsi_mode.dsi_mode_flags &
-			(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR)) {
+			(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR |
+			 DSI_MODE_FLAG_DYN_CLK)) {
 		pr_debug("[%d] seamless enable\n", c_bridge->id);
 		return;
 	}
@@ -311,6 +323,12 @@ static void dsi_bridge_mode_set(struct drm_bridge *bridge,
 
 	memset(&(c_bridge->dsi_mode), 0x0, sizeof(struct dsi_display_mode));
 	convert_to_dsi_mode(adjusted_mode, &(c_bridge->dsi_mode));
+
+	/* restore bit_clk_rate also for dynamic clk use cases */
+	c_bridge->dsi_mode.timing.clk_rate_hz =
+		dsi_drm_find_bit_clk_rate(c_bridge->display, adjusted_mode);
+
+	pr_debug("clk_rate: %llu\n", c_bridge->dsi_mode.timing.clk_rate_hz);
 }
 
 static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
@@ -379,11 +397,13 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
 		cur_dsi_mode.timing.dsc_enabled =
 				dsi_mode.priv_info->dsc_enabled;
 		cur_dsi_mode.timing.dsc = &dsi_mode.priv_info->dsc;
-		rc = dsi_display_validate_mode_vrr(c_bridge->display,
+		rc = dsi_display_validate_mode_change(c_bridge->display,
 					&cur_dsi_mode, &dsi_mode);
-		if (rc)
-			pr_debug("[%s] vrr mode mismatch failure rc=%d\n",
+		if (rc) {
+			pr_err("[%s] seamless mode mismatch failure rc=%d\n",
 				c_bridge->display->name, rc);
+			return false;
+		}
 
 		cur_mode = crtc_state->crtc->mode;
 
@@ -397,6 +417,7 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
 		if (!drm_mode_equal(&cur_mode, adjusted_mode) &&
 			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
 			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_POMS)) &&
+			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK)) &&
 			(!crtc_state->active_changed ||
 			 display->is_cont_splash_enabled))
 			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
@@ -408,6 +429,33 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
 	return true;
 }
 
+u64 dsi_drm_find_bit_clk_rate(void *display,
+			      const struct drm_display_mode *drm_mode)
+{
+	int i = 0, count = 0;
+	struct dsi_display *dsi_display = display;
+	struct dsi_display_mode *dsi_mode;
+	u64 bit_clk_rate = 0;
+
+	if (!dsi_display || !drm_mode)
+		return 0;
+
+	dsi_display_get_mode_count(dsi_display, &count);
+
+	for (i = 0; i < count; i++) {
+		dsi_mode = &dsi_display->modes[i];
+		if ((dsi_mode->timing.v_active == drm_mode->vdisplay) &&
+		    (dsi_mode->timing.h_active == drm_mode->hdisplay) &&
+		    (dsi_mode->pixel_clk_khz == drm_mode->clock) &&
+		    (dsi_mode->timing.refresh_rate == drm_mode->vrefresh)) {
+			bit_clk_rate = dsi_mode->timing.clk_rate_hz;
+			break;
+		}
+	}
+
+	return bit_clk_rate;
+}
+
 int dsi_conn_get_mode_info(struct drm_connector *connector,
 		const struct drm_display_mode *drm_mode,
 		struct msm_mode_info *mode_info,
@@ -432,7 +480,7 @@ int dsi_conn_get_mode_info(struct drm_connector *connector,
 	mode_info->prefill_lines = dsi_mode.priv_info->panel_prefill_lines;
 	mode_info->jitter_numer = dsi_mode.priv_info->panel_jitter_numer;
 	mode_info->jitter_denom = dsi_mode.priv_info->panel_jitter_denom;
-	mode_info->clk_rate = dsi_mode.priv_info->clk_rate_hz;
+	mode_info->clk_rate = dsi_drm_find_bit_clk_rate(display, drm_mode);
 	mode_info->mdp_transfer_time_us =
 		dsi_mode.priv_info->mdp_transfer_time_us;
 
@@ -538,6 +586,9 @@ int dsi_conn_set_info_blob(struct drm_connector *connector,
 			panel->dfps_caps.max_refresh_rate);
 	}
 
+	sde_kms_info_add_keystr(info, "dyn bitclk support",
+			panel->dyn_clk_caps.dyn_clk_support ? "true" : "false");
+
 	switch (panel->phy_props.rotation) {
 	case DSI_PANEL_ROTATE_NONE:
 		sde_kms_info_add_keystr(info, "panel orientation", "none");
@@ -796,6 +847,9 @@ int dsi_connector_get_modes(struct drm_connector *connector, void *data)
 		}
 		m->width_mm = connector->display_info.width_mm;
 		m->height_mm = connector->display_info.height_mm;
+		/* set the first mode in list as preferred */
+		if (i == 0)
+			m->type |= DRM_MODE_TYPE_PREFERRED;
 		drm_mode_probed_add(connector, m);
 	}
 
@@ -915,6 +969,9 @@ int dsi_conn_post_kickoff(struct drm_connector *connector)
 		c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_VRR;
 	}
 
+	/* ensure dynamic clk switch flag is reset */
+	c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_DYN_CLK;
+
 	return 0;
 }
 

+ 2 - 0
msm/dsi/dsi_drm.h

@@ -130,4 +130,6 @@ int dsi_conn_post_kickoff(struct drm_connector *connector);
 void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
 				struct drm_display_mode *drm_mode);
 
+u64 dsi_drm_find_bit_clk_rate(void *display,
+			      const struct drm_display_mode *drm_mode);
 #endif /* _DSI_DRM_H_ */

+ 10 - 0
msm/dsi/dsi_hw.h

@@ -44,4 +44,14 @@
 #define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
 #define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
 
+#define PLL_CALC_DATA(addr0, addr1, data0, data1)      \
+	(((data1) << 24) | ((((addr1)/4) & 0xFF) << 16) | \
+	 ((data0) << 8) | (((addr0)/4) & 0xFF))
+
+#define DSI_DYN_REF_REG_W(base, offset, addr0, addr1, data0, data1)   \
+	writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
+			(base) + (offset))
+
+#define DSI_GEN_R32(base, offset) readl_relaxed(base + (offset))
+#define DSI_GEN_W32(base, offset, val) writel_relaxed((val), base + (offset))
 #endif /* _DSI_HW_H_ */
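
A worked example of the packing above, with made-up register offsets and data bytes:

	PLL_CALC_DATA(0x10, 0x14, 0xAA, 0xBB)
		= (0xBB << 24) | ((0x14 / 4) << 16) | (0xAA << 8) | (0x10 / 4)
		= 0xBB05AA04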

+ 105 - 56
msm/dsi/dsi_panel.c

@@ -1155,6 +1155,48 @@ static int dsi_panel_parse_qsync_caps(struct dsi_panel *panel,
 	return rc;
 }
 
+static int dsi_panel_parse_dyn_clk_caps(struct dsi_panel *panel)
+{
+	int rc = 0;
+	bool supported = false;
+	struct dsi_dyn_clk_caps *dyn_clk_caps = &panel->dyn_clk_caps;
+	struct dsi_parser_utils *utils = &panel->utils;
+	const char *name = panel->name;
+
+	supported = utils->read_bool(utils->data, "qcom,dsi-dyn-clk-enable");
+
+	if (!supported) {
+		dyn_clk_caps->dyn_clk_support = false;
+		return rc;
+	}
+
+	dyn_clk_caps->bit_clk_list_len = utils->count_u32_elems(utils->data,
+			"qcom,dsi-dyn-clk-list");
+
+	if (dyn_clk_caps->bit_clk_list_len < 1) {
+		pr_err("[%s] failed to get supported bit clk list\n", name);
+		return -EINVAL;
+	}
+
+	dyn_clk_caps->bit_clk_list = kcalloc(dyn_clk_caps->bit_clk_list_len,
+			sizeof(u32), GFP_KERNEL);
+	if (!dyn_clk_caps->bit_clk_list)
+		return -ENOMEM;
+
+	rc = utils->read_u32_array(utils->data, "qcom,dsi-dyn-clk-list",
+			dyn_clk_caps->bit_clk_list,
+			dyn_clk_caps->bit_clk_list_len);
+
+	if (rc) {
+		pr_err("[%s] failed to parse supported bit clk list\n", name);
+		return -EINVAL;
+	}
+
+	dyn_clk_caps->dyn_clk_support = true;
+
+	return 0;
+}
+
 static int dsi_panel_parse_dfps_caps(struct dsi_panel *panel)
 {
 	int rc = 0;
@@ -1163,7 +1205,7 @@ static int dsi_panel_parse_dfps_caps(struct dsi_panel *panel)
 	struct dsi_parser_utils *utils = &panel->utils;
 	const char *name = panel->name;
 	const char *type;
-	u32 val = 0;
+	u32 i;
 
 	supported = utils->read_bool(utils->data,
 			"qcom,mdss-dsi-pan-enable-dynamic-fps");
@@ -1171,66 +1213,64 @@ static int dsi_panel_parse_dfps_caps(struct dsi_panel *panel)
 	if (!supported) {
 		pr_debug("[%s] DFPS is not supported\n", name);
 		dfps_caps->dfps_support = false;
+		return rc;
+	}
+
+	type = utils->get_property(utils->data,
+			"qcom,mdss-dsi-pan-fps-update", NULL);
+	if (!type) {
+		pr_err("[%s] dfps type not defined\n", name);
+		rc = -EINVAL;
+		goto error;
+	} else if (!strcmp(type, "dfps_suspend_resume_mode")) {
+		dfps_caps->type = DSI_DFPS_SUSPEND_RESUME;
+	} else if (!strcmp(type, "dfps_immediate_clk_mode")) {
+		dfps_caps->type = DSI_DFPS_IMMEDIATE_CLK;
+	} else if (!strcmp(type, "dfps_immediate_porch_mode_hfp")) {
+		dfps_caps->type = DSI_DFPS_IMMEDIATE_HFP;
+	} else if (!strcmp(type, "dfps_immediate_porch_mode_vfp")) {
+		dfps_caps->type = DSI_DFPS_IMMEDIATE_VFP;
 	} else {
+		pr_err("[%s] dfps type is not recognized\n", name);
+		rc = -EINVAL;
+		goto error;
+	}
 
-		type = utils->get_property(utils->data,
-				       "qcom,mdss-dsi-pan-fps-update",
-				       NULL);
-		if (!type) {
-			pr_err("[%s] dfps type not defined\n", name);
-			rc = -EINVAL;
-			goto error;
-		} else if (!strcmp(type, "dfps_suspend_resume_mode")) {
-			dfps_caps->type = DSI_DFPS_SUSPEND_RESUME;
-		} else if (!strcmp(type, "dfps_immediate_clk_mode")) {
-			dfps_caps->type = DSI_DFPS_IMMEDIATE_CLK;
-		} else if (!strcmp(type, "dfps_immediate_porch_mode_hfp")) {
-			dfps_caps->type = DSI_DFPS_IMMEDIATE_HFP;
-		} else if (!strcmp(type, "dfps_immediate_porch_mode_vfp")) {
-			dfps_caps->type = DSI_DFPS_IMMEDIATE_VFP;
-		} else {
-			pr_err("[%s] dfps type is not recognized\n", name);
-			rc = -EINVAL;
-			goto error;
-		}
+	dfps_caps->dfps_list_len = utils->count_u32_elems(utils->data,
+				  "qcom,dsi-supported-dfps-list");
+	if (dfps_caps->dfps_list_len < 1) {
+		pr_err("[%s] dfps refresh list not present\n", name);
+		rc = -EINVAL;
+		goto error;
+	}
 
-		rc = utils->read_u32(utils->data,
-					  "qcom,mdss-dsi-min-refresh-rate",
-					  &val);
-		if (rc) {
-			pr_err("[%s] Min refresh rate is not defined\n", name);
-			rc = -EINVAL;
-			goto error;
-		}
-		dfps_caps->min_refresh_rate = val;
+	dfps_caps->dfps_list = kcalloc(dfps_caps->dfps_list_len, sizeof(u32),
+			GFP_KERNEL);
+	if (!dfps_caps->dfps_list) {
+		rc = -ENOMEM;
+		goto error;
+	}
 
-		rc = utils->read_u32(utils->data,
-					  "qcom,mdss-dsi-max-refresh-rate",
-					  &val);
-		if (rc) {
-			pr_debug("[%s] Using default refresh rate\n", name);
-			rc = utils->read_u32(utils->data,
-						"qcom,mdss-dsi-panel-framerate",
-						&val);
-			if (rc) {
-				pr_err("[%s] max refresh rate is not defined\n",
-				       name);
-				rc = -EINVAL;
-				goto error;
-			}
-		}
-		dfps_caps->max_refresh_rate = val;
+	rc = utils->read_u32_array(utils->data,
+			"qcom,dsi-supported-dfps-list",
+			dfps_caps->dfps_list,
+			dfps_caps->dfps_list_len);
+	if (rc) {
+		pr_err("[%s] dfps refresh rate list parse failed\n", name);
+		rc = -EINVAL;
+		goto error;
+	}
+	dfps_caps->dfps_support = true;
 
-		if (dfps_caps->min_refresh_rate > dfps_caps->max_refresh_rate) {
-			pr_err("[%s] min rate > max rate\n", name);
-			rc = -EINVAL;
-		}
+	/* calculate max and min fps */
+	dfps_caps->max_refresh_rate = dfps_caps->dfps_list[0];
+	dfps_caps->min_refresh_rate = dfps_caps->dfps_list[0];
 
-		pr_debug("[%s] DFPS is supported %d-%d, mode %d\n", name,
-				dfps_caps->min_refresh_rate,
-				dfps_caps->max_refresh_rate,
-				dfps_caps->type);
-		dfps_caps->dfps_support = true;
+	for (i = 1; i < dfps_caps->dfps_list_len; i++) {
+		if (dfps_caps->dfps_list[i] < dfps_caps->min_refresh_rate)
+			dfps_caps->min_refresh_rate = dfps_caps->dfps_list[i];
+		else if (dfps_caps->dfps_list[i] > dfps_caps->max_refresh_rate)
+			dfps_caps->max_refresh_rate = dfps_caps->dfps_list[i];
 	}
 
 error:
@@ -3053,6 +3093,10 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
 			pr_err("failed to parse qsync features, rc=%d\n", rc);
 	}
 
+	rc = dsi_panel_parse_dyn_clk_caps(panel);
+	if (rc)
+		pr_err("failed to parse dynamic clk config, rc=%d\n", rc);
+
 	rc = dsi_panel_parse_phy_props(panel);
 	if (rc) {
 		pr_err("failed to parse panel physical dimension, rc=%d\n", rc);
@@ -3475,6 +3519,7 @@ int dsi_panel_get_host_cfg_for_mode(struct dsi_panel *panel,
 				    struct dsi_host_config *config)
 {
 	int rc = 0;
+	struct dsi_dyn_clk_caps *dyn_clk_caps = &panel->dyn_clk_caps;
 
 	if (!panel || !mode || !config) {
 		pr_err("invalid params\n");
@@ -3502,7 +3547,11 @@ int dsi_panel_get_host_cfg_for_mode(struct dsi_panel *panel,
 	config->video_timing.dsc_enabled = mode->priv_info->dsc_enabled;
 	config->video_timing.dsc = &mode->priv_info->dsc;
 
-	config->bit_clk_rate_hz_override = mode->priv_info->clk_rate_hz;
+	if (dyn_clk_caps->dyn_clk_support)
+		config->bit_clk_rate_hz_override = mode->timing.clk_rate_hz;
+	else
+		config->bit_clk_rate_hz_override = mode->priv_info->clk_rate_hz;
+
 	config->esc_clk_rate_hz = 19200000;
 	mutex_unlock(&panel->panel_lock);
 	return rc;

+ 10 - 1
msm/dsi/dsi_panel.h

@@ -63,10 +63,18 @@ enum dsi_dms_mode {
 };
 
 struct dsi_dfps_capabilities {
-	bool dfps_support;
 	enum dsi_dfps_type type;
 	u32 min_refresh_rate;
 	u32 max_refresh_rate;
+	u32 *dfps_list;
+	u32 dfps_list_len;
+	bool dfps_support;
+};
+
+struct dsi_dyn_clk_caps {
+	bool dyn_clk_support;
+	u32 *bit_clk_list;
+	u32 bit_clk_list_len;
 };
 
 struct dsi_pinctrl_info {
@@ -159,6 +167,7 @@ struct dsi_panel {
 	bool panel_mode_switch_enabled;
 
 	struct dsi_dfps_capabilities dfps_caps;
+	struct dsi_dyn_clk_caps dyn_clk_caps;
 	struct dsi_panel_phy_props phy_props;
 
 	struct dsi_display_mode *cur_mode;

+ 126 - 4
msm/dsi/dsi_phy.c

@@ -118,6 +118,9 @@ static int dsi_phy_regmap_init(struct platform_device *pdev,
 
 	phy->hw.base = ptr;
 
+	ptr = msm_ioremap(pdev, "dyn_refresh_base", phy->name);
+	phy->hw.dyn_pll_base = ptr;
+
 	pr_debug("[%s] map dsi_phy registers to %pK\n",
 		phy->name, phy->hw.base);
 
@@ -641,11 +644,8 @@ int dsi_phy_validate_mode(struct msm_dsi_phy *dsi_phy,
 		return -EINVAL;
 	}
 
-	mutex_lock(&dsi_phy->phy_lock);
-
 	pr_debug("[PHY_%d] Skipping validation\n", dsi_phy->index);
 
-	mutex_unlock(&dsi_phy->phy_lock);
 	return rc;
 }
 
@@ -887,7 +887,7 @@ int dsi_phy_enable(struct msm_dsi_phy *phy,
 		rc = phy->hw.ops.calculate_timing_params(&phy->hw,
 						 &phy->mode,
 						 &config->common_config,
-						 &phy->cfg.timing);
+						 &phy->cfg.timing, false);
 	if (rc) {
 		pr_err("[%s] failed to set timing, rc=%d\n", phy->name, rc);
 		goto error;
@@ -905,6 +905,27 @@ error:
 	return rc;
 }
 
+/* update dsi phy timings for dynamic clk switch use case */
+int dsi_phy_update_phy_timings(struct msm_dsi_phy *phy,
+			       struct dsi_host_config *config)
+{
+	int rc = 0;
+
+	if (!phy || !config) {
+		pr_err("invalid argument\n");
+		return -EINVAL;
+	}
+
+	memcpy(&phy->mode, &config->video_timing, sizeof(phy->mode));
+	rc = phy->hw.ops.calculate_timing_params(&phy->hw, &phy->mode,
+						 &config->common_config,
+						 &phy->cfg.timing, true);
+	if (rc)
+		pr_err("failed to calculate phy timings %d\n", rc);
+
+	return rc;
+}
+
 int dsi_phy_lane_reset(struct msm_dsi_phy *phy)
 {
 	int ret = 0;
@@ -1069,6 +1090,7 @@ int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
 		rc = phy->hw.ops.phy_timing_val(&phy->cfg.timing, timing, size);
 	if (!rc)
 		phy->cfg.is_phy_timing_present = true;
+
 	mutex_unlock(&phy->phy_lock);
 	return rc;
 }
@@ -1117,6 +1139,106 @@ int dsi_phy_conv_logical_to_phy_lane(
 	return i;
 }
 
+
+/**
+ * dsi_phy_config_dynamic_refresh() - Configure dynamic refresh registers
+ * @phy:	DSI PHY handle
+ * @delay:	pipe delays for dynamic refresh
+ * @is_master:	Boolean to indicate if for master or slave.
+ */
+void dsi_phy_config_dynamic_refresh(struct msm_dsi_phy *phy,
+		struct dsi_dyn_clk_delay *delay,
+		bool is_master)
+{
+	struct dsi_phy_cfg *cfg;
+
+	if (!phy)
+		return;
+
+	mutex_lock(&phy->phy_lock);
+
+	cfg = &phy->cfg;
+	if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_config)
+		phy->hw.ops.dyn_refresh_ops.dyn_refresh_config(&phy->hw, cfg,
+				is_master);
+	if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_pipe_delay)
+		phy->hw.ops.dyn_refresh_ops.dyn_refresh_pipe_delay(
+				&phy->hw, delay);
+
+	mutex_unlock(&phy->phy_lock);
+}
+
+/**
+ * dsi_phy_dynamic_refresh_trigger() - trigger dynamic refresh
+ * @phy:	DSI PHY handle
+ * @is_master:	Boolean to indicate if for master or slave.
+ */
+void dsi_phy_dynamic_refresh_trigger(struct msm_dsi_phy *phy, bool is_master)
+{
+	u32 off;
+
+	if (!phy)
+		return;
+
+	mutex_lock(&phy->phy_lock);
+	/*
+	 * program the PLL_SWI_INTF_SEL and SW_TRIGGER bits only for the
+	 * master and the SYNC_MODE bit only for the slave.
+	 */
+	if (is_master)
+		off = BIT(DYN_REFRESH_INTF_SEL) | BIT(DYN_REFRESH_SWI_CTRL) |
+			BIT(DYN_REFRESH_SW_TRIGGER);
+	else
+		off = BIT(DYN_REFRESH_SYNC_MODE) | BIT(DYN_REFRESH_SWI_CTRL);
+
+	if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper)
+		phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper(&phy->hw, off);
+
+	mutex_unlock(&phy->phy_lock);
+}
+
+/**
+ * dsi_phy_dyn_refresh_cache_phy_timings - cache the phy timings calculated
+ *				as part of dynamic refresh.
+ * @phy:	   DSI PHY Handle.
+ * @dst:	   Pointer to cache location.
+ * @size:	   Number of phy lane settings.
+ */
+int dsi_phy_dyn_refresh_cache_phy_timings(struct msm_dsi_phy *phy, u32 *dst,
+		u32 size)
+{
+	int rc = 0;
+
+	if (!phy || !dst || !size)
+		return -EINVAL;
+
+	if (phy->hw.ops.dyn_refresh_ops.cache_phy_timings)
+		rc = phy->hw.ops.dyn_refresh_ops.cache_phy_timings(
+				&phy->cfg.timing, dst, size);
+
+	if (rc)
+		pr_err("failed to cache phy timings %d\n", rc);
+
+	return rc;
+}
+
+/**
+ * dsi_phy_dynamic_refresh_clear() - clear dynamic refresh config
+ * @phy:	DSI PHY handle
+ */
+void dsi_phy_dynamic_refresh_clear(struct msm_dsi_phy *phy)
+{
+	if (!phy)
+		return;
+
+	mutex_lock(&phy->phy_lock);
+
+	if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper)
+		phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper(&phy->hw, 0);
+
+	mutex_unlock(&phy->phy_lock);
+}
+
 void dsi_phy_drv_register(void)
 {
 	platform_driver_register(&dsi_phy_platform_driver);

+ 41 - 0
msm/dsi/dsi_phy.h

@@ -290,4 +290,45 @@ void dsi_phy_drv_register(void);
  */
 void dsi_phy_drv_unregister(void);
 
+/**
+ * dsi_phy_update_phy_timings() - Update dsi phy timings
+ * @phy:	DSI PHY handle
+ * @config:	DSI Host config parameters
+ *
+ * Return: error code.
+ */
+int dsi_phy_update_phy_timings(struct msm_dsi_phy *phy,
+			       struct dsi_host_config *config);
+
+/**
+ * dsi_phy_config_dynamic_refresh() - Configure dynamic refresh registers
+ * @phy:	DSI PHY handle
+ * @delay:	pipe delays for dynamic refresh
+ * @is_master:	Boolean to indicate if for master or slave
+ */
+void dsi_phy_config_dynamic_refresh(struct msm_dsi_phy *phy,
+				    struct dsi_dyn_clk_delay *delay,
+				    bool is_master);
+/**
+ * dsi_phy_dynamic_refresh_trigger() - trigger dynamic refresh
+ * @phy:	DSI PHY handle
+ * @is_master:	Boolean to indicate if for master or slave.
+ */
+void dsi_phy_dynamic_refresh_trigger(struct msm_dsi_phy *phy, bool is_master);
+
+/**
+ * dsi_phy_dynamic_refresh_clear() - clear dynamic refresh config
+ * @phy:	DSI PHY handle
+ */
+void dsi_phy_dynamic_refresh_clear(struct msm_dsi_phy *phy);
+
+/**
+ * dsi_phy_dyn_refresh_cache_phy_timings - cache the phy timings calculated
+ *				as part of dynamic refresh.
+ * @phy:	   DSI PHY Handle.
+ * @dst:	   Pointer to cache location.
+ * @size:	   Number of phy lane settings.
+ */
+int dsi_phy_dyn_refresh_cache_phy_timings(struct msm_dsi_phy *phy,
+					  u32 *dst, u32 size);
 #endif /* _DSI_PHY_H_ */

+ 46 - 1
msm/dsi/dsi_phy_hw.h

@@ -161,6 +161,43 @@ struct phy_ulps_config_ops {
 	bool (*is_lanes_in_ulps)(u32 ulps, u32 ulps_lanes);
 };
 
+struct phy_dyn_refresh_ops {
+	/**
+	 * dyn_refresh_helper - helper to program the dynamic refresh ctrl register
+	 * @phy:           Pointer to DSI PHY hardware instance.
+	 * @offset:        bitmask of dynamic refresh flags to program.
+	 */
+	void (*dyn_refresh_helper)(struct dsi_phy_hw *phy, u32 offset);
+
+	/**
+	 * dyn_refresh_config - configure dynamic refresh ctrl registers
+	 * @phy:           Pointer to DSI PHY hardware instance.
+	 * @cfg:	   Pointer to DSI PHY configuration.
+	 * @is_master:	   Boolean to indicate whether for master or slave.
+	 */
+	void (*dyn_refresh_config)(struct dsi_phy_hw *phy,
+				   struct dsi_phy_cfg *cfg, bool is_master);
+
+	/**
+	 * dyn_refresh_pipe_delay - configure pipe delay registers for dynamic
+	 *				refresh.
+	 * @phy:           Pointer to DSI PHY hardware instance.
+	 * @delay:	   structure containing all the delays to be programmed.
+	 */
+	void (*dyn_refresh_pipe_delay)(struct dsi_phy_hw *phy,
+				      struct dsi_dyn_clk_delay *delay);
+
+	/**
+	 * cache_phy_timings - cache the phy timings calculated as part of
+	 *				dynamic refresh.
+	 * @timings:       Pointer to calculated phy timing parameters.
+	 * @dst:	   Pointer to cache location.
+	 * @size:	   Number of phy lane settings.
+	 */
+	int (*cache_phy_timings)(struct dsi_phy_per_lane_cfgs *timings,
+				  u32 *dst, u32 size);
+};
+
 /**
  * struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
  * @regulator_enable:          Enable PHY regulators.
@@ -220,11 +257,14 @@ struct dsi_phy_hw_ops {
 	 * @mode:     Mode information for which timing has to be calculated.
 	 * @config:   DSI host configuration for this mode.
 	 * @timing:   Timing parameters for each lane which will be returned.
+	 * @use_mode_bit_clk: Boolean to indicate whether to recalculate the
+	 *		DSI bit clock or use the existing one (dynamic clk case).
 	 */
 	int (*calculate_timing_params)(struct dsi_phy_hw *phy,
 				       struct dsi_mode_info *mode,
 				       struct dsi_host_common_cfg *config,
-				       struct dsi_phy_per_lane_cfgs *timing);
+				       struct dsi_phy_per_lane_cfgs *timing,
+				       bool use_mode_bit_clk);
 
 	/**
 	 * phy_timing_val() - Gets PHY timing values.
@@ -265,12 +305,15 @@ struct dsi_phy_hw_ops {
 
 	void *timing_ops;
 	struct phy_ulps_config_ops ulps_ops;
+	struct phy_dyn_refresh_ops dyn_refresh_ops;
 };
 
 /**
  * struct dsi_phy_hw - DSI phy hardware object specific to an instance
  * @base:                  VA for the DSI PHY base address.
  * @length:                Length of the DSI PHY register base map.
+ * @dyn_pll_base:      VA for the DSI dynamic refresh base address.
+ * @dyn_refresh_len:   Length of the DSI dynamic refresh register base map.
  * @index:                 Instance ID of the controller.
  * @version:               DSI PHY version.
  * @phy_clamp_base:        Base address of phy clamp register map.
@@ -280,6 +323,8 @@ struct dsi_phy_hw_ops {
 struct dsi_phy_hw {
 	void __iomem *base;
 	u32 length;
+	void __iomem *dyn_pll_base;
+	u32 dyn_refresh_len;
 	u32 index;
 
 	enum dsi_phy_version version;

+ 201 - 0
msm/dsi/dsi_phy_hw_v3_0.c

@@ -58,6 +58,47 @@
 #define DSIPHY_LNX_LPRX_CTRL(n)                    (0x228 + (0x80 * (n)))
 #define DSIPHY_LNX_TX_DCTRL(n)                     (0x22C + (0x80 * (n)))
 
+/* dynamic refresh control registers */
+#define DSI_DYN_REFRESH_CTRL                   (0x000)
+#define DSI_DYN_REFRESH_PIPE_DELAY             (0x004)
+#define DSI_DYN_REFRESH_PIPE_DELAY2            (0x008)
+#define DSI_DYN_REFRESH_PLL_DELAY              (0x00C)
+#define DSI_DYN_REFRESH_STATUS                 (0x010)
+#define DSI_DYN_REFRESH_PLL_CTRL0              (0x014)
+#define DSI_DYN_REFRESH_PLL_CTRL1              (0x018)
+#define DSI_DYN_REFRESH_PLL_CTRL2              (0x01C)
+#define DSI_DYN_REFRESH_PLL_CTRL3              (0x020)
+#define DSI_DYN_REFRESH_PLL_CTRL4              (0x024)
+#define DSI_DYN_REFRESH_PLL_CTRL5              (0x028)
+#define DSI_DYN_REFRESH_PLL_CTRL6              (0x02C)
+#define DSI_DYN_REFRESH_PLL_CTRL7              (0x030)
+#define DSI_DYN_REFRESH_PLL_CTRL8              (0x034)
+#define DSI_DYN_REFRESH_PLL_CTRL9              (0x038)
+#define DSI_DYN_REFRESH_PLL_CTRL10             (0x03C)
+#define DSI_DYN_REFRESH_PLL_CTRL11             (0x040)
+#define DSI_DYN_REFRESH_PLL_CTRL12             (0x044)
+#define DSI_DYN_REFRESH_PLL_CTRL13             (0x048)
+#define DSI_DYN_REFRESH_PLL_CTRL14             (0x04C)
+#define DSI_DYN_REFRESH_PLL_CTRL15             (0x050)
+#define DSI_DYN_REFRESH_PLL_CTRL16             (0x054)
+#define DSI_DYN_REFRESH_PLL_CTRL17             (0x058)
+#define DSI_DYN_REFRESH_PLL_CTRL18             (0x05C)
+#define DSI_DYN_REFRESH_PLL_CTRL19             (0x060)
+#define DSI_DYN_REFRESH_PLL_CTRL20             (0x064)
+#define DSI_DYN_REFRESH_PLL_CTRL21             (0x068)
+#define DSI_DYN_REFRESH_PLL_CTRL22             (0x06C)
+#define DSI_DYN_REFRESH_PLL_CTRL23             (0x070)
+#define DSI_DYN_REFRESH_PLL_CTRL24             (0x074)
+#define DSI_DYN_REFRESH_PLL_CTRL25             (0x078)
+#define DSI_DYN_REFRESH_PLL_CTRL26             (0x07C)
+#define DSI_DYN_REFRESH_PLL_CTRL27             (0x080)
+#define DSI_DYN_REFRESH_PLL_CTRL28             (0x084)
+#define DSI_DYN_REFRESH_PLL_CTRL29             (0x088)
+#define DSI_DYN_REFRESH_PLL_CTRL30             (0x08C)
+#define DSI_DYN_REFRESH_PLL_CTRL31             (0x090)
+#define DSI_DYN_REFRESH_PLL_UPPER_ADDR         (0x094)
+#define DSI_DYN_REFRESH_PLL_UPPER_ADDR2        (0x098)
+
 /**
  * regulator_enable() - enable regulators for DSI PHY
  * @phy:      Pointer to DSI PHY hardware object.
@@ -462,3 +503,163 @@ int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
 		timing_cfg->lane_v3[i] = timing_val[i];
 	return 0;
 }
+
+void dsi_phy_hw_v3_0_dyn_refresh_config(struct dsi_phy_hw *phy,
+					struct dsi_phy_cfg *cfg, bool is_master)
+{
+	u32 reg;
+
+	if (is_master) {
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
+			  DSIPHY_CMN_GLBL_CTRL, DSIPHY_CMN_VREG_CTRL,
+			  0x10, 0x59);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL10,
+			  DSIPHY_CMN_TIMING_CTRL_0, DSIPHY_CMN_TIMING_CTRL_1,
+			  cfg->timing.lane_v3[0], cfg->timing.lane_v3[1]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL11,
+			  DSIPHY_CMN_TIMING_CTRL_2, DSIPHY_CMN_TIMING_CTRL_3,
+			  cfg->timing.lane_v3[2], cfg->timing.lane_v3[3]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL12,
+			  DSIPHY_CMN_TIMING_CTRL_4, DSIPHY_CMN_TIMING_CTRL_5,
+			  cfg->timing.lane_v3[4], cfg->timing.lane_v3[5]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL13,
+			  DSIPHY_CMN_TIMING_CTRL_6, DSIPHY_CMN_TIMING_CTRL_7,
+			  cfg->timing.lane_v3[6], cfg->timing.lane_v3[7]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL14,
+			  DSIPHY_CMN_TIMING_CTRL_8, DSIPHY_CMN_TIMING_CTRL_9,
+			  cfg->timing.lane_v3[8], cfg->timing.lane_v3[9]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL15,
+			  DSIPHY_CMN_TIMING_CTRL_10, DSIPHY_CMN_TIMING_CTRL_11,
+			  cfg->timing.lane_v3[10], cfg->timing.lane_v3[11]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL16,
+			  DSIPHY_CMN_CTRL_0, DSIPHY_CMN_LANE_CTRL0,
+			  0x7f, 0x1f);
+	} else {
+		reg = DSI_R32(phy, DSIPHY_CMN_CLK_CFG0);
+		reg &= ~BIT(5);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL0,
+			  DSIPHY_CMN_CLK_CFG0, DSIPHY_CMN_PLL_CNTRL,
+			  reg, 0x0);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL1,
+			  DSIPHY_CMN_RBUF_CTRL, DSIPHY_CMN_GLBL_CTRL,
+			  0x0, 0x10);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL2,
+			  DSIPHY_CMN_VREG_CTRL, DSIPHY_CMN_TIMING_CTRL_0,
+			  0x59, cfg->timing.lane_v3[0]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL3,
+			  DSIPHY_CMN_TIMING_CTRL_1, DSIPHY_CMN_TIMING_CTRL_2,
+			  cfg->timing.lane_v3[1], cfg->timing.lane_v3[2]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL4,
+			  DSIPHY_CMN_TIMING_CTRL_3, DSIPHY_CMN_TIMING_CTRL_4,
+			  cfg->timing.lane_v3[3], cfg->timing.lane_v3[4]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL5,
+			  DSIPHY_CMN_TIMING_CTRL_5, DSIPHY_CMN_TIMING_CTRL_6,
+			  cfg->timing.lane_v3[5], cfg->timing.lane_v3[6]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL6,
+			  DSIPHY_CMN_TIMING_CTRL_7, DSIPHY_CMN_TIMING_CTRL_8,
+			  cfg->timing.lane_v3[7], cfg->timing.lane_v3[8]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL7,
+			  DSIPHY_CMN_TIMING_CTRL_9, DSIPHY_CMN_TIMING_CTRL_10,
+			  cfg->timing.lane_v3[9], cfg->timing.lane_v3[10]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL8,
+			  DSIPHY_CMN_TIMING_CTRL_11, DSIPHY_CMN_CTRL_0,
+			  cfg->timing.lane_v3[11], 0x7f);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
+			  DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_2,
+			  0x1f, 0x40);
+		/*
+		 * fill with dummy register writes since the controller will
+		 * blindly send these values to the DSI PHY.
+		 */
+		reg = DSI_DYN_REFRESH_PLL_CTRL11;
+		while (reg <= DSI_DYN_REFRESH_PLL_CTRL29) {
+			DSI_DYN_REF_REG_W(phy->dyn_pll_base, reg,
+				  DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_0,
+				  0x1f, 0x7f);
+			reg += 0x4;
+		}
+
+		DSI_GEN_W32(phy->dyn_pll_base,
+			    DSI_DYN_REFRESH_PLL_UPPER_ADDR, 0);
+		DSI_GEN_W32(phy->dyn_pll_base,
+			    DSI_DYN_REFRESH_PLL_UPPER_ADDR2, 0);
+	}
+
+	wmb(); /* make sure all registers are updated */
+}
+
+void dsi_phy_hw_v3_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
+					    struct dsi_dyn_clk_delay *delay)
+{
+	if (!delay)
+		return;
+
+	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY,
+		    delay->pipe_delay);
+	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY2,
+		    delay->pipe_delay2);
+	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_DELAY,
+		    delay->pll_delay);
+}
+
+void dsi_phy_hw_v3_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset)
+{
+	u32 reg;
+
+	/*
+	 * if no offset is specified, clear the dynamic refresh ctrl
+	 * register; this is the last step of the dynamic refresh
+	 * sequence.
+	 */
+	if (!offset) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg &= ~(BIT(0) | BIT(8));
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+		wmb(); /* ensure dynamic fps is cleared */
+		return;
+	}
+
+	if (offset & BIT(DYN_REFRESH_INTF_SEL)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(13);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+	}
+
+	if (offset & BIT(DYN_REFRESH_SYNC_MODE)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(16);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+	}
+
+	if (offset & BIT(DYN_REFRESH_SWI_CTRL)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(0);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+	}
+
+	if (offset & BIT(DYN_REFRESH_SW_TRIGGER)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(8);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+		wmb(); /* ensure dynamic fps is triggered */
+	}
+}
+
+int dsi_phy_hw_v3_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
+				      u32 *dst, u32 size)
+{
+	int i;
+
+	if (!timings || !dst || !size)
+		return -EINVAL;
+
+	if (size != DSI_PHY_TIMING_V3_SIZE) {
+		pr_err("size mis-match\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i++)
+		dst[i] = timings->lane_v3[i];
+
+	return 0;
+}
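
To make the trigger path easier to follow: dsi_phy_dynamic_refresh_trigger() (added earlier in dsi_phy.c) builds a mask of DYN_REFRESH_* flags (INTF_SEL, SWI_CTRL and SW_TRIGGER for the master; SYNC_MODE and SWI_CTRL for the slave), and the helper above translates those flags into bits 13, 0, 8 and 16 of DSI_DYN_REFRESH_CTRL. The user-space sketch below (not part of the patch) mimics that translation; the DYN_REFRESH_* bit positions are hypothetical stand-ins for the driver's enum, which is defined outside this diff.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

/* hypothetical flag positions; only the ctrl-bit mapping follows the helper */
enum {
	DYN_REFRESH_INTF_SEL = 1,
	DYN_REFRESH_SYNC_MODE,
	DYN_REFRESH_SWI_CTRL,
	DYN_REFRESH_SW_TRIGGER,
};

static uint32_t dyn_refresh_ctrl_value(uint32_t flags)
{
	uint32_t reg = 0;

	if (flags & BIT(DYN_REFRESH_INTF_SEL))
		reg |= BIT(13);
	if (flags & BIT(DYN_REFRESH_SYNC_MODE))
		reg |= BIT(16);
	if (flags & BIT(DYN_REFRESH_SWI_CTRL))
		reg |= BIT(0);
	if (flags & BIT(DYN_REFRESH_SW_TRIGGER))
		reg |= BIT(8);

	return reg;
}

int main(void)
{
	uint32_t master = BIT(DYN_REFRESH_INTF_SEL) | BIT(DYN_REFRESH_SWI_CTRL) |
			  BIT(DYN_REFRESH_SW_TRIGGER);
	uint32_t slave = BIT(DYN_REFRESH_SYNC_MODE) | BIT(DYN_REFRESH_SWI_CTRL);

	printf("master DSI_DYN_REFRESH_CTRL = 0x%05x\n",
	       dyn_refresh_ctrl_value(master));	/* bits 0, 8, 13 -> 0x02101 */
	printf("slave  DSI_DYN_REFRESH_CTRL = 0x%05x\n",
	       dyn_refresh_ctrl_value(slave));	/* bits 0, 16    -> 0x10001 */
	return 0;
}
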

+ 203 - 1
msm/dsi/dsi_phy_hw_v4_0.c

@@ -62,7 +62,6 @@
 #define DSIPHY_CMN_LANE_STATUS0						0x148
 #define DSIPHY_CMN_LANE_STATUS1						0x14C
 
-
 /* n = 0..3 for data lanes and n = 4 for clock lane */
 #define DSIPHY_LNX_CFG0(n)                         (0x200 + (0x80 * (n)))
 #define DSIPHY_LNX_CFG1(n)                         (0x204 + (0x80 * (n)))
@@ -72,6 +71,47 @@
 #define DSIPHY_LNX_LPRX_CTRL(n)                    (0x214 + (0x80 * (n)))
 #define DSIPHY_LNX_TX_DCTRL(n)                     (0x218 + (0x80 * (n)))
 
+/* dynamic refresh control registers */
+#define DSI_DYN_REFRESH_CTRL                   (0x000)
+#define DSI_DYN_REFRESH_PIPE_DELAY             (0x004)
+#define DSI_DYN_REFRESH_PIPE_DELAY2            (0x008)
+#define DSI_DYN_REFRESH_PLL_DELAY              (0x00C)
+#define DSI_DYN_REFRESH_STATUS                 (0x010)
+#define DSI_DYN_REFRESH_PLL_CTRL0              (0x014)
+#define DSI_DYN_REFRESH_PLL_CTRL1              (0x018)
+#define DSI_DYN_REFRESH_PLL_CTRL2              (0x01C)
+#define DSI_DYN_REFRESH_PLL_CTRL3              (0x020)
+#define DSI_DYN_REFRESH_PLL_CTRL4              (0x024)
+#define DSI_DYN_REFRESH_PLL_CTRL5              (0x028)
+#define DSI_DYN_REFRESH_PLL_CTRL6              (0x02C)
+#define DSI_DYN_REFRESH_PLL_CTRL7              (0x030)
+#define DSI_DYN_REFRESH_PLL_CTRL8              (0x034)
+#define DSI_DYN_REFRESH_PLL_CTRL9              (0x038)
+#define DSI_DYN_REFRESH_PLL_CTRL10             (0x03C)
+#define DSI_DYN_REFRESH_PLL_CTRL11             (0x040)
+#define DSI_DYN_REFRESH_PLL_CTRL12             (0x044)
+#define DSI_DYN_REFRESH_PLL_CTRL13             (0x048)
+#define DSI_DYN_REFRESH_PLL_CTRL14             (0x04C)
+#define DSI_DYN_REFRESH_PLL_CTRL15             (0x050)
+#define DSI_DYN_REFRESH_PLL_CTRL16             (0x054)
+#define DSI_DYN_REFRESH_PLL_CTRL17             (0x058)
+#define DSI_DYN_REFRESH_PLL_CTRL18             (0x05C)
+#define DSI_DYN_REFRESH_PLL_CTRL19             (0x060)
+#define DSI_DYN_REFRESH_PLL_CTRL20             (0x064)
+#define DSI_DYN_REFRESH_PLL_CTRL21             (0x068)
+#define DSI_DYN_REFRESH_PLL_CTRL22             (0x06C)
+#define DSI_DYN_REFRESH_PLL_CTRL23             (0x070)
+#define DSI_DYN_REFRESH_PLL_CTRL24             (0x074)
+#define DSI_DYN_REFRESH_PLL_CTRL25             (0x078)
+#define DSI_DYN_REFRESH_PLL_CTRL26             (0x07C)
+#define DSI_DYN_REFRESH_PLL_CTRL27             (0x080)
+#define DSI_DYN_REFRESH_PLL_CTRL28             (0x084)
+#define DSI_DYN_REFRESH_PLL_CTRL29             (0x088)
+#define DSI_DYN_REFRESH_PLL_CTRL30             (0x08C)
+#define DSI_DYN_REFRESH_PLL_CTRL31             (0x090)
+#define DSI_DYN_REFRESH_PLL_UPPER_ADDR         (0x094)
+#define DSI_DYN_REFRESH_PLL_UPPER_ADDR2        (0x098)
+
 static int dsi_phy_hw_v4_0_is_pll_on(struct dsi_phy_hw *phy)
 {
 	u32 data = 0;
@@ -481,3 +521,165 @@ int dsi_phy_hw_timing_val_v4_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
 		timing_cfg->lane_v4[i] = timing_val[i];
 	return 0;
 }
+
+void dsi_phy_hw_v4_0_dyn_refresh_config(struct dsi_phy_hw *phy,
+					struct dsi_phy_cfg *cfg, bool is_master)
+{
+	u32 reg;
+
+	if (is_master) {
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL19,
+			  DSIPHY_CMN_TIMING_CTRL_0, DSIPHY_CMN_TIMING_CTRL_1,
+			  cfg->timing.lane_v4[0], cfg->timing.lane_v4[1]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL20,
+			  DSIPHY_CMN_TIMING_CTRL_2, DSIPHY_CMN_TIMING_CTRL_3,
+			  cfg->timing.lane_v4[2], cfg->timing.lane_v4[3]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL21,
+			  DSIPHY_CMN_TIMING_CTRL_4, DSIPHY_CMN_TIMING_CTRL_5,
+			  cfg->timing.lane_v4[4], cfg->timing.lane_v4[5]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL22,
+			  DSIPHY_CMN_TIMING_CTRL_6, DSIPHY_CMN_TIMING_CTRL_7,
+			  cfg->timing.lane_v4[6], cfg->timing.lane_v4[7]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL23,
+			  DSIPHY_CMN_TIMING_CTRL_8, DSIPHY_CMN_TIMING_CTRL_9,
+			  cfg->timing.lane_v4[8], cfg->timing.lane_v4[9]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL24,
+			  DSIPHY_CMN_TIMING_CTRL_10, DSIPHY_CMN_TIMING_CTRL_11,
+			  cfg->timing.lane_v4[10], cfg->timing.lane_v4[11]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL25,
+			  DSIPHY_CMN_TIMING_CTRL_12, DSIPHY_CMN_TIMING_CTRL_13,
+			  cfg->timing.lane_v4[12], cfg->timing.lane_v4[13]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL26,
+			  DSIPHY_CMN_CTRL_0, DSIPHY_CMN_LANE_CTRL0,
+			  0x7f, 0x1f);
+
+	} else {
+		reg = DSI_R32(phy, DSIPHY_CMN_CLK_CFG1);
+		reg &= ~BIT(5);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL0,
+			  DSIPHY_CMN_CLK_CFG1, DSIPHY_CMN_PLL_CNTRL,
+			  reg, 0x0);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL1,
+			  DSIPHY_CMN_RBUF_CTRL, DSIPHY_CMN_TIMING_CTRL_0,
+			  0x0, cfg->timing.lane_v4[0]);
+
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL2,
+			  DSIPHY_CMN_TIMING_CTRL_1, DSIPHY_CMN_TIMING_CTRL_2,
+			  cfg->timing.lane_v4[1], cfg->timing.lane_v4[2]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL3,
+			  DSIPHY_CMN_TIMING_CTRL_3, DSIPHY_CMN_TIMING_CTRL_4,
+			  cfg->timing.lane_v4[3], cfg->timing.lane_v4[4]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL4,
+			  DSIPHY_CMN_TIMING_CTRL_5, DSIPHY_CMN_TIMING_CTRL_6,
+			  cfg->timing.lane_v4[5], cfg->timing.lane_v4[6]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL5,
+			  DSIPHY_CMN_TIMING_CTRL_7, DSIPHY_CMN_TIMING_CTRL_8,
+			  cfg->timing.lane_v4[7], cfg->timing.lane_v4[8]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL6,
+			  DSIPHY_CMN_TIMING_CTRL_9, DSIPHY_CMN_TIMING_CTRL_10,
+			  cfg->timing.lane_v4[9], cfg->timing.lane_v4[10]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL7,
+			  DSIPHY_CMN_TIMING_CTRL_11, DSIPHY_CMN_TIMING_CTRL_12,
+			  cfg->timing.lane_v4[11], cfg->timing.lane_v4[12]);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL8,
+			  DSIPHY_CMN_TIMING_CTRL_13, DSIPHY_CMN_CTRL_0,
+			  cfg->timing.lane_v4[13], 0x7f);
+		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
+			  DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_2,
+			  0x1f, 0x40);
+		/*
+		 * fill with dummy register writes since the controller will
+		 * blindly send these values to the DSI PHY.
+		 */
+		reg = DSI_DYN_REFRESH_PLL_CTRL11;
+		while (reg <= DSI_DYN_REFRESH_PLL_CTRL29) {
+			DSI_DYN_REF_REG_W(phy->dyn_pll_base, reg,
+				  DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_0,
+				  0x1f, 0x7f);
+			reg += 0x4;
+		}
+
+		DSI_GEN_W32(phy->dyn_pll_base,
+			    DSI_DYN_REFRESH_PLL_UPPER_ADDR, 0);
+		DSI_GEN_W32(phy->dyn_pll_base,
+			    DSI_DYN_REFRESH_PLL_UPPER_ADDR2, 0);
+	}
+
+	wmb(); /* make sure all registers are updated */
+}
+
+void dsi_phy_hw_v4_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
+					    struct dsi_dyn_clk_delay *delay)
+{
+	if (!delay)
+		return;
+
+	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY,
+		    delay->pipe_delay);
+	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY2,
+		    delay->pipe_delay2);
+	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_DELAY,
+		    delay->pll_delay);
+}
+
+void dsi_phy_hw_v4_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset)
+{
+	u32 reg;
+
+	/*
+	 * if no offset is specified, clear the dynamic refresh ctrl
+	 * register; this is the last step of the dynamic refresh
+	 * sequence.
+	 */
+	if (!offset) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg &= ~(BIT(0) | BIT(8));
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+		wmb(); /* ensure dynamic fps is cleared */
+		return;
+	}
+
+	if (offset & BIT(DYN_REFRESH_INTF_SEL)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(13);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+	}
+
+	if (offset & BIT(DYN_REFRESH_SYNC_MODE)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(16);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+	}
+
+	if (offset & BIT(DYN_REFRESH_SWI_CTRL)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(0);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+	}
+
+	if (offset & BIT(DYN_REFRESH_SW_TRIGGER)) {
+		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+		reg |= BIT(8);
+		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+		wmb(); /* ensure dynamic fps is triggered */
+	}
+}
+
+int dsi_phy_hw_v4_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
+				      u32 *dst, u32 size)
+{
+	int i;
+
+	if (!timings || !dst || !size)
+		return -EINVAL;
+
+	if (size != DSI_PHY_TIMING_V4_SIZE) {
+		pr_err("size mis-match\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i++)
+		dst[i] = timings->lane_v4[i];
+
+	return 0;
+}

+ 10 - 4
msm/dsi/dsi_phy_timing_calc.c

@@ -630,11 +630,14 @@ error:
  * @mode:     Mode information for which timing has to be calculated.
  * @config:   DSI host configuration for this mode.
  * @timing:   Timing parameters for each lane which will be returned.
+ * @use_mode_bit_clk: Boolean to indicate whether to recalculate the DSI
+ *		bit clock or use the existing one (dynamic clk case).
  */
 int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
-					    struct dsi_mode_info *mode,
-					    struct dsi_host_common_cfg *host,
-					   struct dsi_phy_per_lane_cfgs *timing)
+				       struct dsi_mode_info *mode,
+				       struct dsi_host_common_cfg *host,
+				       struct dsi_phy_per_lane_cfgs *timing,
+				       bool use_mode_bit_clk)
 {
 	/* constants */
 	u32 const esc_clk_mhz = 192; /* TODO: esc clock is hardcoded */
@@ -677,7 +680,10 @@ int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
 		num_of_lanes++;
 
 
-	x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
+	if (use_mode_bit_clk)
+		x = mode->clk_rate_hz;
+	else
+		x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
 	y = rounddown(x, 1);
 
 	clk_params.bitclk_mbps = rounddown(DIV_ROUND_UP_ULL(y, 1000000), 1);
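
A quick numeric illustration of the use_mode_bit_clk path added above: when the flag is set, the mode's clk_rate_hz is taken directly as the bit clock in Hz and only scaled down to Mbps, instead of being recomputed from the timing totals. The sketch below is not part of the patch; the rate is an arbitrary example value and the DIV_ROUND_UP_ULL stand-in mirrors the kernel macro.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t clk_rate_hz = 1062000000ULL;	/* hypothetical panel bit clock */
	uint32_t bitclk_mbps = (uint32_t)DIV_ROUND_UP_ULL(clk_rate_hz, 1000000ULL);

	printf("bitclk_mbps = %u\n", bitclk_mbps);	/* prints 1062 */
	return 0;
}
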

+ 6 - 1
msm/msm_atomic.c

@@ -87,7 +87,8 @@ static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state,
 
 	if (msm_is_mode_seamless(&crtc_state->mode) ||
 		msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode) ||
-		msm_is_mode_seamless_poms(&crtc_state->adjusted_mode))
+		msm_is_mode_seamless_poms(&crtc_state->adjusted_mode) ||
+		msm_is_mode_seamless_dyn_clk(&crtc_state->adjusted_mode))
 		return true;
 
 	if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable)
@@ -132,6 +133,10 @@ static inline bool _msm_seamless_for_conn(struct drm_connector *connector,
 			&connector->encoder->crtc->state->adjusted_mode))
 		return true;
 
+	if (msm_is_mode_seamless_dyn_clk(
+			 &connector->encoder->crtc->state->adjusted_mode))
+		return true;
+
 	if (msm_is_mode_seamless_dms(
 			&connector->encoder->crtc->state->adjusted_mode))
 		return true;

+ 9 - 0
msm/msm_kms.h

@@ -40,6 +40,8 @@
 #define MSM_MODE_FLAG_SEAMLESS_VRR			(1<<3)
 /* Request to switch the panel mode */
 #define MSM_MODE_FLAG_SEAMLESS_POMS			(1<<4)
+/* Request to switch the bit clk */
+#define MSM_MODE_FLAG_SEAMLESS_DYN_CLK			(1<<5)
 
 /* As there are different display controller blocks depending on the
  * snapdragon version, the kms support is split out and the appropriate
@@ -221,6 +223,13 @@ static inline bool msm_is_mode_seamless_poms(
 		: false;
 }
 
+static inline bool msm_is_mode_seamless_dyn_clk(
+					const struct drm_display_mode *mode)
+{
+	return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DYN_CLK)
+		: false;
+}
+
 static inline bool msm_needs_vblank_pre_modeset(
 		const struct drm_display_mode *mode)
 {

+ 3 - 0
msm/sde/sde_connector.c

@@ -2092,6 +2092,9 @@ static int sde_connector_populate_mode_info(struct drm_connector *conn,
 
 		sde_kms_info_add_keystr(info, "mode_name", mode->name);
 
+		sde_kms_info_add_keyint(info, "bit_clk_rate",
+					mode_info.clk_rate);
+
 		topology_idx = (int)sde_rm_get_topology_name(
 							mode_info.topology);
 		if (topology_idx < SDE_RM_TOPOLOGY_MAX) {

+ 6 - 3
msm/sde/sde_crtc.c

@@ -430,8 +430,9 @@ static bool sde_crtc_mode_fixup(struct drm_crtc *crtc,
 	SDE_DEBUG("\n");
 
 	if ((msm_is_mode_seamless(adjusted_mode) ||
-			msm_is_mode_seamless_vrr(adjusted_mode)) &&
-		(!crtc->enabled)) {
+	     (msm_is_mode_seamless_vrr(adjusted_mode) ||
+	      msm_is_mode_seamless_dyn_clk(adjusted_mode))) &&
+	    (!crtc->enabled)) {
 		SDE_ERROR("crtc state prevents seamless transition\n");
 		return false;
 	}
@@ -4056,7 +4057,9 @@ static void sde_crtc_enable(struct drm_crtc *crtc,
 
 	/* return early if crtc is already enabled, do this after UIDLE check */
 	if (sde_crtc->enabled) {
-		if (msm_is_mode_seamless_dms(&crtc->state->adjusted_mode))
+		if (msm_is_mode_seamless_dms(&crtc->state->adjusted_mode) ||
+		msm_is_mode_seamless_dyn_clk(&crtc->state->adjusted_mode))
+
 			SDE_DEBUG("%s extra crtc enable expected during DMS\n",
 					sde_crtc->name);
 		else

+ 18 - 7
msm/sde/sde_encoder.c

@@ -2751,7 +2751,7 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 	struct sde_rm_hw_iter dsc_iter, pp_iter, qdss_iter;
 	struct sde_rm_hw_request request_hw;
 	enum sde_intf_mode intf_mode;
-
+	bool is_cmd_mode = false;
 	int i = 0, ret;
 
 	if (!drm_enc) {
@@ -2767,6 +2767,9 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 	sde_enc = to_sde_encoder_virt(drm_enc);
 	SDE_DEBUG_ENC(sde_enc, "\n");
 
+	if (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE))
+		is_cmd_mode = true;
+
 	priv = drm_enc->dev->dev_private;
 	sde_kms = to_sde_kms(priv->kms);
 	connector_list = &sde_kms->dev->mode_config.connector_list;
@@ -2815,7 +2818,9 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 		sde_encoder_virt_mode_switch(intf_mode, sde_enc, adj_mode);
 
 	/* release resources before seamless mode change */
-	if (msm_is_mode_seamless_dms(adj_mode)) {
+	if (msm_is_mode_seamless_dms(adj_mode) ||
+			(msm_is_mode_seamless_dyn_clk(adj_mode) &&
+			 is_cmd_mode)) {
 		/* restore resource state before releasing them */
 		ret = sde_encoder_resource_control(drm_enc,
 				SDE_ENC_RC_EVENT_PRE_MODESET);
@@ -2905,7 +2910,9 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 	}
 
 	/* update resources after seamless mode change */
-	if (msm_is_mode_seamless_dms(adj_mode))
+	if (msm_is_mode_seamless_dms(adj_mode) ||
+			(msm_is_mode_seamless_dyn_clk(adj_mode) &&
+			is_cmd_mode))
 		sde_encoder_resource_control(&sde_enc->base,
 						SDE_ENC_RC_EVENT_POST_MODESET);
 }
@@ -3190,7 +3197,8 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
 	}
 
 	/* register input handler if not already registered */
-	if (sde_enc->input_handler && !msm_is_mode_seamless_dms(cur_mode)) {
+	if (sde_enc->input_handler && !msm_is_mode_seamless_dms(cur_mode) &&
+			!msm_is_mode_seamless_dyn_clk(cur_mode)) {
 		ret = _sde_encoder_input_handler_register(
 				sde_enc->input_handler);
 		if (ret)
@@ -3199,7 +3207,8 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
 	}
 
 	if (!(msm_is_mode_seamless_vrr(cur_mode)
-			|| msm_is_mode_seamless_dms(cur_mode)))
+			|| msm_is_mode_seamless_dms(cur_mode)
+			|| msm_is_mode_seamless_dyn_clk(cur_mode)))
 		kthread_init_delayed_work(&sde_enc->delayed_off_work,
 			sde_encoder_off_work);
 
@@ -3235,7 +3244,8 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
 			 * already. Invoke restore to reconfigure the
 			 * new mode.
 			 */
-			if (msm_is_mode_seamless_dms(cur_mode) &&
+			if ((msm_is_mode_seamless_dms(cur_mode) ||
+				msm_is_mode_seamless_dyn_clk(cur_mode)) &&
 					phys->ops.restore)
 				phys->ops.restore(phys);
 			else if (phys->ops.enable)
@@ -3248,7 +3258,8 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
 						sde_enc->misr_frame_count);
 	}
 
-	if (msm_is_mode_seamless_dms(cur_mode) &&
+	if ((msm_is_mode_seamless_dms(cur_mode) ||
+			msm_is_mode_seamless_dyn_clk(cur_mode)) &&
 			sde_enc->cur_master->ops.restore)
 		sde_enc->cur_master->ops.restore(sde_enc->cur_master);
 	else if (sde_enc->cur_master->ops.enable)

+ 592 - 28
pll/dsi_pll_10nm.c

@@ -87,6 +87,9 @@
 #define PLL_PLL_INT_GAIN_IFILT_BAND_1		0x15c
 #define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1	0x164
 #define PLL_FASTLOCK_EN_BAND			0x16c
+#define PLL_FREQ_TUNE_ACCUM_INIT_LOW		0x170
+#define PLL_FREQ_TUNE_ACCUM_INIT_MID		0x174
+#define PLL_FREQ_TUNE_ACCUM_INIT_HIGH		0x178
 #define PLL_FREQ_TUNE_ACCUM_INIT_MUX		0x17c
 #define PLL_PLL_LOCK_OVERRIDE			0x180
 #define PLL_PLL_LOCK_DELAY			0x184
@@ -104,6 +107,7 @@
 #define PHY_CMN_RBUF_CTRL	0x01c
 #define PHY_CMN_PLL_CNTRL	0x038
 #define PHY_CMN_CTRL_0		0x024
+#define PHY_CMN_CTRL_2		0x02c
 
 /* Bit definition of SSC control registers */
 #define SSC_CENTER		BIT(0)
@@ -115,6 +119,43 @@
 #define SSC_START		BIT(6)
 #define SSC_START_MUX		BIT(7)
 
+/* Dynamic Refresh Control Registers */
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0		(0x014)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1		(0x018)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2		(0x01C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3		(0x020)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4		(0x024)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5		(0x028)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6		(0x02C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7		(0x030)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8		(0x034)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9		(0x038)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10		(0x03C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11		(0x040)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12		(0x044)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13		(0x048)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14		(0x04C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15		(0x050)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16		(0x054)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17		(0x058)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18		(0x05C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19		(0x060)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20		(0x064)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21		(0x068)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22		(0x06C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23		(0x070)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24		(0x074)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25		(0x078)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26		(0x07C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27		(0x080)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28		(0x084)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29		(0x088)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30		(0x08C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31		(0x090)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR	(0x094)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2	(0x098)
+
+#define DSI_PHY_TO_PLL_OFFSET	(0x600)
 enum {
 	DSI_PLL_0,
 	DSI_PLL_1,
@@ -621,6 +662,7 @@ static int vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
 
 	rsc->vco_current_rate = rate;
 	rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+	rsc->dfps_trigger = false;
 
 	rc = mdss_pll_resource_enable(rsc, true);
 	if (rc) {
@@ -651,6 +693,237 @@ static int vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
 	return 0;
 }
 
+static int dsi_pll_read_stored_trim_codes(struct mdss_pll_resources *pll_res,
+					  unsigned long vco_clk_rate)
+{
+	int i;
+	bool found = false;
+
+	if (!pll_res->dfps)
+		return -EINVAL;
+
+	for (i = 0; i < pll_res->dfps->vco_rate_cnt; i++) {
+		struct dfps_codes_info *codes_info =
+			&pll_res->dfps->codes_dfps[i];
+
+		pr_debug("valid=%d vco_rate=%d, code %d %d %d\n",
+			codes_info->is_valid, codes_info->clk_rate,
+			codes_info->pll_codes.pll_codes_1,
+			codes_info->pll_codes.pll_codes_2,
+			codes_info->pll_codes.pll_codes_3);
+
+		if (vco_clk_rate != codes_info->clk_rate &&
+				codes_info->is_valid)
+			continue;
+
+		pll_res->cache_pll_trim_codes[0] =
+			codes_info->pll_codes.pll_codes_1;
+		pll_res->cache_pll_trim_codes[1] =
+			codes_info->pll_codes.pll_codes_2;
+		pll_res->cache_pll_trim_codes[2] =
+			codes_info->pll_codes.pll_codes_3;
+		found = true;
+		break;
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	pr_debug("trim_code_0=0x%x trim_code_1=0x%x trim_code_2=0x%x\n",
+			pll_res->cache_pll_trim_codes[0],
+			pll_res->cache_pll_trim_codes[1],
+			pll_res->cache_pll_trim_codes[2]);
+
+	return 0;
+}
+
+static void shadow_dsi_pll_dynamic_refresh_10nm(struct dsi_pll_10nm *pll,
+						struct mdss_pll_resources *rsc)
+{
+	u32 data;
+	u32 offset = DSI_PHY_TO_PLL_OFFSET;
+	u32 upper_addr = 0;
+	struct dsi_pll_regs *reg = &pll->reg_setup;
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+	data &= ~BIT(5);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL0,
+			   PHY_CMN_CLK_CFG1, PHY_CMN_PLL_CNTRL, data, 0);
+	upper_addr |= (upper_8_bit(PHY_CMN_CLK_CFG1) << 0);
+	upper_addr |= (upper_8_bit(PHY_CMN_PLL_CNTRL) << 1);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL1,
+			   PHY_CMN_RBUF_CTRL,
+			   (PLL_DECIMAL_DIV_START_1 + offset),
+			   0, reg->decimal_div_start);
+	upper_addr |= (upper_8_bit(PHY_CMN_RBUF_CTRL) << 2);
+	upper_addr |= (upper_8_bit(PLL_DECIMAL_DIV_START_1 + offset) << 3);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL2,
+			   (PLL_FRAC_DIV_START_LOW_1 + offset),
+			   (PLL_FRAC_DIV_START_MID_1 + offset),
+			   reg->frac_div_start_low, reg->frac_div_start_mid);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_LOW_1 + offset) << 4);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_MID_1 + offset) << 5);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL3,
+			   (PLL_FRAC_DIV_START_HIGH_1 + offset),
+			   (PLL_PLL_PROP_GAIN_RATE_1 + offset),
+			   reg->frac_div_start_high, reg->pll_prop_gain_rate);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_HIGH_1 + offset) << 6);
+	upper_addr |= (upper_8_bit(PLL_PLL_PROP_GAIN_RATE_1 + offset) << 7);
+
+	data = MDSS_PLL_REG_R(rsc->pll_base, PLL_PLL_OUTDIV_RATE) & 0x03;
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL4,
+			   (PLL_PLL_OUTDIV_RATE + offset),
+			   (PLL_FREQ_TUNE_ACCUM_INIT_LOW + offset),
+			   data, 0);
+	upper_addr |= (upper_8_bit(PLL_PLL_OUTDIV_RATE + offset) << 8);
+	upper_addr |= (upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_LOW + offset) << 9);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL5,
+			   (PLL_FREQ_TUNE_ACCUM_INIT_MID + offset),
+			   (PLL_FREQ_TUNE_ACCUM_INIT_HIGH + offset),
+			   rsc->cache_pll_trim_codes[1],
+			   rsc->cache_pll_trim_codes[0]);
+	upper_addr |=
+		(upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_MID + offset) << 10);
+	upper_addr |=
+		(upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_HIGH + offset) << 11);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL6,
+			   (PLL_FREQ_TUNE_ACCUM_INIT_MUX + offset),
+			   (PLL_PLL_BAND_SET_RATE_1 + offset),
+			   0x07, rsc->cache_pll_trim_codes[2]);
+	upper_addr |=
+		(upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_MUX + offset) << 12);
+	upper_addr |= (upper_8_bit(PLL_PLL_BAND_SET_RATE_1 + offset) << 13);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL7,
+			   (PLL_CALIBRATION_SETTINGS + offset),
+			   (PLL_BAND_SEL_CAL_SETTINGS + offset), 0x44, 0x3a);
+	upper_addr |= (upper_8_bit(PLL_CALIBRATION_SETTINGS + offset) << 14);
+	upper_addr |= (upper_8_bit(PLL_BAND_SEL_CAL_SETTINGS + offset) << 15);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL8,
+			   (PLL_PLL_LOCKDET_RATE_1 + offset),
+			   (PLL_PLL_LOCK_DELAY + offset), 0x10, 0x06);
+	upper_addr |= (upper_8_bit(PLL_PLL_LOCKDET_RATE_1 + offset) << 16);
+	upper_addr |= (upper_8_bit(PLL_PLL_LOCK_DELAY + offset) << 17);
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL17,
+			   PHY_CMN_CTRL_2, PHY_CMN_CLK_CFG0, 0x40, data);
+	if (rsc->slave)
+		MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+				   DSI_DYNAMIC_REFRESH_PLL_CTRL10,
+				   PHY_CMN_CLK_CFG0, PHY_CMN_CTRL_0,
+				   data, 0x7f);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL18,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	/* Dummy register writes */
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL19,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL20,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL21,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL22,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL23,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL24,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL25,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL26,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL28,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL29,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+
+	/* Registers to configure after PLL enable delay */
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1) | BIT(5);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+			   PHY_CMN_CLK_CFG1, PHY_CMN_RBUF_CTRL, data, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+			   PHY_CMN_CLK_CFG1, PHY_CMN_CLK_CFG1, data, data);
+	if (rsc->slave) {
+		data = MDSS_PLL_REG_R(rsc->slave->phy_base, PHY_CMN_CLK_CFG1) |
+			BIT(5);
+		MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+				   DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+				   PHY_CMN_CLK_CFG1, PHY_CMN_RBUF_CTRL,
+				   data, 0x01);
+		MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+				   DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+				   PHY_CMN_CLK_CFG1, PHY_CMN_CLK_CFG1,
+				   data, data);
+	}
+
+	MDSS_PLL_REG_W(rsc->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, upper_addr);
+	MDSS_PLL_REG_W(rsc->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, 0);
+	wmb(); /* commit register writes */
+}
+
+static int shadow_vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
+			unsigned long parent_rate)
+{
+	int rc;
+	struct dsi_pll_10nm *pll;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *rsc = vco->priv;
+
+	if (!rsc) {
+		pr_err("pll resource not found\n");
+		return -EINVAL;
+	}
+
+	pll = rsc->priv;
+	if (!pll) {
+		pr_err("pll configuration not found\n");
+		return -EINVAL;
+	}
+
+	rc = dsi_pll_read_stored_trim_codes(rsc, rate);
+	if (rc) {
+		pr_err("cannot find pll codes rate=%ld\n", rate);
+		return -EINVAL;
+	}
+	pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
+
+	rsc->vco_current_rate = rate;
+	rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
+		       rsc->index, rc);
+		return rc;
+	}
+
+	dsi_pll_setup_config(pll, rsc);
+
+	dsi_pll_calc_dec_frac(pll, rsc);
+
+	/* program dynamic refresh control registers */
+	shadow_dsi_pll_dynamic_refresh_10nm(pll, rsc);
+
+	/* update cached vco rate */
+	rsc->vco_cached_rate = rate;
+	rsc->dfps_trigger = true;
+
+	mdss_pll_resource_enable(rsc, false);
+
+	return 0;
+}
+
 static int dsi_pll_10nm_lock_status(struct mdss_pll_resources *pll)
 {
 	int rc;
@@ -716,7 +989,7 @@ static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
 	phy_reg_update_bits_sub(rsc, PHY_CMN_CLK_CFG1, 0x03, rsc->cached_cfg1);
 	if (rsc->slave)
 		phy_reg_update_bits_sub(rsc->slave, PHY_CMN_CLK_CFG1,
-				0x03, rsc->cached_cfg1);
+				0x03, rsc->slave->cached_cfg1);
 	wmb(); /* ensure dsiclk_sel is always programmed before pll start */
 
 	/* Start PLL */
@@ -766,6 +1039,7 @@ static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
 	}
 
 	rsc->handoff_resources = false;
+	rsc->dfps_trigger = false;
 
 	pr_debug("stop PLL (%d)\n", rsc->index);
 
@@ -817,16 +1091,18 @@ static void vco_10nm_unprepare(struct clk_hw *hw)
 	/*
 	 * During unprepare in continuous splash use case we want driver
 	 * to pick all dividers instead of retaining bootloader configurations.
+	 * Also handle use cases where dynamic refresh is triggered before
+	 * the first suspend/resume.
 	 */
-	if (!pll->handoff_resources) {
+	if (!pll->handoff_resources || pll->dfps_trigger) {
 		pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base,
-							PHY_CMN_CLK_CFG0);
+						  PHY_CMN_CLK_CFG0);
 		pll->cached_outdiv = MDSS_PLL_REG_R(pll->pll_base,
-							PLL_PLL_OUTDIV_RATE);
+						    PLL_PLL_OUTDIV_RATE);
 		pr_debug("cfg0=%d,cfg1=%d, outdiv=%d\n", pll->cached_cfg0,
-					pll->cached_cfg1, pll->cached_outdiv);
+			 pll->cached_cfg1, pll->cached_outdiv);
 
-		pll->vco_cached_rate = clk_hw_get_rate(hw);
+		pll->vco_cached_rate = clk_get_rate(hw->clk);
 	}
 
 	/*
@@ -836,9 +1112,15 @@ static void vco_10nm_unprepare(struct clk_hw *hw)
 	 * does not change. For such use cases, we need to ensure that the cached
 	 * value is programmed prior to PLL being locked
 	 */
-	if (pll->handoff_resources)
+	if (pll->handoff_resources) {
 		pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base,
-							PHY_CMN_CLK_CFG1);
+						  PHY_CMN_CLK_CFG1);
+		if (pll->slave)
+			pll->slave->cached_cfg1 =
+				MDSS_PLL_REG_R(pll->slave->phy_base,
+					       PHY_CMN_CLK_CFG1);
+	}
+
 	dsi_pll_disable(vco);
 	mdss_pll_resource_enable(pll, false);
 }
@@ -866,7 +1148,7 @@ static int vco_10nm_prepare(struct clk_hw *hw)
 	}
 
 	if ((pll->vco_cached_rate != 0) &&
-	    (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
+	    (pll->vco_cached_rate == clk_get_rate(hw->clk))) {
 		rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
 				pll->vco_cached_rate);
 		if (rc) {
@@ -879,6 +1161,9 @@ static int vco_10nm_prepare(struct clk_hw *hw)
 			pll->cached_cfg1);
 		MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0,
 					pll->cached_cfg0);
+		if (pll->slave)
+			MDSS_PLL_REG_W(pll->slave->phy_base, PHY_CMN_CLK_CFG0,
+				       pll->cached_cfg0);
 		MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
 					pll->cached_outdiv);
 	}
@@ -1014,6 +1299,14 @@ static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
 	reg_val &= ~0xF0;
 	reg_val |= (div << 4);
 	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+
+	/*
+	 * cache the current parent index for cases where parent
+	 * is not changing but rate is changing. In that case
+	 * clock framework won't call parent_set and hence dsiclk_sel
+	 * bit won't be programmed. e.g. dfps update use case.
+	 */
+	pll->cached_cfg0 = reg_val;
 }
 
 static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
@@ -1151,6 +1444,12 @@ static const struct clk_ops clk_ops_vco_10nm = {
 	.unprepare = vco_10nm_unprepare,
 };
 
+static const struct clk_ops clk_ops_shadow_vco_10nm = {
+	.recalc_rate = vco_10nm_recalc_rate,
+	.set_rate = shadow_vco_10nm_set_rate,
+	.round_rate = vco_10nm_round_rate,
+};
+
 static struct regmap_bus mdss_mux_regmap_bus = {
 	.reg_write = mdss_set_mux_sel,
 	.reg_read = mdss_get_mux_sel,
@@ -1225,6 +1524,19 @@ static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
 	},
 };
 
+static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_vco_clk",
+			.parent_names = (const char *[]){"bi_tcxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_shadow_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
+	},
+};
+
 static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
 	.min_rate = 1000000000UL,
@@ -1238,6 +1550,19 @@ static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
 	},
 };
 
+static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_vco_clk",
+			.parent_names = (const char *[]){"bi_tcxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_shadow_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
+	},
+};
+
 static struct clk_regmap_div dsi0pll_pll_out_div = {
 	.reg = PLL_PLL_OUTDIV_RATE,
 	.shift = 0,
@@ -1254,6 +1579,23 @@ static struct clk_regmap_div dsi0pll_pll_out_div = {
 	},
 };
 
+static struct clk_regmap_div dsi0pll_shadow_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_pll_out_div",
+			.parent_names = (const char *[]){
+				"dsi0pll_shadow_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi1pll_pll_out_div = {
 	.reg = PLL_PLL_OUTDIV_RATE,
 	.shift = 0,
@@ -1270,6 +1612,23 @@ static struct clk_regmap_div dsi1pll_pll_out_div = {
 	},
 };
 
+static struct clk_regmap_div dsi1pll_shadow_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_pll_out_div",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi0pll_bitclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1284,6 +1643,21 @@ static struct clk_regmap_div dsi0pll_bitclk_src = {
 	},
 };
 
+static struct clk_regmap_div dsi0pll_shadow_bitclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_bitclk_src",
+			.parent_names = (const char *[]){
+				"dsi0pll_shadow_pll_out_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi1pll_bitclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1298,6 +1672,21 @@ static struct clk_regmap_div dsi1pll_bitclk_src = {
 	},
 };
 
+static struct clk_regmap_div dsi1pll_shadow_bitclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_bitclk_src",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_pll_out_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_fixed_factor dsi0pll_post_vco_div = {
 	.div = 4,
 	.mult = 1,
@@ -1305,7 +1694,19 @@ static struct clk_fixed_factor dsi0pll_post_vco_div = {
 		.name = "dsi0pll_post_vco_div",
 		.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
 		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_shadow_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_post_vco_div",
+		.parent_names = (const char *[]){"dsi0pll_shadow_pll_out_div"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
 		.ops = &clk_fixed_factor_ops,
 	},
 };
@@ -1317,7 +1718,19 @@ static struct clk_fixed_factor dsi1pll_post_vco_div = {
 		.name = "dsi1pll_post_vco_div",
 		.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
 		.num_parents = 1,
-		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_shadow_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_post_vco_div",
+		.parent_names = (const char *[]){"dsi1pll_shadow_pll_out_div"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
 		.ops = &clk_fixed_factor_ops,
 	},
 };
@@ -1334,6 +1747,18 @@ static struct clk_fixed_factor dsi0pll_byteclk_src = {
 	},
 };
 
+static struct clk_fixed_factor dsi0pll_shadow_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_byteclk_src",
+		.parent_names = (const char *[]){"dsi0pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi1pll_byteclk_src = {
 	.div = 8,
 	.mult = 1,
@@ -1346,6 +1771,18 @@ static struct clk_fixed_factor dsi1pll_byteclk_src = {
 	},
 };
 
+static struct clk_fixed_factor dsi1pll_shadow_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_byteclk_src",
+		.parent_names = (const char *[]){"dsi1pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi0pll_post_bit_div = {
 	.div = 2,
 	.mult = 1,
@@ -1358,6 +1795,18 @@ static struct clk_fixed_factor dsi0pll_post_bit_div = {
 	},
 };
 
+static struct clk_fixed_factor dsi0pll_shadow_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_post_bit_div",
+		.parent_names = (const char *[]){"dsi0pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi1pll_post_bit_div = {
 	.div = 2,
 	.mult = 1,
@@ -1370,15 +1819,29 @@ static struct clk_fixed_factor dsi1pll_post_bit_div = {
 	},
 };
 
+static struct clk_fixed_factor dsi1pll_shadow_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_post_bit_div",
+		.parent_names = (const char *[]){"dsi1pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_regmap_mux dsi0pll_byteclk_mux = {
 	.shift = 0,
 	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0_phy_pll_out_byteclk",
-			.parent_names = (const char *[]){"dsi0pll_byteclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi0pll_byteclk_src",
+				"dsi0pll_shadow_byteclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+				  CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1390,9 +1853,11 @@ static struct clk_regmap_mux dsi1pll_byteclk_mux = {
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1_phy_pll_out_byteclk",
-			.parent_names = (const char *[]){"dsi1pll_byteclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi1pll_byteclk_src",
+				"dsi1pll_shadow_byteclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+				  CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1416,6 +1881,25 @@ static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
 	},
 };
 
+static struct clk_regmap_mux dsi0pll_shadow_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_pclk_src_mux",
+			.parent_names = (const char *[]){
+				"dsi0pll_shadow_bitclk_src",
+				"dsi0pll_shadow_post_bit_div",
+				"dsi0pll_shadow_pll_out_div",
+				"dsi0pll_shadow_post_vco_div"},
+			.num_parents = 4,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
 static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
 	.reg = PHY_CMN_CLK_CFG1,
 	.shift = 0,
@@ -1434,6 +1918,25 @@ static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
 	},
 };
 
+static struct clk_regmap_mux dsi1pll_shadow_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_pclk_src_mux",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_bitclk_src",
+				"dsi1pll_shadow_post_bit_div",
+				"dsi1pll_shadow_pll_out_div",
+				"dsi1pll_shadow_post_vco_div"},
+			.num_parents = 4,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi0pll_pclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1449,6 +1952,21 @@ static struct clk_regmap_div dsi0pll_pclk_src = {
 	},
 };
 
+static struct clk_regmap_div dsi0pll_shadow_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi0pll_shadow_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi1pll_pclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1464,15 +1982,32 @@ static struct clk_regmap_div dsi1pll_pclk_src = {
 	},
 };
 
+static struct clk_regmap_div dsi1pll_shadow_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi1pll_shadow_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_mux dsi0pll_pclk_mux = {
 	.shift = 0,
 	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0_phy_pll_out_dsiclk",
-			.parent_names = (const char *[]){"dsi0pll_pclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi0pll_pclk_src",
+				"dsi0pll_shadow_pclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+				  CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1484,9 +2019,11 @@ static struct clk_regmap_mux dsi1pll_pclk_mux = {
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1_phy_pll_out_dsiclk",
-			.parent_names = (const char *[]){"dsi1pll_pclk_src"},
-			.num_parents = 1,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi1pll_pclk_src",
+				"dsi1pll_shadow_pclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+				  CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1503,6 +2040,14 @@ static struct clk_hw *mdss_dsi_pllcc_10nm[] = {
 	[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
 	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
 	[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
+	[SHADOW_VCO_CLK_0] = &dsi0pll_shadow_vco_clk.hw,
+	[SHADOW_PLL_OUT_DIV_0_CLK] = &dsi0pll_shadow_pll_out_div.clkr.hw,
+	[SHADOW_BITCLK_SRC_0_CLK] = &dsi0pll_shadow_bitclk_src.clkr.hw,
+	[SHADOW_BYTECLK_SRC_0_CLK] = &dsi0pll_shadow_byteclk_src.hw,
+	[SHADOW_POST_BIT_DIV_0_CLK] = &dsi0pll_shadow_post_bit_div.hw,
+	[SHADOW_POST_VCO_DIV_0_CLK] = &dsi0pll_shadow_post_vco_div.hw,
+	[SHADOW_PCLK_SRC_MUX_0_CLK] = &dsi0pll_shadow_pclk_src_mux.clkr.hw,
+	[SHADOW_PCLK_SRC_0_CLK] = &dsi0pll_shadow_pclk_src.clkr.hw,
 	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
 	[PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
 	[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
@@ -1513,6 +2058,14 @@ static struct clk_hw *mdss_dsi_pllcc_10nm[] = {
 	[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
 	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
 	[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
+	[SHADOW_VCO_CLK_1] = &dsi1pll_shadow_vco_clk.hw,
+	[SHADOW_PLL_OUT_DIV_1_CLK] = &dsi1pll_shadow_pll_out_div.clkr.hw,
+	[SHADOW_BITCLK_SRC_1_CLK] = &dsi1pll_shadow_bitclk_src.clkr.hw,
+	[SHADOW_BYTECLK_SRC_1_CLK] = &dsi1pll_shadow_byteclk_src.hw,
+	[SHADOW_POST_BIT_DIV_1_CLK] = &dsi1pll_shadow_post_bit_div.hw,
+	[SHADOW_POST_VCO_DIV_1_CLK] = &dsi1pll_shadow_post_vco_div.hw,
+	[SHADOW_PCLK_SRC_MUX_1_CLK] = &dsi1pll_shadow_pclk_src_mux.clkr.hw,
+	[SHADOW_PCLK_SRC_1_CLK] = &dsi1pll_shadow_pclk_src.clkr.hw,
 };
 
 int dsi_pll_clock_register_10nm(struct platform_device *pdev,
@@ -1549,18 +2102,20 @@ int dsi_pll_clock_register_10nm(struct platform_device *pdev,
 
 	/* Establish client data */
 	if (ndx == 0) {
-
 		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_pll_out_div.clkr.regmap = rmap;
+		dsi0pll_shadow_pll_out_div.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_bitclk_src.clkr.regmap = rmap;
+		dsi0pll_shadow_bitclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_pclk_src.clkr.regmap = rmap;
+		dsi0pll_shadow_pclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
@@ -1569,12 +2124,16 @@ int dsi_pll_clock_register_10nm(struct platform_device *pdev,
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_pclk_src_mux.clkr.regmap = rmap;
+		dsi0pll_shadow_pclk_src_mux.clkr.regmap = rmap;
+
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_byteclk_mux.clkr.regmap = rmap;
 
 		dsi0pll_vco_clk.priv = pll_res;
-		for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
+		dsi0pll_shadow_vco_clk.priv = pll_res;
+
+		for (i = VCO_CLK_0; i <= SHADOW_PCLK_SRC_0_CLK; i++) {
 			clk = devm_clk_register(&pdev->dev,
 						mdss_dsi_pllcc_10nm[i]);
 			if (IS_ERR(clk)) {
@@ -1589,20 +2148,21 @@ int dsi_pll_clock_register_10nm(struct platform_device *pdev,
 
 		rc = of_clk_add_provider(pdev->dev.of_node,
 				of_clk_src_onecell_get, clk_data);
-
-
 	} else {
 		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_pll_out_div.clkr.regmap = rmap;
+		dsi1pll_shadow_pll_out_div.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_bitclk_src.clkr.regmap = rmap;
+		dsi1pll_shadow_bitclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_pclk_src.clkr.regmap = rmap;
+		dsi1pll_shadow_pclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
@@ -1611,12 +2171,16 @@ int dsi_pll_clock_register_10nm(struct platform_device *pdev,
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_pclk_src_mux.clkr.regmap = rmap;
+		dsi1pll_shadow_pclk_src_mux.clkr.regmap = rmap;
+
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_byteclk_mux.clkr.regmap = rmap;
+
 		dsi1pll_vco_clk.priv = pll_res;
+		dsi1pll_shadow_vco_clk.priv = pll_res;
 
-		for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
+		for (i = VCO_CLK_1; i <= SHADOW_PCLK_SRC_1_CLK; i++) {
 			clk = devm_clk_register(&pdev->dev,
 						mdss_dsi_pllcc_10nm[i]);
 			if (IS_ERR(clk)) {

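For reference, a minimal consumer-side sketch of how the shadow branch registered above would be driven during a seamless clock switch; the helper and the clock handle name are assumptions for illustration, not part of this change:

#include <linux/clk.h>
#include <linux/printk.h>

/*
 * Illustrative only: set_rate on the shadow branch propagates up to
 * dsi0pll_shadow_vco_clk via CLK_SET_RATE_PARENT and programs the
 * shadow/dynamic-refresh state rather than the live PLL, while
 * CLK_SET_RATE_NO_REPARENT on the live byteclk/pclk muxes keeps the
 * panel running on the old rate until the switch is applied.
 */
static int stage_shadow_byte_rate(struct clk *shadow_byteclk,
				  unsigned long byte_rate)
{
	int rc;

	rc = clk_set_rate(shadow_byteclk, byte_rate);
	if (rc)
		pr_err("failed to stage shadow byte clock at %lu Hz, rc=%d\n",
		       byte_rate, rc);

	return rc;
}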
+ 3 - 3
pll/dsi_pll_14nm_util.c

@@ -34,12 +34,12 @@ static int mdss_pll_read_stored_trim_codes(
 		goto end_read;
 	}
 
-	for (i = 0; i < dsi_pll_res->dfps->panel_dfps.frame_rate_cnt; i++) {
+	for (i = 0; i < dsi_pll_res->dfps->vco_rate_cnt; i++) {
 		struct dfps_codes_info *codes_info =
 			&dsi_pll_res->dfps->codes_dfps[i];
 
-		pr_debug("valid=%d frame_rate=%d, vco_rate=%d, code %d %d\n",
-			codes_info->is_valid, codes_info->frame_rate,
+		pr_debug("valid=%d vco_rate=%d, code %d %d\n",
+			codes_info->is_valid,
 			codes_info->clk_rate, codes_info->pll_codes.pll_codes_1,
 			codes_info->pll_codes.pll_codes_2);
 

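The loop above now walks the trim-code table purely by VCO clock rate. A hedged sketch of what such a per-rate lookup amounts to, using only the reworked dfps_info fields from pll/pll_drv.h (the helper name is hypothetical):

#include "pll_drv.h"

/* Hypothetical helper: return the stored codes entry for a VCO rate. */
static struct dfps_codes_info *find_dfps_codes(struct dfps_info *dfps,
					       uint32_t vco_rate)
{
	int i;

	for (i = 0; i < dfps->vco_rate_cnt; i++) {
		struct dfps_codes_info *codes = &dfps->codes_dfps[i];

		if (codes->is_valid && codes->clk_rate == vco_rate)
			return codes;
	}

	return NULL;
}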
+ 648 - 31
pll/dsi_pll_7nm.c

@@ -11,7 +11,7 @@
 #include <linux/delay.h>
 #include "dsi_pll.h"
 #include "pll_drv.h"
-#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+#include <dt-bindings/clock/mdss-7nm-pll-clk.h>
 
 #define VCO_DELAY_USEC 1
 
@@ -182,6 +182,7 @@
 #define PHY_CMN_CLK_CFG1	0x014
 #define PHY_CMN_RBUF_CTRL	0x01C
 #define PHY_CMN_CTRL_0		0x024
+#define PHY_CMN_CTRL_2		0x02C
 #define PHY_CMN_CTRL_3		0x030
 #define PHY_CMN_PLL_CNTRL	0x03C
 #define PHY_CMN_GLBL_DIGTOP_SPARE4 0x128
@@ -196,6 +197,43 @@
 #define SSC_START		BIT(6)
 #define SSC_START_MUX		BIT(7)
 
+/* Dynamic Refresh Control Registers */
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0		(0x014)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1		(0x018)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2		(0x01C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3		(0x020)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4		(0x024)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5		(0x028)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6		(0x02C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7		(0x030)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8		(0x034)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9		(0x038)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10		(0x03C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11		(0x040)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12		(0x044)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13		(0x048)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14		(0x04C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15		(0x050)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16		(0x054)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17		(0x058)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18		(0x05C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19		(0x060)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20		(0x064)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21		(0x068)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22		(0x06C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23		(0x070)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24		(0x074)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25		(0x078)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26		(0x07C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27		(0x080)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28		(0x084)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29		(0x088)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30		(0x08C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31		(0x090)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR	(0x094)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2	(0x098)
+
+#define DSI_PHY_TO_PLL_OFFSET	(0x500)
 enum {
 	DSI_PLL_0,
 	DSI_PLL_1,
@@ -554,6 +592,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll,
 	regs->frac_div_start_low = (frac & 0xff);
 	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
 	regs->frac_div_start_high = (frac & 0x30000) >> 16;
+	regs->pll_prop_gain_rate = 10;
 }
 
 static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll,
@@ -889,6 +928,7 @@ static int vco_7nm_set_rate(struct clk_hw *hw, unsigned long rate,
 
 	rsc->vco_current_rate = rate;
 	rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+	rsc->dfps_trigger = false;
 
 	rc = mdss_pll_resource_enable(rsc, true);
 	if (rc) {
@@ -919,6 +959,289 @@ static int vco_7nm_set_rate(struct clk_hw *hw, unsigned long rate,
 	return 0;
 }
 
+static int dsi_pll_read_stored_trim_codes(struct mdss_pll_resources *pll_res,
+					  unsigned long vco_clk_rate)
+{
+	int i;
+	bool found = false;
+
+	if (!pll_res->dfps)
+		return -EINVAL;
+
+	for (i = 0; i < pll_res->dfps->vco_rate_cnt; i++) {
+		struct dfps_codes_info *codes_info =
+			&pll_res->dfps->codes_dfps[i];
+
+		pr_debug("valid=%d vco_rate=%d, code %d %d %d\n",
+			codes_info->is_valid, codes_info->clk_rate,
+			codes_info->pll_codes.pll_codes_1,
+			codes_info->pll_codes.pll_codes_2,
+			codes_info->pll_codes.pll_codes_3);
+
+		if (vco_clk_rate != codes_info->clk_rate &&
+				codes_info->is_valid)
+			continue;
+
+		pll_res->cache_pll_trim_codes[0] =
+			codes_info->pll_codes.pll_codes_1;
+		pll_res->cache_pll_trim_codes[1] =
+			codes_info->pll_codes.pll_codes_2;
+		pll_res->cache_pll_trim_codes[2] =
+			codes_info->pll_codes.pll_codes_3;
+		found = true;
+		break;
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	pr_debug("trim_code_0=0x%x trim_code_1=0x%x trim_code_2=0x%x\n",
+			pll_res->cache_pll_trim_codes[0],
+			pll_res->cache_pll_trim_codes[1],
+			pll_res->cache_pll_trim_codes[2]);
+
+	return 0;
+}
+
+static void shadow_dsi_pll_dynamic_refresh_7nm(struct dsi_pll_7nm *pll,
+						struct mdss_pll_resources *rsc)
+{
+	u32 data;
+	u32 offset = DSI_PHY_TO_PLL_OFFSET;
+	u32 upper_addr = 0;
+	u32 upper_addr2 = 0;
+	struct dsi_pll_regs *reg = &pll->reg_setup;
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+	data &= ~BIT(5);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL0,
+			   PHY_CMN_CLK_CFG1, PHY_CMN_PLL_CNTRL, data, 0);
+	upper_addr |= (upper_8_bit(PHY_CMN_CLK_CFG1) << 0);
+	upper_addr |= (upper_8_bit(PHY_CMN_PLL_CNTRL) << 1);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL1,
+			   PHY_CMN_RBUF_CTRL,
+			   (PLL_CORE_INPUT_OVERRIDE + offset),
+			   0, 0x12);
+	upper_addr |= (upper_8_bit(PHY_CMN_RBUF_CTRL) << 2);
+	upper_addr |= (upper_8_bit(PLL_CORE_INPUT_OVERRIDE + offset) << 3);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL2,
+			   (PLL_DECIMAL_DIV_START_1 + offset),
+			   (PLL_FRAC_DIV_START_LOW_1 + offset),
+			   reg->decimal_div_start, reg->frac_div_start_low);
+	upper_addr |= (upper_8_bit(PLL_DECIMAL_DIV_START_1 + offset) << 4);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_LOW_1 + offset) << 5);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL3,
+			   (PLL_FRAC_DIV_START_MID_1 + offset),
+			   (PLL_FRAC_DIV_START_HIGH_1 + offset),
+			   reg->frac_div_start_mid, reg->frac_div_start_high);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_MID_1 + offset) << 6);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_HIGH_1 + offset) << 7);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL4,
+			   (PLL_SYSTEM_MUXES + offset),
+			   (PLL_PLL_LOCKDET_RATE_1 + offset),
+			   0xc0, 0x40);
+	upper_addr |= (upper_8_bit(PLL_SYSTEM_MUXES + offset) << 8);
+	upper_addr |= (upper_8_bit(PLL_PLL_LOCKDET_RATE_1 + offset) << 9);
+
+	data = MDSS_PLL_REG_R(rsc->pll_base, PLL_PLL_OUTDIV_RATE) & 0x03;
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL5,
+			   (PLL_PLL_OUTDIV_RATE + offset),
+			   (PLL_PLL_LOCK_DELAY + offset),
+			   data, 0x06);
+
+	upper_addr |= (upper_8_bit(PLL_PLL_OUTDIV_RATE + offset) << 10);
+	upper_addr |= (upper_8_bit(PLL_PLL_LOCK_DELAY + offset) << 11);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL6,
+			   (PLL_CMODE_1 + offset),
+			   (PLL_CLOCK_INVERTERS_1 + offset),
+			   0x10, reg->pll_clock_inverters);
+	upper_addr |= (upper_8_bit(PLL_CMODE_1 + offset) << 12);
+	upper_addr |= (upper_8_bit(PLL_CLOCK_INVERTERS_1 + offset) << 13);
+
+	data = MDSS_PLL_REG_R(rsc->pll_base, PLL_VCO_CONFIG_1);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL7,
+			   (PLL_ANALOG_CONTROLS_FIVE_1 + offset),
+			   (PLL_VCO_CONFIG_1 + offset),
+			   0x01, data);
+	upper_addr |= (upper_8_bit(PLL_ANALOG_CONTROLS_FIVE_1 + offset) << 14);
+	upper_addr |= (upper_8_bit(PLL_VCO_CONFIG_1 + offset) << 15);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL8,
+			   (PLL_ANALOG_CONTROLS_FIVE + offset),
+			   (PLL_DSM_DIVIDER + offset), 0x01, 0);
+	upper_addr |= (upper_8_bit(PLL_ANALOG_CONTROLS_FIVE + offset) << 16);
+	upper_addr |= (upper_8_bit(PLL_DSM_DIVIDER + offset) << 17);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL9,
+			   (PLL_FEEDBACK_DIVIDER + offset),
+			   (PLL_CALIBRATION_SETTINGS + offset), 0x4E, 0x40);
+	upper_addr |= (upper_8_bit(PLL_FEEDBACK_DIVIDER + offset) << 18);
+	upper_addr |= (upper_8_bit(PLL_CALIBRATION_SETTINGS + offset) << 19);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL10,
+			   (PLL_BAND_SEL_CAL_SETTINGS_THREE + offset),
+			   (PLL_FREQ_DETECT_SETTINGS_ONE + offset), 0xBA, 0x0C);
+	upper_addr |= (upper_8_bit(PLL_BAND_SEL_CAL_SETTINGS_THREE + offset)
+		       << 20);
+	upper_addr |= (upper_8_bit(PLL_FREQ_DETECT_SETTINGS_ONE + offset)
+		       << 21);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL11,
+			   (PLL_OUTDIV + offset),
+			   (PLL_CORE_OVERRIDE + offset), 0, 0);
+	upper_addr |= (upper_8_bit(PLL_OUTDIV + offset) << 22);
+	upper_addr |= (upper_8_bit(PLL_CORE_OVERRIDE + offset) << 23);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL12,
+			   (PLL_PLL_DIGITAL_TIMERS_TWO + offset),
+			   (PLL_PLL_PROP_GAIN_RATE_1 + offset),
+			    0x08, reg->pll_prop_gain_rate);
+	upper_addr |= (upper_8_bit(PLL_PLL_DIGITAL_TIMERS_TWO + offset) << 24);
+	upper_addr |= (upper_8_bit(PLL_PLL_PROP_GAIN_RATE_1 + offset) << 25);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL13,
+			   (PLL_PLL_BAND_SEL_RATE_1 + offset),
+			   (PLL_PLL_INT_GAIN_IFILT_BAND_1 + offset),
+			    0xC0, 0x82);
+	upper_addr |= (upper_8_bit(PLL_PLL_BAND_SEL_RATE_1 + offset) << 26);
+	upper_addr |= (upper_8_bit(PLL_PLL_INT_GAIN_IFILT_BAND_1 + offset)
+		       << 27);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL14,
+			   (PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 + offset),
+			   (PLL_PLL_LOCK_OVERRIDE + offset),
+			    0x4c, 0x80);
+	upper_addr |= (upper_8_bit(PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 + offset)
+		       << 28);
+	upper_addr |= (upper_8_bit(PLL_PLL_LOCK_OVERRIDE + offset) << 29);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL15,
+			   (PLL_PFILT + offset),
+			   (PLL_IFILT + offset),
+			    0x2f, 0x3f);
+	upper_addr |= (upper_8_bit(PLL_PFILT + offset) << 30);
+	upper_addr |= (upper_8_bit(PLL_IFILT + offset) << 31);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL16,
+			   (PLL_FREQ_TUNE_ACCUM_INIT_HIGH + offset),
+			   (PLL_FREQ_TUNE_ACCUM_INIT_MID + offset),
+			   rsc->cache_pll_trim_codes[0],
+			   rsc->cache_pll_trim_codes[1]);
+	upper_addr2 |= (upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_HIGH + offset) << 0);
+	upper_addr2 |= (upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_MID + offset) << 1);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL17,
+			   (PLL_PLL_BAND_SEL_RATE_1 + offset),
+			   (PLL_PLL_BAND_SEL_RATE_1 + offset),
+			   rsc->cache_pll_trim_codes[2], rsc->cache_pll_trim_codes[2]);
+	upper_addr2 |= (upper_8_bit(PLL_PLL_BAND_SEL_RATE_1 + offset) << 0);
+	upper_addr2 |= (upper_8_bit(PLL_PLL_BAND_SEL_RATE_1 + offset) << 1);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL18,
+			   (PLL_SYSTEM_MUXES + offset),
+			   (PLL_CALIBRATION_SETTINGS + offset),
+			    0xc0, 0x40);
+	upper_addr2 |= (upper_8_bit(PLL_BAND_SEL_CAL + offset) << 2);
+	upper_addr2 |= (upper_8_bit(PLL_CALIBRATION_SETTINGS + offset) << 3);
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+			   PHY_CMN_CTRL_2, PHY_CMN_CLK_CFG0, 0x40, data);
+	if (rsc->slave)
+		MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+				   DSI_DYNAMIC_REFRESH_PLL_CTRL10,
+				   PHY_CMN_CLK_CFG0, PHY_CMN_CTRL_0,
+				   data, 0x7f);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL28,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL29,
+			   PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1) | BIT(5);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+			   PHY_CMN_CLK_CFG1, PHY_CMN_RBUF_CTRL, data, 0x01);
+	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+			   PHY_CMN_CLK_CFG1, PHY_CMN_CLK_CFG1, data, data);
+
+	if (rsc->slave) {
+		data = MDSS_PLL_REG_R(rsc->slave->phy_base, PHY_CMN_CLK_CFG1) |
+			BIT(5);
+
+		MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+				   DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+				   PHY_CMN_CLK_CFG1, PHY_CMN_RBUF_CTRL,
+				   data, 0x01);
+		MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+				   DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+				   PHY_CMN_CLK_CFG1, PHY_CMN_CLK_CFG1,
+				   data, data);
+	}
+
+	MDSS_PLL_REG_W(rsc->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, upper_addr);
+	MDSS_PLL_REG_W(rsc->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, upper_addr2);
+	wmb(); /* commit register writes */
+}
+
+static int shadow_vco_7nm_set_rate(struct clk_hw *hw, unsigned long rate,
+			unsigned long parent_rate)
+{
+	int rc;
+	struct dsi_pll_7nm *pll;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *rsc = vco->priv;
+
+	if (!rsc) {
+		pr_err("pll resource not found\n");
+		return -EINVAL;
+	}
+
+	pll = rsc->priv;
+	if (!pll) {
+		pr_err("pll configuration not found\n");
+		return -EINVAL;
+	}
+
+	rc = dsi_pll_read_stored_trim_codes(rsc, rate);
+	if (rc) {
+		pr_err("cannot find pll codes rate=%lu\n", rate);
+		return -EINVAL;
+	}
+	pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
+		       rsc->index, rc);
+		return rc;
+	}
+
+	rsc->vco_current_rate = rate;
+	rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	dsi_pll_setup_config(pll, rsc);
+
+	dsi_pll_calc_dec_frac(pll, rsc);
+
+	/* program dynamic refresh control registers */
+	shadow_dsi_pll_dynamic_refresh_7nm(pll, rsc);
+
+	/* update cached vco rate */
+	rsc->vco_cached_rate = rate;
+	rsc->dfps_trigger = true;
+
+	mdss_pll_resource_enable(rsc, false);
+
+	return 0;
+}
+
 static int dsi_pll_7nm_lock_status(struct mdss_pll_resources *pll)
 {
 	int rc;
@@ -1002,7 +1325,7 @@ static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
 	phy_reg_update_bits_sub(rsc, PHY_CMN_CLK_CFG1, 0x03, rsc->cached_cfg1);
 	if (rsc->slave)
 		phy_reg_update_bits_sub(rsc->slave, PHY_CMN_CLK_CFG1,
-				0x03, rsc->cached_cfg1);
+				0x03, rsc->slave->cached_cfg1);
 	wmb(); /* ensure dsiclk_sel is always programmed before pll start */
 
 	/* Start PLL */
@@ -1057,6 +1380,7 @@ static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
 	}
 
 	rsc->handoff_resources = false;
+	rsc->dfps_trigger = false;
 
 	pr_debug("stop PLL (%d)\n", rsc->index);
 
@@ -1108,16 +1432,22 @@ static void vco_7nm_unprepare(struct clk_hw *hw)
 	/*
 	 * During unprepare in continuous splash use case we want driver
 	 * to pick all dividers instead of retaining bootloader configurations.
+	 * Also handle the case where dynamic refresh is triggered while
+	 * the handoff_resources flag is still set. For video mode, this
+	 * flag is not cleared until the first suspend, whereas for
+	 * command mode it is not cleared until the first idle power
+	 * collapse. Make sure the divider settings are saved and
+	 * restored when dynamic FPS is triggered in that window.
 	 */
-	if (!pll->handoff_resources) {
+	if (!pll->handoff_resources || pll->dfps_trigger) {
 		pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base,
-							PHY_CMN_CLK_CFG0);
+						  PHY_CMN_CLK_CFG0);
 		pll->cached_outdiv = MDSS_PLL_REG_R(pll->pll_base,
-							PLL_PLL_OUTDIV_RATE);
+						    PLL_PLL_OUTDIV_RATE);
 		pr_debug("cfg0=%d,cfg1=%d, outdiv=%d\n", pll->cached_cfg0,
-					pll->cached_cfg1, pll->cached_outdiv);
+			 pll->cached_cfg1, pll->cached_outdiv);
 
-		pll->vco_cached_rate = clk_hw_get_rate(hw);
+		pll->vco_cached_rate = clk_get_rate(hw->clk);
 	}
 
 	/*
@@ -1127,9 +1457,14 @@ static void vco_7nm_unprepare(struct clk_hw *hw)
 	 * does not change.For such usecases, we need to ensure that the cached
 	 * value is programmed prior to PLL being locked
 	 */
-	if (pll->handoff_resources)
+	if (pll->handoff_resources) {
 		pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base,
-							PHY_CMN_CLK_CFG1);
+						  PHY_CMN_CLK_CFG1);
+		if (pll->slave)
+			pll->slave->cached_cfg1 =
+				MDSS_PLL_REG_R(pll->slave->phy_base,
+					       PHY_CMN_CLK_CFG1);
+	}
 
 	dsi_pll_disable(vco);
 	mdss_pll_resource_enable(pll, false);
@@ -1171,6 +1506,9 @@ static int vco_7nm_prepare(struct clk_hw *hw)
 			pll->cached_cfg1);
 		MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0,
 					pll->cached_cfg0);
+		if (pll->slave)
+			MDSS_PLL_REG_W(pll->slave->phy_base, PHY_CMN_CLK_CFG0,
+				       pll->cached_cfg0);
 		MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
 					pll->cached_outdiv);
 	}
@@ -1253,6 +1591,14 @@ static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
 	reg_val &= ~0xF0;
 	reg_val |= (div << 4);
 	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+
+	/*
+	 * Cache the current parent selection for cases where the parent
+	 * is not changing but the rate is. In that case the clock
+	 * framework won't call set_parent, and hence the dsiclk_sel
+	 * bit won't be programmed, e.g. in the dfps update use case.
+	 */
+	pll->cached_cfg0 = reg_val;
 }
 
 static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
@@ -1372,6 +1718,12 @@ static const struct clk_ops clk_ops_vco_7nm = {
 	.unprepare = vco_7nm_unprepare,
 };
 
+static const struct clk_ops clk_ops_shadow_vco_7nm = {
+	.recalc_rate = vco_7nm_recalc_rate,
+	.set_rate = shadow_vco_7nm_set_rate,
+	.round_rate = vco_7nm_round_rate,
+};
+
 static struct regmap_bus mdss_mux_regmap_bus = {
 	.reg_write = mdss_set_mux_sel,
 	.reg_read = mdss_get_mux_sel,
@@ -1445,6 +1797,18 @@ static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
 	},
 };
 
+static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_vco_clk",
+			.parent_names = (const char *[]){"bi_tcxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_shadow_vco_7nm,
+	},
+};
+
 static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
 	.min_rate = 1000000000UL,
@@ -1457,6 +1821,18 @@ static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
 	},
 };
 
+static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_vco_clk",
+			.parent_names = (const char *[]){"bi_tcxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_shadow_vco_7nm,
+	},
+};
+
 static struct clk_regmap_div dsi0pll_pll_out_div = {
 	.reg = PLL_PLL_OUTDIV_RATE,
 	.shift = 0,
@@ -1473,6 +1849,23 @@ static struct clk_regmap_div dsi0pll_pll_out_div = {
 	},
 };
 
+static struct clk_regmap_div dsi0pll_shadow_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_pll_out_div",
+			.parent_names = (const char *[]){
+				"dsi0pll_shadow_vco_clk"},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi1pll_pll_out_div = {
 	.reg = PLL_PLL_OUTDIV_RATE,
 	.shift = 0,
@@ -1489,6 +1882,23 @@ static struct clk_regmap_div dsi1pll_pll_out_div = {
 	},
 };
 
+static struct clk_regmap_div dsi1pll_shadow_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_pll_out_div",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_vco_clk"},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi0pll_bitclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1504,6 +1914,22 @@ static struct clk_regmap_div dsi0pll_bitclk_src = {
 	},
 };
 
+static struct clk_regmap_div dsi0pll_shadow_bitclk_src = {
+	.shift = 0,
+	.width = 4,
+	.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_bitclk_src",
+			.parent_names = (const char *[]){
+				"dsi0pll_shadow_pll_out_div"},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi1pll_bitclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1519,6 +1945,22 @@ static struct clk_regmap_div dsi1pll_bitclk_src = {
 	},
 };
 
+static struct clk_regmap_div dsi1pll_shadow_bitclk_src = {
+	.shift = 0,
+	.width = 4,
+	.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_bitclk_src",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_pll_out_div"},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_fixed_factor dsi0pll_post_vco_div = {
 	.div = 4,
 	.mult = 1,
@@ -1526,7 +1968,17 @@ static struct clk_fixed_factor dsi0pll_post_vco_div = {
 		.name = "dsi0pll_post_vco_div",
 		.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
 		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_shadow_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_post_vco_div",
+		.parent_names = (const char *[]){"dsi0pll_shadow_pll_out_div"},
+		.num_parents = 1,
 		.ops = &clk_fixed_factor_ops,
 	},
 };
@@ -1538,7 +1990,17 @@ static struct clk_fixed_factor dsi1pll_post_vco_div = {
 		.name = "dsi1pll_post_vco_div",
 		.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
 		.num_parents = 1,
-		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_shadow_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_post_vco_div",
+		.parent_names = (const char *[]){"dsi1pll_shadow_pll_out_div"},
+		.num_parents = 1,
 		.ops = &clk_fixed_factor_ops,
 	},
 };
@@ -1555,6 +2017,18 @@ static struct clk_fixed_factor dsi0pll_byteclk_src = {
 	},
 };
 
+static struct clk_fixed_factor dsi0pll_shadow_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_byteclk_src",
+		.parent_names = (const char *[]){"dsi0pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi1pll_byteclk_src = {
 	.div = 8,
 	.mult = 1,
@@ -1567,6 +2041,18 @@ static struct clk_fixed_factor dsi1pll_byteclk_src = {
 	},
 };
 
+static struct clk_fixed_factor dsi1pll_shadow_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_byteclk_src",
+		.parent_names = (const char *[]){"dsi1pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi0pll_post_bit_div = {
 	.div = 2,
 	.mult = 1,
@@ -1578,6 +2064,17 @@ static struct clk_fixed_factor dsi0pll_post_bit_div = {
 	},
 };
 
+static struct clk_fixed_factor dsi0pll_shadow_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_post_bit_div",
+		.parent_names = (const char *[]){"dsi0pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi1pll_post_bit_div = {
 	.div = 2,
 	.mult = 1,
@@ -1589,15 +2086,28 @@ static struct clk_fixed_factor dsi1pll_post_bit_div = {
 	},
 };
 
+static struct clk_fixed_factor dsi1pll_shadow_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_post_bit_div",
+		.parent_names = (const char *[]){"dsi1pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_regmap_mux dsi0pll_byteclk_mux = {
 	.shift = 0,
 	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0_phy_pll_out_byteclk",
-			.parent_names = (const char *[]){"dsi0pll_byteclk_src"},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
+			.parent_names = (const char *[]){"dsi0pll_byteclk_src",
+				"dsi0pll_shadow_byteclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_SET_RATE_PARENT |
+					CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1609,9 +2119,11 @@ static struct clk_regmap_mux dsi1pll_byteclk_mux = {
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1_phy_pll_out_byteclk",
-			.parent_names = (const char *[]){"dsi1pll_byteclk_src"},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
+			.parent_names = (const char *[]){"dsi1pll_byteclk_src",
+				"dsi1pll_shadow_byteclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_SET_RATE_PARENT |
+					CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1634,6 +2146,24 @@ static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
 	},
 };
 
+static struct clk_regmap_mux dsi0pll_shadow_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_pclk_src_mux",
+			.parent_names = (const char *[]){
+				"dsi0pll_shadow_bitclk_src",
+				"dsi0pll_shadow_post_bit_div",
+				"dsi0pll_shadow_pll_out_div",
+				"dsi0pll_shadow_post_vco_div"},
+			.num_parents = 4,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
 static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
 	.reg = PHY_CMN_CLK_CFG1,
 	.shift = 0,
@@ -1651,6 +2181,24 @@ static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
 	},
 };
 
+static struct clk_regmap_mux dsi1pll_shadow_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_pclk_src_mux",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_bitclk_src",
+				"dsi1pll_shadow_post_bit_div",
+				"dsi1pll_shadow_pll_out_div",
+				"dsi1pll_shadow_post_vco_div"},
+			.num_parents = 4,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi0pll_pclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1667,6 +2215,22 @@ static struct clk_regmap_div dsi0pll_pclk_src = {
 	},
 };
 
+static struct clk_regmap_div dsi0pll_shadow_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi0pll_shadow_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi1pll_pclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -1683,15 +2247,33 @@ static struct clk_regmap_div dsi1pll_pclk_src = {
 	},
 };
 
+static struct clk_regmap_div dsi1pll_shadow_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_pclk_src",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_mux dsi0pll_pclk_mux = {
 	.shift = 0,
 	.width = 1,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0_phy_pll_out_dsiclk",
-			.parent_names = (const char *[]){"dsi0pll_pclk_src"},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
+			.parent_names = (const char *[]){"dsi0pll_pclk_src",
+				"dsi0pll_shadow_pclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_SET_RATE_PARENT |
+					CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1703,9 +2285,11 @@ static struct clk_regmap_mux dsi1pll_pclk_mux = {
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1_phy_pll_out_dsiclk",
-			.parent_names = (const char *[]){"dsi1pll_pclk_src"},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
+			.parent_names = (const char *[]){"dsi1pll_pclk_src",
+				"dsi1pll_shadow_pclk_src"},
+			.num_parents = 2,
+			.flags = (CLK_SET_RATE_PARENT |
+					CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1722,6 +2306,14 @@ static struct clk_hw *mdss_dsi_pllcc_7nm[] = {
 	[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
 	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
 	[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
+	[SHADOW_VCO_CLK_0] = &dsi0pll_shadow_vco_clk.hw,
+	[SHADOW_PLL_OUT_DIV_0_CLK] = &dsi0pll_shadow_pll_out_div.clkr.hw,
+	[SHADOW_BITCLK_SRC_0_CLK] = &dsi0pll_shadow_bitclk_src.clkr.hw,
+	[SHADOW_BYTECLK_SRC_0_CLK] = &dsi0pll_shadow_byteclk_src.hw,
+	[SHADOW_POST_BIT_DIV_0_CLK] = &dsi0pll_shadow_post_bit_div.hw,
+	[SHADOW_POST_VCO_DIV_0_CLK] = &dsi0pll_shadow_post_vco_div.hw,
+	[SHADOW_PCLK_SRC_MUX_0_CLK] = &dsi0pll_shadow_pclk_src_mux.clkr.hw,
+	[SHADOW_PCLK_SRC_0_CLK] = &dsi0pll_shadow_pclk_src.clkr.hw,
 	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
 	[PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
 	[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
@@ -1732,6 +2324,14 @@ static struct clk_hw *mdss_dsi_pllcc_7nm[] = {
 	[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
 	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
 	[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
+	[SHADOW_VCO_CLK_1] = &dsi1pll_shadow_vco_clk.hw,
+	[SHADOW_PLL_OUT_DIV_1_CLK] = &dsi1pll_shadow_pll_out_div.clkr.hw,
+	[SHADOW_BITCLK_SRC_1_CLK] = &dsi1pll_shadow_bitclk_src.clkr.hw,
+	[SHADOW_BYTECLK_SRC_1_CLK] = &dsi1pll_shadow_byteclk_src.hw,
+	[SHADOW_POST_BIT_DIV_1_CLK] = &dsi1pll_shadow_post_bit_div.hw,
+	[SHADOW_POST_VCO_DIV_1_CLK] = &dsi1pll_shadow_post_vco_div.hw,
+	[SHADOW_PCLK_SRC_MUX_1_CLK] = &dsi1pll_shadow_pclk_src_mux.clkr.hw,
+	[SHADOW_PCLK_SRC_1_CLK] = &dsi1pll_shadow_pclk_src.clkr.hw,
 };
 
 int dsi_pll_clock_register_7nm(struct platform_device *pdev,
@@ -1743,6 +2343,12 @@ int dsi_pll_clock_register_7nm(struct platform_device *pdev,
 	int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_7nm);
 	struct regmap *rmap;
 
+	if (!pdev || !pdev->dev.of_node ||
+		!pll_res || !pll_res->pll_base || !pll_res->phy_base) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
 	ndx = pll_res->index;
 
 	if (ndx >= DSI_PLL_MAX) {
@@ -1755,12 +2361,13 @@ int dsi_pll_clock_register_7nm(struct platform_device *pdev,
 	pll_res->priv = &plls[ndx];
 	pll_res->vco_delay = VCO_DELAY_USEC;
 
-	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+					GFP_KERNEL);
 	if (!clk_data)
 		return -ENOMEM;
 
-	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
-				sizeof(struct clk *), GFP_KERNEL);
+	clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+				sizeof(struct clk *)), GFP_KERNEL);
 	if (!clk_data->clks)
 		return -ENOMEM;
 
@@ -1768,18 +2375,20 @@ int dsi_pll_clock_register_7nm(struct platform_device *pdev,
 
 	/* Establish client data */
 	if (ndx == 0) {
-
 		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
 		dsi0pll_pll_out_div.clkr.regmap = rmap;
+		dsi0pll_shadow_pll_out_div.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
 		dsi0pll_bitclk_src.clkr.regmap = rmap;
+		dsi0pll_shadow_bitclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
 		dsi0pll_pclk_src.clkr.regmap = rmap;
+		dsi0pll_shadow_pclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
@@ -1788,18 +2397,21 @@ int dsi_pll_clock_register_7nm(struct platform_device *pdev,
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
 		dsi0pll_pclk_src_mux.clkr.regmap = rmap;
+		dsi0pll_shadow_pclk_src_mux.clkr.regmap = rmap;
+
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
 		dsi0pll_byteclk_mux.clkr.regmap = rmap;
 
 		dsi0pll_vco_clk.priv = pll_res;
+		dsi0pll_shadow_vco_clk.priv = pll_res;
 
 		if (dsi_pll_7nm_is_hw_revision_v4_1(pll_res)) {
 			dsi0pll_vco_clk.min_rate = 600000000;
 			dsi0pll_vco_clk.max_rate = 5000000000;
 		}
 
-		for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
+		for (i = VCO_CLK_0; i <= SHADOW_PCLK_SRC_0_CLK; i++) {
 			clk = devm_clk_register(&pdev->dev,
 						mdss_dsi_pllcc_7nm[i]);
 			if (IS_ERR(clk)) {
@@ -1814,20 +2426,21 @@ int dsi_pll_clock_register_7nm(struct platform_device *pdev,
 
 		rc = of_clk_add_provider(pdev->dev.of_node,
 				of_clk_src_onecell_get, clk_data);
-
-
 	} else {
 		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
 		dsi1pll_pll_out_div.clkr.regmap = rmap;
+		dsi1pll_shadow_pll_out_div.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
 		dsi1pll_bitclk_src.clkr.regmap = rmap;
+		dsi1pll_shadow_bitclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
 		dsi1pll_pclk_src.clkr.regmap = rmap;
+		dsi1pll_shadow_pclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
@@ -1836,17 +2449,21 @@ int dsi_pll_clock_register_7nm(struct platform_device *pdev,
 		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
 		dsi1pll_pclk_src_mux.clkr.regmap = rmap;
+		dsi1pll_shadow_pclk_src_mux.clkr.regmap = rmap;
+
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
 		dsi1pll_byteclk_mux.clkr.regmap = rmap;
+
 		dsi1pll_vco_clk.priv = pll_res;
+		dsi1pll_shadow_vco_clk.priv = pll_res;
 
 		if (dsi_pll_7nm_is_hw_revision_v4_1(pll_res)) {
 			dsi1pll_vco_clk.min_rate = 600000000;
 			dsi1pll_vco_clk.max_rate = 5000000000;
 		}
 
-		for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
+		for (i = VCO_CLK_1; i <= SHADOW_PCLK_SRC_1_CLK; i++) {
 			clk = devm_clk_register(&pdev->dev,
 						mdss_dsi_pllcc_7nm[i]);
 			if (IS_ERR(clk)) {

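Each DSI_DYNAMIC_REFRESH_PLL_CTRLn word programmed above carries two register address/data pairs, and upper_8_bit() (defined in pll/pll_drv.h below) collects each target register's ninth word-address bit into the UPPER_ADDR/UPPER_ADDR2 masks. A worked example of that arithmetic, assuming the PLL offsets reached through the 0x500 PHY-to-PLL window stay below 0x300:

/*
 * upper_8_bit(x) = (((x) >> 2) & 0x100) >> 8, i.e. bit 8 of the word
 * address x >> 2.
 *
 * PHY common registers sit well below 0x400, so the bit is 0:
 *   upper_8_bit(0x014) = ((0x014 >> 2) & 0x100) >> 8 = 0
 *
 * PLL registers reached through the 0x500 window land in
 * 0x500..0x7ff, so the bit is 1 and must be recorded in the mask:
 *   upper_8_bit(0x500 + 0x010) = ((0x510 >> 2) & 0x100) >> 8 = 1
 */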
+ 11 - 11
pll/pll_drv.h

@@ -33,6 +33,8 @@
 		writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
 			(base) + (offset))
 
+#define upper_8_bit(x) ((((x) >> 2) & 0x100) >> 8)
+
 enum {
 	MDSS_DSI_PLL_10NM,
 	MDSS_DP_PLL_10NM,
@@ -52,30 +54,23 @@ enum {
 	MDSS_PLL_TARGET_8996,
 };
 
-#define DFPS_MAX_NUM_OF_FRAME_RATES 20
-
-struct dfps_panel_info {
-	uint32_t enabled;
-	uint32_t frame_rate_cnt;
-	uint32_t frame_rate[DFPS_MAX_NUM_OF_FRAME_RATES]; /* hz */
-};
+#define DFPS_MAX_NUM_OF_FRAME_RATES 16
 
 struct dfps_pll_codes {
 	uint32_t pll_codes_1;
 	uint32_t pll_codes_2;
+	uint32_t pll_codes_3;
 };
 
 struct dfps_codes_info {
 	uint32_t is_valid;
-	uint32_t frame_rate;	/* hz */
 	uint32_t clk_rate;	/* hz */
 	struct dfps_pll_codes pll_codes;
 };
 
 struct dfps_info {
-	struct dfps_panel_info panel_dfps;
+	uint32_t vco_rate_cnt;
 	struct dfps_codes_info codes_dfps[DFPS_MAX_NUM_OF_FRAME_RATES];
-	void *dfps_fb_base;
 };
 
 struct mdss_pll_resources {
@@ -154,7 +149,7 @@ struct mdss_pll_resources {
 	/*
 	 * caching the pll trim codes in the case of dynamic refresh
 	 */
-	int		cache_pll_trim_codes[2];
+	int		cache_pll_trim_codes[3];
 
 	/*
 	 * for maintaining the status of saving trim codes
@@ -196,6 +191,11 @@ struct mdss_pll_resources {
 	 */
 	struct dfps_info *dfps;
 
+	/*
+	 * Set when a dfps trigger happens before the first
+	 * suspend/resume, i.e. while handoff is not yet finished.
+	 */
+	bool dfps_trigger;
 };
 
 struct mdss_pll_vco_calc {
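For illustration, the shape of a populated table under the reworked dfps_info layout, which now carries three trim codes per VCO rate. The values below are made up; real entries are filled in by the platform, not hard-coded:

#include "pll_drv.h"

/* Purely illustrative entries, not taken from any target. */
static struct dfps_info example_dfps = {
	.vco_rate_cnt = 2,
	.codes_dfps = {
		{
			.is_valid = 1,
			.clk_rate = 1500000000,	/* hz */
			.pll_codes = {
				.pll_codes_1 = 0x24,
				.pll_codes_2 = 0x31,
				.pll_codes_3 = 0x4f,
			},
		},
		{
			.is_valid = 1,
			.clk_rate = 1800000000,	/* hz */
			.pll_codes = {
				.pll_codes_1 = 0x26,
				.pll_codes_2 = 0x33,
				.pll_codes_3 = 0x52,
			},
		},
	},
};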