dsi_phy_hw_v3_0.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/math64.h>
#include <linux/delay.h>
#include <linux/iopoll.h>

#include "dsi_hw.h"
#include "dsi_phy_hw.h"
#include "dsi_catalog.h"

#define DSIPHY_CMN_CLK_CFG0 0x010
#define DSIPHY_CMN_CLK_CFG1 0x014
#define DSIPHY_CMN_GLBL_CTRL 0x018
#define DSIPHY_CMN_RBUF_CTRL 0x01C
#define DSIPHY_CMN_VREG_CTRL 0x020
#define DSIPHY_CMN_CTRL_0 0x024
#define DSIPHY_CMN_CTRL_1 0x028
#define DSIPHY_CMN_CTRL_2 0x02C
#define DSIPHY_CMN_LANE_CFG0 0x030
#define DSIPHY_CMN_LANE_CFG1 0x034
#define DSIPHY_CMN_PLL_CNTRL 0x038
#define DSIPHY_CMN_LANE_CTRL0 0x098
#define DSIPHY_CMN_LANE_CTRL1 0x09C
#define DSIPHY_CMN_LANE_CTRL2 0x0A0
#define DSIPHY_CMN_LANE_CTRL3 0x0A4
#define DSIPHY_CMN_LANE_CTRL4 0x0A8
#define DSIPHY_CMN_TIMING_CTRL_0 0x0AC
#define DSIPHY_CMN_TIMING_CTRL_1 0x0B0
#define DSIPHY_CMN_TIMING_CTRL_2 0x0B4
#define DSIPHY_CMN_TIMING_CTRL_3 0x0B8
#define DSIPHY_CMN_TIMING_CTRL_4 0x0BC
#define DSIPHY_CMN_TIMING_CTRL_5 0x0C0
#define DSIPHY_CMN_TIMING_CTRL_6 0x0C4
#define DSIPHY_CMN_TIMING_CTRL_7 0x0C8
#define DSIPHY_CMN_TIMING_CTRL_8 0x0CC
#define DSIPHY_CMN_TIMING_CTRL_9 0x0D0
#define DSIPHY_CMN_TIMING_CTRL_10 0x0D4
#define DSIPHY_CMN_TIMING_CTRL_11 0x0D8
#define DSIPHY_CMN_PHY_STATUS 0x0EC
#define DSIPHY_CMN_LANE_STATUS0 0x0F4
#define DSIPHY_CMN_LANE_STATUS1 0x0F8

/* n = 0..3 for data lanes and n = 4 for clock lane */
#define DSIPHY_LNX_CFG0(n) (0x200 + (0x80 * (n)))
#define DSIPHY_LNX_CFG1(n) (0x204 + (0x80 * (n)))
#define DSIPHY_LNX_CFG2(n) (0x208 + (0x80 * (n)))
#define DSIPHY_LNX_CFG3(n) (0x20C + (0x80 * (n)))
#define DSIPHY_LNX_TEST_DATAPATH(n) (0x210 + (0x80 * (n)))
#define DSIPHY_LNX_PIN_SWAP(n) (0x214 + (0x80 * (n)))
#define DSIPHY_LNX_HSTX_STR_CTRL(n) (0x218 + (0x80 * (n)))
#define DSIPHY_LNX_OFFSET_TOP_CTRL(n) (0x21C + (0x80 * (n)))
#define DSIPHY_LNX_OFFSET_BOT_CTRL(n) (0x220 + (0x80 * (n)))
#define DSIPHY_LNX_LPTX_STR_CTRL(n) (0x224 + (0x80 * (n)))
#define DSIPHY_LNX_LPRX_CTRL(n) (0x228 + (0x80 * (n)))
#define DSIPHY_LNX_TX_DCTRL(n) (0x22C + (0x80 * (n)))

/* dynamic refresh control registers */
#define DSI_DYN_REFRESH_CTRL (0x000)
#define DSI_DYN_REFRESH_PIPE_DELAY (0x004)
#define DSI_DYN_REFRESH_PIPE_DELAY2 (0x008)
#define DSI_DYN_REFRESH_PLL_DELAY (0x00C)
#define DSI_DYN_REFRESH_STATUS (0x010)
#define DSI_DYN_REFRESH_PLL_CTRL0 (0x014)
#define DSI_DYN_REFRESH_PLL_CTRL1 (0x018)
#define DSI_DYN_REFRESH_PLL_CTRL2 (0x01C)
#define DSI_DYN_REFRESH_PLL_CTRL3 (0x020)
#define DSI_DYN_REFRESH_PLL_CTRL4 (0x024)
#define DSI_DYN_REFRESH_PLL_CTRL5 (0x028)
#define DSI_DYN_REFRESH_PLL_CTRL6 (0x02C)
#define DSI_DYN_REFRESH_PLL_CTRL7 (0x030)
#define DSI_DYN_REFRESH_PLL_CTRL8 (0x034)
#define DSI_DYN_REFRESH_PLL_CTRL9 (0x038)
#define DSI_DYN_REFRESH_PLL_CTRL10 (0x03C)
#define DSI_DYN_REFRESH_PLL_CTRL11 (0x040)
#define DSI_DYN_REFRESH_PLL_CTRL12 (0x044)
#define DSI_DYN_REFRESH_PLL_CTRL13 (0x048)
#define DSI_DYN_REFRESH_PLL_CTRL14 (0x04C)
#define DSI_DYN_REFRESH_PLL_CTRL15 (0x050)
#define DSI_DYN_REFRESH_PLL_CTRL16 (0x054)
#define DSI_DYN_REFRESH_PLL_CTRL17 (0x058)
#define DSI_DYN_REFRESH_PLL_CTRL18 (0x05C)
#define DSI_DYN_REFRESH_PLL_CTRL19 (0x060)
#define DSI_DYN_REFRESH_PLL_CTRL20 (0x064)
#define DSI_DYN_REFRESH_PLL_CTRL21 (0x068)
#define DSI_DYN_REFRESH_PLL_CTRL22 (0x06C)
#define DSI_DYN_REFRESH_PLL_CTRL23 (0x070)
#define DSI_DYN_REFRESH_PLL_CTRL24 (0x074)
#define DSI_DYN_REFRESH_PLL_CTRL25 (0x078)
#define DSI_DYN_REFRESH_PLL_CTRL26 (0x07C)
#define DSI_DYN_REFRESH_PLL_CTRL27 (0x080)
#define DSI_DYN_REFRESH_PLL_CTRL28 (0x084)
#define DSI_DYN_REFRESH_PLL_CTRL29 (0x088)
#define DSI_DYN_REFRESH_PLL_CTRL30 (0x08C)
#define DSI_DYN_REFRESH_PLL_CTRL31 (0x090)
#define DSI_DYN_REFRESH_PLL_UPPER_ADDR (0x094)
#define DSI_DYN_REFRESH_PLL_UPPER_ADDR2 (0x098)
/**
 * regulator_enable() - enable regulators for DSI PHY
 * @phy:     Pointer to DSI PHY hardware object.
 * @reg_cfg: Regulator configuration for all DSI lanes.
 */
void dsi_phy_hw_v3_0_regulator_enable(struct dsi_phy_hw *phy,
				struct dsi_phy_per_lane_cfgs *reg_cfg)
{
	DSI_PHY_DBG(phy, "Phy regulators enabled\n");
	/* Nothing to be done for DSI PHY regulator enable */
}

/**
 * regulator_disable() - disable regulators
 * @phy: Pointer to DSI PHY hardware object.
 */
void dsi_phy_hw_v3_0_regulator_disable(struct dsi_phy_hw *phy)
{
	DSI_PHY_DBG(phy, "Phy regulators disabled\n");
	/* Nothing to be done for DSI PHY regulator disable */
}
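
/**
 * toggle_resync_fifo() - toggle the resync FIFO of the DSI PHY
 * @phy: Pointer to DSI PHY hardware object.
 *
 * Turn the resync FIFO off and then back on, with write barriers in
 * between, so that each step is committed to the hardware in order.
 */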
void dsi_phy_hw_v3_0_toggle_resync_fifo(struct dsi_phy_hw *phy)
{
	DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x00);
	/* ensure that the FIFO is off */
	wmb();
	DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x1);
	/* ensure that the FIFO is toggled back on */
	wmb();
}

static int dsi_phy_hw_v3_0_is_pll_on(struct dsi_phy_hw *phy)
{
	u32 data = 0;

	data = DSI_R32(phy, DSIPHY_CMN_PLL_CNTRL);
	mb(); /* make sure read happened */
	return (data & BIT(0));
}
static void dsi_phy_hw_v3_0_config_lpcdrx(struct dsi_phy_hw *phy,
				struct dsi_phy_cfg *cfg, bool enable)
{
	int phy_lane_0 = dsi_phy_conv_logical_to_phy_lane(&cfg->lane_map,
				DSI_LOGICAL_LANE_0);

	/*
	 * LPRX and CDRX need to be enabled only for the physical data lane
	 * corresponding to logical data lane 0.
	 */
	if (enable)
		DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(phy_lane_0),
			cfg->strength.lane[phy_lane_0][1]);
	else
		DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(phy_lane_0), 0);
}
static void dsi_phy_hw_v3_0_lane_swap_config(struct dsi_phy_hw *phy,
				struct dsi_lane_map *lane_map)
{
	DSI_W32(phy, DSIPHY_CMN_LANE_CFG0,
		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_0] |
		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_1] << 4)));
	DSI_W32(phy, DSIPHY_CMN_LANE_CFG1,
		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_2] |
		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_3] << 4)));
}

static void dsi_phy_hw_v3_0_lane_settings(struct dsi_phy_hw *phy,
				struct dsi_phy_cfg *cfg)
{
	int i;
	u8 tx_dctrl[] = {0x00, 0x00, 0x00, 0x04, 0x01};

	/* Strength ctrl settings */
	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
		DSI_W32(phy, DSIPHY_LNX_LPTX_STR_CTRL(i),
			cfg->strength.lane[i][0]);
		/*
		 * Disable LPRX and CDRX for all lanes. They will later be
		 * enabled only for the physical data lane corresponding to
		 * logical data lane 0.
		 */
		DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(i), 0);
		DSI_W32(phy, DSIPHY_LNX_PIN_SWAP(i), 0x0);
		DSI_W32(phy, DSIPHY_LNX_HSTX_STR_CTRL(i), 0x88);
	}
	dsi_phy_hw_v3_0_config_lpcdrx(phy, cfg, true);

	/* other settings */
	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
		DSI_W32(phy, DSIPHY_LNX_CFG0(i), cfg->lanecfg.lane[i][0]);
		DSI_W32(phy, DSIPHY_LNX_CFG1(i), cfg->lanecfg.lane[i][1]);
		DSI_W32(phy, DSIPHY_LNX_CFG2(i), cfg->lanecfg.lane[i][2]);
		DSI_W32(phy, DSIPHY_LNX_CFG3(i), cfg->lanecfg.lane[i][3]);
		DSI_W32(phy, DSIPHY_LNX_OFFSET_TOP_CTRL(i), 0x0);
		DSI_W32(phy, DSIPHY_LNX_OFFSET_BOT_CTRL(i), 0x0);
		DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(i), tx_dctrl[i]);
	}
}
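
/**
 * clamp_ctrl() - configure DSI PHY lane clamps
 * @phy:    Pointer to DSI PHY hardware object.
 * @enable: When false, release the lane clamps (FreezeIO). Enabling is a
 *          no-op, since the clamps are enabled by default.
 */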
void dsi_phy_hw_v3_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable)
{
	u32 reg;

	DSI_PHY_DBG(phy, "enable=%s\n", enable ? "true" : "false");

	/*
	 * The DSI PHY lane clamps, also referred to as PHY FreezeIO, are
	 * enabled by default as part of the initialization sequence. They
	 * would get triggered anytime the chip FreezeIO is asserted.
	 */
	if (enable)
		return;

	/*
	 * Toggle BIT(0) to explicitly release the PHY freeze I/O and
	 * disable the clamps.
	 */
	reg = DSI_R32(phy, DSIPHY_LNX_TX_DCTRL(3));
	DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), reg | BIT(0));
	wmb(); /* Ensure that the freezeio bit is toggled */
	DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), reg & ~BIT(0));
	wmb(); /* Ensure that the freezeio bit is toggled */
}
/**
 * enable() - Enable PHY hardware
 * @phy: Pointer to DSI PHY hardware object.
 * @cfg: Per lane configurations for timing, strength and lane
 *       configurations.
 */
void dsi_phy_hw_v3_0_enable(struct dsi_phy_hw *phy,
			struct dsi_phy_cfg *cfg)
{
	int rc = 0;
	u32 status;
	u32 const delay_us = 5;
	u32 const timeout_us = 1000;
	struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
	u32 data;

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		DSI_PHY_WARN(phy, "PLL turned on before configuring PHY\n");

	/* wait for REFGEN READY */
	rc = readl_poll_timeout_atomic(phy->base + DSIPHY_CMN_PHY_STATUS,
			status, (status & BIT(0)), delay_us, timeout_us);
	if (rc) {
		DSI_PHY_ERR(phy, "Ref gen not ready. Aborting\n");
		return;
	}

	/* de-assert digital and pll power down */
	data = BIT(6) | BIT(5);
	DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);

	/* Assert PLL core reset */
	DSI_W32(phy, DSIPHY_CMN_PLL_CNTRL, 0x00);

	/* turn off resync FIFO */
	DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x00);

	/* Select MS1 byte-clk */
	DSI_W32(phy, DSIPHY_CMN_GLBL_CTRL, 0x10);

	/* Enable LDO */
	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL, 0x59);

	/* Configure PHY lane swap */
	dsi_phy_hw_v3_0_lane_swap_config(phy, &cfg->lane_map);

	/* DSI PHY timings */
	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_0, timing->lane_v3[0]);
	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_1, timing->lane_v3[1]);
	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_2, timing->lane_v3[2]);
	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_3, timing->lane_v3[3]);
	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_4, timing->lane_v3[4]);
	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_5, timing->lane_v3[5]);
	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_6, timing->lane_v3[6]);
	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_7, timing->lane_v3[7]);
	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_8, timing->lane_v3[8]);
	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_9, timing->lane_v3[9]);
	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_10, timing->lane_v3[10]);
	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_11, timing->lane_v3[11]);

	/* Remove power down from all blocks */
	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x7f);

	/* power up lanes */
	data = DSI_R32(phy, DSIPHY_CMN_CTRL_0);
	/* TODO: only power up lanes that are used */
	data |= 0x1F;
	DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0x1F);

	/* Select full-rate mode */
	DSI_W32(phy, DSIPHY_CMN_CTRL_2, 0x40);

	switch (cfg->pll_source) {
	case DSI_PLL_SOURCE_STANDALONE:
	case DSI_PLL_SOURCE_NATIVE:
		data = 0x0; /* internal PLL */
		break;
	case DSI_PLL_SOURCE_NON_NATIVE:
		data = 0x1; /* external PLL */
		break;
	default:
		break;
	}
	DSI_W32(phy, DSIPHY_CMN_CLK_CFG1, (data << 2)); /* set PLL src */

	/* DSI lane settings */
	dsi_phy_hw_v3_0_lane_settings(phy, cfg);

	DSI_PHY_DBG(phy, "Phy enabled\n");
}
/**
 * disable() - Disable PHY hardware
 * @phy: Pointer to DSI PHY hardware object.
 * @cfg: Per lane configurations for timing, strength and lane
 *       configurations.
 */
void dsi_phy_hw_v3_0_disable(struct dsi_phy_hw *phy,
			struct dsi_phy_cfg *cfg)
{
	u32 data = 0;

	if (dsi_phy_hw_v3_0_is_pll_on(phy))
		DSI_PHY_WARN(phy, "Turning OFF PHY while PLL is on\n");

	dsi_phy_hw_v3_0_config_lpcdrx(phy, cfg, false);

	data = DSI_R32(phy, DSIPHY_CMN_CTRL_0);
	/* disable all lanes */
	data &= ~0x1F;
	DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0);

	/* Turn off all PHY blocks */
	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x00);
	/* make sure phy is turned off */
	wmb();

	DSI_PHY_DBG(phy, "Phy disabled\n");
}
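
/**
 * wait_for_lane_idle() - wait for DSI lanes to reach the stop state
 * @phy:   Pointer to DSI PHY hardware object.
 * @lanes: Bitmask of DSI data lanes to poll; the clock lane is always
 *         included in the poll mask.
 *
 * Return: 0 on success, or the error returned by readl_poll_timeout()
 * if the lanes do not reach the stop state within the timeout.
 */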
int dsi_phy_hw_v3_0_wait_for_lane_idle(struct dsi_phy_hw *phy, u32 lanes)
{
	int rc = 0, val = 0;
	u32 stop_state_mask = 0;
	u32 const sleep_us = 10;
	u32 const timeout_us = 100;

	stop_state_mask = BIT(4); /* clock lane */
	if (lanes & DSI_DATA_LANE_0)
		stop_state_mask |= BIT(0);
	if (lanes & DSI_DATA_LANE_1)
		stop_state_mask |= BIT(1);
	if (lanes & DSI_DATA_LANE_2)
		stop_state_mask |= BIT(2);
	if (lanes & DSI_DATA_LANE_3)
		stop_state_mask |= BIT(3);

	DSI_PHY_DBG(phy, "polling for lanes to be in stop state, mask=0x%08x\n",
		stop_state_mask);

	rc = readl_poll_timeout(phy->base + DSIPHY_CMN_LANE_STATUS1, val,
			((val & stop_state_mask) == stop_state_mask),
			sleep_us, timeout_us);
	if (rc) {
		DSI_PHY_ERR(phy, "lanes not in stop state, LANE_STATUS=0x%08x\n",
			val);
		return rc;
	}

	return 0;
}
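
/**
 * ulps_request() - request ULPS entry for the given lanes
 * @phy:   Pointer to DSI PHY hardware object.
 * @cfg:   DSI PHY configuration, used to locate the physical data lane
 *         carrying LPRX/CDRX.
 * @lanes: Bitmask of DSI_CLOCK_LANE and DSI_DATA_LANE_n flags for which
 *         ULPS entry is requested.
 */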
void dsi_phy_hw_v3_0_ulps_request(struct dsi_phy_hw *phy,
			struct dsi_phy_cfg *cfg, u32 lanes)
{
	u32 reg = 0;

	if (lanes & DSI_CLOCK_LANE)
		reg = BIT(4);
	if (lanes & DSI_DATA_LANE_0)
		reg |= BIT(0);
	if (lanes & DSI_DATA_LANE_1)
		reg |= BIT(1);
	if (lanes & DSI_DATA_LANE_2)
		reg |= BIT(2);
	if (lanes & DSI_DATA_LANE_3)
		reg |= BIT(3);

	/*
	 * ULPS entry request. Wait for a short time to make sure
	 * that the lanes enter ULPS. Recommended as per HPG.
	 */
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg);
	usleep_range(100, 110);

	/* disable LPRX and CDRX */
	dsi_phy_hw_v3_0_config_lpcdrx(phy, cfg, false);
	/* disable lane LDOs */
	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL, 0x19);
	DSI_PHY_DBG(phy, "ULPS requested for lanes 0x%x\n", lanes);
}
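
/**
 * lane_reset() - reset the DSI PHY lanes
 * @phy: Pointer to DSI PHY hardware object.
 *
 * Force the lanes into the stop state through DSIPHY_CMN_LANE_CTRL3 and
 * poll DSIPHY_CMN_LANE_STATUS1 until all lanes report 0x1f or the retry
 * count is exhausted, then release the request.
 *
 * Return: 0.
 */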
int dsi_phy_hw_v3_0_lane_reset(struct dsi_phy_hw *phy)
{
	int ret = 0, loop = 10, u_dly = 200;
	u32 ln_status = 0;

	while ((ln_status != 0x1f) && loop) {
		DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x1f);
		wmb(); /* ensure register is committed */
		loop--;
		udelay(u_dly);
		ln_status = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS1);
		DSI_PHY_DBG(phy, "trial no: %d\n", loop);
	}

	if (!loop)
		DSI_PHY_DBG(phy, "could not reset phy lanes\n");

	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x0);
	wmb(); /* ensure register is committed */

	return ret;
}
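
/**
 * ulps_exit() - exit ULPS on the given lanes
 * @phy:   Pointer to DSI PHY hardware object.
 * @cfg:   DSI PHY configuration, used to re-enable LPRX/CDRX.
 * @lanes: Bitmask of DSI_CLOCK_LANE and DSI_DATA_LANE_n flags for which
 *         ULPS exit is requested.
 */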
void dsi_phy_hw_v3_0_ulps_exit(struct dsi_phy_hw *phy,
			struct dsi_phy_cfg *cfg, u32 lanes)
{
	u32 reg = 0;

	if (lanes & DSI_CLOCK_LANE)
		reg = BIT(4);
	if (lanes & DSI_DATA_LANE_0)
		reg |= BIT(0);
	if (lanes & DSI_DATA_LANE_1)
		reg |= BIT(1);
	if (lanes & DSI_DATA_LANE_2)
		reg |= BIT(2);
	if (lanes & DSI_DATA_LANE_3)
		reg |= BIT(3);

	/* enable lane LDOs */
	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL, 0x59);
	/* enable LPRX and CDRX */
	dsi_phy_hw_v3_0_config_lpcdrx(phy, cfg, true);

	/* ULPS exit request */
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL2, reg);
	usleep_range(1000, 1010);

	/* Clear ULPS request flags on all lanes */
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, 0);
	/* Clear ULPS exit flags on all lanes */
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL2, 0);

	/*
	 * Sometimes when exiting ULPS, it is possible that some DSI
	 * lanes are not in the stop state, which could lead to DSI
	 * commands not going through. To avoid this, force the lanes
	 * to be in the stop state.
	 */
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, reg);
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0);
	usleep_range(100, 110);
}
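
/**
 * get_lanes_in_ulps() - read the ULPS lane status from the PHY
 * @phy: Pointer to DSI PHY hardware object.
 *
 * Return: Raw value of DSIPHY_CMN_LANE_STATUS0, indicating the ULPS state
 * of the lanes.
 */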
u32 dsi_phy_hw_v3_0_get_lanes_in_ulps(struct dsi_phy_hw *phy)
{
	u32 lanes = 0;

	lanes = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS0);
	DSI_PHY_DBG(phy, "lanes in ulps = 0x%x\n", lanes);
	return lanes;
}
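
/**
 * is_lanes_in_ulps() - check whether the requested lanes are in ULPS
 * @lanes:      Bitmask of lanes to check.
 * @ulps_lanes: Lane status read via get_lanes_in_ulps().
 *
 * Return: true if the requested lanes are in ULPS, false otherwise.
 */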
bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes)
{
	if (lanes & ulps_lanes)
		return false;

	return true;
}
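
/**
 * timing_val() - copy DSI PHY timing parameters into the per-lane config
 * @timing_cfg: Destination per-lane timing configuration.
 * @timing_val: Source array of timing values.
 * @size:       Number of entries in @timing_val; must be
 *              DSI_PHY_TIMING_V3_SIZE.
 *
 * Return: 0 on success, -EINVAL if @size is unexpected.
 */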
int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
			u32 *timing_val, u32 size)
{
	int i = 0;

	if (size != DSI_PHY_TIMING_V3_SIZE) {
		DSI_ERR("Unexpected timing array size %d\n", size);
		return -EINVAL;
	}

	for (i = 0; i < size; i++)
		timing_cfg->lane_v3[i] = timing_val[i];

	return 0;
}
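
/**
 * dyn_refresh_config() - program the dynamic refresh PLL control entries
 * @phy:       Pointer to DSI PHY hardware object.
 * @cfg:       DSI PHY configuration whose timing values are programmed.
 * @is_master: Selects which set of dynamic refresh PLL control entries is
 *             programmed for this PHY.
 *
 * Each entry holds a register/value pair that the dynamic refresh block
 * sends to the DSI PHY during the clock switch.
 */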
void dsi_phy_hw_v3_0_dyn_refresh_config(struct dsi_phy_hw *phy,
			struct dsi_phy_cfg *cfg, bool is_master)
{
	u32 reg;

	if (is_master) {
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
			DSIPHY_CMN_GLBL_CTRL, DSIPHY_CMN_VREG_CTRL,
			0x10, 0x59);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL10,
			DSIPHY_CMN_TIMING_CTRL_0, DSIPHY_CMN_TIMING_CTRL_1,
			cfg->timing.lane_v3[0], cfg->timing.lane_v3[1]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL11,
			DSIPHY_CMN_TIMING_CTRL_2, DSIPHY_CMN_TIMING_CTRL_3,
			cfg->timing.lane_v3[2], cfg->timing.lane_v3[3]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL12,
			DSIPHY_CMN_TIMING_CTRL_4, DSIPHY_CMN_TIMING_CTRL_5,
			cfg->timing.lane_v3[4], cfg->timing.lane_v3[5]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL13,
			DSIPHY_CMN_TIMING_CTRL_6, DSIPHY_CMN_TIMING_CTRL_7,
			cfg->timing.lane_v3[6], cfg->timing.lane_v3[7]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL14,
			DSIPHY_CMN_TIMING_CTRL_8, DSIPHY_CMN_TIMING_CTRL_9,
			cfg->timing.lane_v3[8], cfg->timing.lane_v3[9]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL15,
			DSIPHY_CMN_TIMING_CTRL_10, DSIPHY_CMN_TIMING_CTRL_11,
			cfg->timing.lane_v3[10], cfg->timing.lane_v3[11]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL16,
			DSIPHY_CMN_CTRL_0, DSIPHY_CMN_LANE_CTRL0,
			0x7f, 0x1f);
	} else {
		reg = DSI_R32(phy, DSIPHY_CMN_CLK_CFG0);
		reg &= ~BIT(5);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL0,
			DSIPHY_CMN_CLK_CFG0, DSIPHY_CMN_PLL_CNTRL,
			reg, 0x0);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL1,
			DSIPHY_CMN_RBUF_CTRL, DSIPHY_CMN_GLBL_CTRL,
			0x0, 0x10);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL2,
			DSIPHY_CMN_VREG_CTRL, DSIPHY_CMN_TIMING_CTRL_0,
			0x59, cfg->timing.lane_v3[0]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL3,
			DSIPHY_CMN_TIMING_CTRL_1, DSIPHY_CMN_TIMING_CTRL_2,
			cfg->timing.lane_v3[1], cfg->timing.lane_v3[2]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL4,
			DSIPHY_CMN_TIMING_CTRL_3, DSIPHY_CMN_TIMING_CTRL_4,
			cfg->timing.lane_v3[3], cfg->timing.lane_v3[4]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL5,
			DSIPHY_CMN_TIMING_CTRL_5, DSIPHY_CMN_TIMING_CTRL_6,
			cfg->timing.lane_v3[5], cfg->timing.lane_v3[6]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL6,
			DSIPHY_CMN_TIMING_CTRL_7, DSIPHY_CMN_TIMING_CTRL_8,
			cfg->timing.lane_v3[7], cfg->timing.lane_v3[8]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL7,
			DSIPHY_CMN_TIMING_CTRL_9, DSIPHY_CMN_TIMING_CTRL_10,
			cfg->timing.lane_v3[9], cfg->timing.lane_v3[10]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL8,
			DSIPHY_CMN_TIMING_CTRL_11, DSIPHY_CMN_CTRL_0,
			cfg->timing.lane_v3[11], 0x7f);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
			DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_2,
			0x1f, 0x40);
		/*
		 * fill with dummy register writes since controller will blindly
		 * send these values to DSI PHY.
		 */
		reg = DSI_DYN_REFRESH_PLL_CTRL11;
		while (reg <= DSI_DYN_REFRESH_PLL_CTRL29) {
			DSI_DYN_REF_REG_W(phy->dyn_pll_base, reg,
				DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_0,
				0x1f, 0x7f);
			reg += 0x4;
		}

		DSI_GEN_W32(phy->dyn_pll_base,
			DSI_DYN_REFRESH_PLL_UPPER_ADDR, 0);
		DSI_GEN_W32(phy->dyn_pll_base,
			DSI_DYN_REFRESH_PLL_UPPER_ADDR2, 0);
	}

	wmb(); /* make sure all registers are updated */
}
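
/**
 * dyn_refresh_pipe_delay() - program dynamic refresh pipe delays
 * @phy:   Pointer to DSI PHY hardware object.
 * @delay: Pipe, pipe2 and PLL delay values; the function returns early if
 *         this is NULL.
 */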
void dsi_phy_hw_v3_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
			struct dsi_dyn_clk_delay *delay)
{
	if (!delay)
		return;

	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY,
		delay->pipe_delay);
	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY2,
		delay->pipe_delay2);
	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_DELAY,
		delay->pll_delay);
}
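
/**
 * dyn_refresh_helper() - update the dynamic refresh control register
 * @phy:    Pointer to DSI PHY hardware object.
 * @offset: Bitmask of DYN_REFRESH_* controls to assert. A value of 0 clears
 *          the control register, which is the last step of the dynamic
 *          refresh sequence.
 */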
void dsi_phy_hw_v3_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset)
{
	u32 reg;

	/*
	 * If no offset is specified, clear the dynamic refresh ctrl register,
	 * which is the last step of the dynamic refresh sequence.
	 */
	if (!offset) {
		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
		reg &= ~(BIT(0) | BIT(8));
		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
		wmb(); /* ensure dynamic fps is cleared */
		return;
	}

	if (offset & BIT(DYN_REFRESH_INTF_SEL)) {
		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
		reg |= BIT(13);
		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
	}

	if (offset & BIT(DYN_REFRESH_SYNC_MODE)) {
		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
		reg |= BIT(16);
		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
	}

	if (offset & BIT(DYN_REFRESH_SWI_CTRL)) {
		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
		reg |= BIT(0);
		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
	}

	if (offset & BIT(DYN_REFRESH_SW_TRIGGER)) {
		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
		reg |= BIT(8);
		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
		wmb(); /* ensure dynamic fps is triggered */
	}
}
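
/**
 * cache_phy_timings() - copy the programmed PHY timings into a buffer
 * @timings: Source per-lane timing configuration.
 * @dst:     Destination array for the timing values.
 * @size:    Number of entries in @dst; must be DSI_PHY_TIMING_V3_SIZE.
 *
 * Return: 0 on success, -EINVAL on invalid arguments or size mismatch.
 */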
int dsi_phy_hw_v3_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
			u32 *dst, u32 size)
{
	int i;

	if (!timings || !dst || !size)
		return -EINVAL;

	if (size != DSI_PHY_TIMING_V3_SIZE) {
		DSI_ERR("size mis-match\n");
		return -EINVAL;
	}

	for (i = 0; i < size; i++)
		dst[i] = timings->lane_v3[i];

	return 0;
}