// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "phy-qcom-ufs-qmp-v4-pineapple.h"

#define UFS_PHY_NAME "ufs_phy_qmp_v4_pineapple"

static inline void ufs_qcom_phy_qmp_v4_start_serdes(struct ufs_qcom_phy *phy);
static int ufs_qcom_phy_qmp_v4_is_pcs_ready(struct ufs_qcom_phy *phy_common);
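
/*
 * Program the calibration tables for the cached submode and rate, take the
 * PHY out of software reset, start the SERDES and wait for PCS ready.
 */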
static int ufs_qcom_phy_qmp_v4_phy_calibrate(struct phy *generic_phy)
{
        struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
        struct device *dev = ufs_qcom_phy->dev;
        bool is_rate_B;
        int submode;
        int err;

        err = reset_control_assert(ufs_qcom_phy->ufs_reset);
        if (err) {
                dev_err(dev, "Failed to assert UFS PHY reset %d\n", err);
                goto out;
        }

        /* For UFS PHY's submode, 2 = G5, 1 = G4, 0 = non-G4/G5 */
        submode = ufs_qcom_phy->submode;
        if (submode != UFS_QCOM_PHY_SUBMODE_G4 &&
            submode != UFS_QCOM_PHY_SUBMODE_G5) {
                dev_err(dev, "%s: unsupported submode.\n", __func__);
                return -EOPNOTSUPP;
        }

        is_rate_B = (ufs_qcom_phy->mode == PHY_MODE_UFS_HS_B);

        writel_relaxed(0x01, ufs_qcom_phy->mmio + UFS_PHY_SW_RESET);
        /* Ensure PHY is in reset before writing PHY calibration data */
        wmb();

        /*
         * Writing PHY calibration in this order:
         * 1. Write Rate-A calibration first (1-lane mode).
         * 2. Write 2nd lane configuration if needed.
         * 3. Write Rate-B calibration overrides.
         */
        /* Same PHY HSG5 settings are used for HSG4 */
        ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_A_g5,
                               ARRAY_SIZE(phy_cal_table_rate_A_g5));
        if (submode == UFS_QCOM_PHY_SUBMODE_G4)
                ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_A_g4,
                                       ARRAY_SIZE(phy_cal_table_rate_A_g4));
        if (ufs_qcom_phy->lanes_per_direction == 2)
                ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_2nd_lane,
                                       ARRAY_SIZE(phy_cal_table_2nd_lane));
        if (is_rate_B)
                ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_B,
                                       ARRAY_SIZE(phy_cal_table_rate_B));

        writel_relaxed(0x00, ufs_qcom_phy->mmio + UFS_PHY_SW_RESET);
        /* Flush buffered writes */
        wmb();

        err = reset_control_deassert(ufs_qcom_phy->ufs_reset);
        if (err) {
                dev_err(dev, "Failed to deassert UFS PHY reset %d\n", err);
                goto out;
        }

        ufs_qcom_phy_qmp_v4_start_serdes(ufs_qcom_phy);
        err = ufs_qcom_phy_qmp_v4_is_pcs_ready(ufs_qcom_phy);
out:
        return err;
}
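
/*
 * One-time setup for the generic PHY: acquire the clocks, regulators and the
 * (optional) reset line used by the common UFS QCOM PHY code.
 */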
static int ufs_qcom_phy_qmp_v4_init(struct phy *generic_phy)
{
        struct ufs_qcom_phy_qmp_v4 *phy = phy_get_drvdata(generic_phy);
        struct ufs_qcom_phy *phy_common = &phy->common_cfg;
        int err;

        err = ufs_qcom_phy_init_clks(phy_common);
        if (err) {
                dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
                        __func__, err);
                goto out;
        }

        err = ufs_qcom_phy_init_vregulators(phy_common);
        if (err) {
                dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
                        __func__, err);
                goto out;
        }

        /* Optional */
        ufs_qcom_phy_get_reset(phy_common);
out:
        return err;
}

static int ufs_qcom_phy_qmp_v4_exit(struct phy *generic_phy)
{
        return 0;
}
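
/*
 * Cache the requested mode and submode; they are consumed later by
 * ufs_qcom_phy_qmp_v4_phy_calibrate().
 */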
static int ufs_qcom_phy_qmp_v4_set_mode(struct phy *generic_phy,
                                        enum phy_mode mode, int submode)
{
        struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);

        phy_common->mode = PHY_MODE_INVALID;
        if (mode > 0)
                phy_common->mode = mode;
        phy_common->submode = submode;

        return 0;
}
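
/*
 * Set or clear the RX interface clock-edge bit on both lanes. Called when
 * the PHY enters or leaves analog power collapse.
 */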
static inline
void ufs_qcom_phy_qmp_v4_tx_pull_down_ctrl(struct ufs_qcom_phy *phy,
                                           bool enable)
{
        u32 temp;

        temp = readl_relaxed(phy->mmio + QSERDES_RX0_RX_INTERFACE_MODE);
        if (enable)
                temp |= QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT;
        else
                temp &= ~QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT;
        writel_relaxed(temp, phy->mmio + QSERDES_RX0_RX_INTERFACE_MODE);

        if (phy->lanes_per_direction == 1)
                goto out;

        temp = readl_relaxed(phy->mmio + QSERDES_RX1_RX_INTERFACE_MODE);
        if (enable)
                temp |= QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT;
        else
                temp &= ~QSERDES_RX_INTERFACE_MODE_CLOCK_EDGE_BIT;
        writel_relaxed(temp, phy->mmio + QSERDES_RX1_RX_INTERFACE_MODE);
out:
        /* ensure register value is committed */
        mb();
}
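
/*
 * Enter or leave analog power collapse, keeping the TX pull-down control in
 * sync with the power state.
 */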
static void ufs_qcom_phy_qmp_v4_power_control(struct ufs_qcom_phy *phy,
                                              bool power_ctrl)
{
        if (!power_ctrl) {
                /* apply analog power collapse */
                writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
                /*
                 * Make sure the PHY knows its analog rail is going to be
                 * powered OFF.
                 */
                mb();
                ufs_qcom_phy_qmp_v4_tx_pull_down_ctrl(phy, true);
        } else {
                ufs_qcom_phy_qmp_v4_tx_pull_down_ctrl(phy, false);
                /* bring PHY out of analog power collapse */
                writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
                /*
                 * Before any transactions involving the PHY, ensure the PHY
                 * knows that its analog rail is powered ON.
                 */
                mb();
        }
}

/* Refer to MPHY Spec Table-40 */
#define DEEMPHASIS_3_5_dB	0x04
#define NO_DEEMPHASIS		0x0

static inline
u32 ufs_qcom_phy_qmp_v4_get_tx_hs_equalizer(struct ufs_qcom_phy *phy, u32 gear)
{
        if (gear == 5)
                return DEEMPHASIS_3_5_dB;

        /* Gear 1-4 setting */
        return NO_DEEMPHASIS;
}

static inline
void ufs_qcom_phy_qmp_v4_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
{
        /*
         * The v4 PHY does not have a TX_LANE_ENABLE register, so this is an
         * intentional no-op that keeps callers from seeing an error.
         */
}

static void ufs_qcom_phy_qmp_v4_ctrl_rx_linecfg(struct ufs_qcom_phy *phy,
                                                bool ctrl)
{
        u32 temp;

        temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
        if (ctrl) /* enable RX LineCfg */
                temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
        else /* disable RX LineCfg */
                temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
        writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);

        /* make sure the RX LineCfg setting is applied before we return */
        mb();
}
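
/* Kick the SERDES by setting the start bit in UFS_PHY_PHY_START. */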
static inline void ufs_qcom_phy_qmp_v4_start_serdes(struct ufs_qcom_phy *phy)
{
        u32 tmp;

        tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
        tmp &= ~MASK_SERDES_START;
        tmp |= (1 << OFFSET_SERDES_START);
        writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
        /* Ensure register value is committed */
        mb();
}
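
/* Poll UFS_PHY_PCS_READY_STATUS for up to one second (10 us per poll). */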
static int ufs_qcom_phy_qmp_v4_is_pcs_ready(struct ufs_qcom_phy *phy_common)
{
        int err;
        u32 val;

        err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
                                 val, (val & MASK_PCS_READY), 10, 1000000);
        if (err)
                dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
                        __func__, err);

        return err;
}

static void ufs_qcom_phy_qmp_v4_dbg_register_dump(struct ufs_qcom_phy *phy)
{
        ufs_qcom_phy_dump_regs(phy, COM_BASE, COM_SIZE,
                               "PHY QSERDES COM Registers ");
        ufs_qcom_phy_dump_regs(phy, PCS2_BASE, PCS2_SIZE,
                               "PHY PCS2 Registers ");
        ufs_qcom_phy_dump_regs(phy, PHY_BASE, PHY_SIZE,
                               "PHY Registers ");
        ufs_qcom_phy_dump_regs(phy, RX_BASE(0), RX_SIZE,
                               "PHY RX0 Registers ");
        ufs_qcom_phy_dump_regs(phy, TX_BASE(0), TX_SIZE,
                               "PHY TX0 Registers ");
        ufs_qcom_phy_dump_regs(phy, RX_BASE(1), RX_SIZE,
                               "PHY RX1 Registers ");
        ufs_qcom_phy_dump_regs(phy, TX_BASE(1), TX_SIZE,
                               "PHY TX1 Registers ");
}

static void ufs_qcom_phy_qmp_v4_dbg_register_save(struct ufs_qcom_phy *phy)
{
        ufs_qcom_phy_save_regs(phy, COM_BASE, COM_SIZE,
                               "PHY QSERDES COM Registers ");
        ufs_qcom_phy_save_regs(phy, PCS2_BASE, PCS2_SIZE,
                               "PHY PCS2 Registers ");
        ufs_qcom_phy_save_regs(phy, PHY_BASE, PHY_SIZE,
                               "PHY Registers ");
        ufs_qcom_phy_save_regs(phy, RX_BASE(0), RX_SIZE,
                               "PHY RX0 Registers ");
        ufs_qcom_phy_save_regs(phy, TX_BASE(0), TX_SIZE,
                               "PHY TX0 Registers ");
        ufs_qcom_phy_save_regs(phy, RX_BASE(1), RX_SIZE,
                               "PHY RX1 Registers ");
        ufs_qcom_phy_save_regs(phy, TX_BASE(1), TX_SIZE,
                               "PHY TX1 Registers ");
}
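
/* Callbacks registered with the generic PHY framework. */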
static const struct phy_ops ufs_qcom_phy_qmp_v4_phy_ops = {
        .init = ufs_qcom_phy_qmp_v4_init,
        .exit = ufs_qcom_phy_qmp_v4_exit,
        .power_on = ufs_qcom_phy_power_on,
        .power_off = ufs_qcom_phy_power_off,
        .set_mode = ufs_qcom_phy_qmp_v4_set_mode,
        .calibrate = ufs_qcom_phy_qmp_v4_phy_calibrate,
        .owner = THIS_MODULE,
};

static struct ufs_qcom_phy_specific_ops phy_v4_ops = {
        .start_serdes = ufs_qcom_phy_qmp_v4_start_serdes,
        .is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_v4_is_pcs_ready,
        .set_tx_lane_enable = ufs_qcom_phy_qmp_v4_set_tx_lane_enable,
        .ctrl_rx_linecfg = ufs_qcom_phy_qmp_v4_ctrl_rx_linecfg,
        .power_control = ufs_qcom_phy_qmp_v4_power_control,
        .get_tx_hs_equalizer = ufs_qcom_phy_qmp_v4_get_tx_hs_equalizer,
        .dbg_register_dump = ufs_qcom_phy_qmp_v4_dbg_register_dump,
        .dbg_register_save = ufs_qcom_phy_qmp_v4_dbg_register_save,
};
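
/*
 * Allocate the PHY private data, register the generic PHY through the common
 * UFS QCOM PHY probe helper and attach the v4-specific ops.
 */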
static int ufs_qcom_phy_qmp_v4_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct phy *generic_phy;
        struct ufs_qcom_phy_qmp_v4 *phy;
        int err = 0;

        phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
        if (!phy) {
                err = -ENOMEM;
                goto out;
        }

        generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
                                                 &ufs_qcom_phy_qmp_v4_phy_ops,
                                                 &phy_v4_ops);
        if (!generic_phy) {
                dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
                        __func__);
                err = -EIO;
                goto out;
        }

        phy_set_drvdata(generic_phy, phy);

        strscpy(phy->common_cfg.name, UFS_PHY_NAME,
                sizeof(phy->common_cfg.name));
out:
        return err;
}

static const struct of_device_id ufs_qcom_phy_qmp_v4_of_match[] = {
        { .compatible = "qcom,ufs-phy-qmp-v4-pineapple" },
        { .compatible = "qcom,ufs-phy-qmp-v4-cliffs" },
        { },
};
MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_v4_of_match);

static struct platform_driver ufs_qcom_phy_qmp_v4_driver = {
        .probe = ufs_qcom_phy_qmp_v4_probe,
        .driver = {
                .of_match_table = ufs_qcom_phy_qmp_v4_of_match,
                .name = "ufs_qcom_phy_qmp_v4_pineapple",
        },
};
module_platform_driver(ufs_qcom_phy_qmp_v4_driver);

MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP v4 PINEAPPLE");
MODULE_LICENSE("GPL");