/* phy-qcom-ufs-qmp-v4.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. */
  5. #include "phy-qcom-ufs-qmp-v4.h"
  6. #define UFS_PHY_NAME "ufs_phy_qmp_v4"
  7. #define check_v1(major, minor, step) \
  8. ((major == 0x4) && (minor == 0x000) && (step == 0x0000))
  9. #define check_v2(major, minor, step) \
  10. ((major == 0x4) && (minor == 0x001) && (step == 0x0000))
  11. static inline void ufs_qcom_phy_qmp_v4_start_serdes(struct ufs_qcom_phy *phy);
  12. static int ufs_qcom_phy_qmp_v4_is_pcs_ready(struct ufs_qcom_phy *phy_common);
  13. static
  14. int ufs_qcom_phy_qmp_v4_phy_calibrate(struct phy *generic_phy)
  15. {
  16. struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
  17. struct device *dev = ufs_qcom_phy->dev;
  18. bool is_g4, is_rate_B;
  19. int err;
  20. u8 major = ufs_qcom_phy->host_ctrl_rev_major;
  21. u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
  22. u16 step = ufs_qcom_phy->host_ctrl_rev_step;
  23. err = reset_control_assert(ufs_qcom_phy->ufs_reset);
  24. if (err) {
  25. dev_err(dev, "Failed to assert UFS PHY reset %d\n", err);
  26. goto out;
  27. }
  28. /* For UFS PHY's submode, 1 = G4, 0 = non-G4 */
  29. is_g4 = !!ufs_qcom_phy->submode;
  30. is_rate_B = (ufs_qcom_phy->mode == PHY_MODE_UFS_HS_B) ? true : false;
  31. writel_relaxed(0x01, ufs_qcom_phy->mmio + UFS_PHY_SW_RESET);
  32. /* Ensure PHY is in reset before writing PHY calibration data */
  33. wmb();
  34. /*
  35. * Writing PHY calibration in this order:
  36. * 1. Write Rate-A calibration first (1-lane mode).
  37. * Apply G3 or G4 specific settings (v2 may have additional
  38. * settings).
  39. * 2. Write 2nd lane configuration if needed.
  40. * Apply G3 or G4 specific settings (v2 may have additional
  41. * settings).
  42. * 3. Write Rate-B calibration overrides
  43. */
  44. ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_A,
  45. ARRAY_SIZE(phy_cal_table_rate_A));
  46. if (!is_g4)
  47. ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_A_g3,
  48. ARRAY_SIZE(phy_cal_table_rate_A_g3));
  49. else
  50. ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_A_g4,
  51. ARRAY_SIZE(phy_cal_table_rate_A_g4));
  52. if (check_v2(major, minor, step)) {
  53. if (!is_g4)
  54. ufs_qcom_phy_write_tbl(ufs_qcom_phy,
  55. phy_cal_table_rate_A_v2_g3,
  56. ARRAY_SIZE(phy_cal_table_rate_A_v2_g3));
  57. else
  58. ufs_qcom_phy_write_tbl(ufs_qcom_phy,
  59. phy_cal_table_rate_A_v2_g4,
  60. ARRAY_SIZE(phy_cal_table_rate_A_v2_g4));
  61. }
  62. if (ufs_qcom_phy->lanes_per_direction == 2) {
  63. ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_2nd_lane,
  64. ARRAY_SIZE(phy_cal_table_2nd_lane));
  65. if (check_v2(major, minor, step)) {
  66. if (!is_g4)
  67. ufs_qcom_phy_write_tbl(ufs_qcom_phy,
  68. phy_cal_table_2nd_lane_v2_g3,
  69. ARRAY_SIZE(phy_cal_table_2nd_lane_v2_g3));
  70. else
  71. ufs_qcom_phy_write_tbl(ufs_qcom_phy,
  72. phy_cal_table_2nd_lane_v2_g4,
  73. ARRAY_SIZE(phy_cal_table_2nd_lane_v2_g4));
  74. }
  75. }
  76. if (is_rate_B)
  77. ufs_qcom_phy_write_tbl(ufs_qcom_phy, phy_cal_table_rate_B,
  78. ARRAY_SIZE(phy_cal_table_rate_B));
  79. if (check_v1(major, minor, step)) {
  80. writel_relaxed(0x01, ufs_qcom_phy->mmio +
  81. QSERDES_RX0_AC_JTAG_ENABLE);
  82. writel_relaxed(0x01, ufs_qcom_phy->mmio +
  83. QSERDES_RX0_AC_JTAG_MODE);
  84. writel_relaxed(0x01, ufs_qcom_phy->mmio +
  85. QSERDES_RX1_AC_JTAG_ENABLE);
  86. writel_relaxed(0x01, ufs_qcom_phy->mmio +
  87. QSERDES_RX1_AC_JTAG_MODE);
  88. }
  89. writel_relaxed(0x00, ufs_qcom_phy->mmio + UFS_PHY_SW_RESET);
  90. /* flush buffered writes */
  91. wmb();
  92. err = reset_control_deassert(ufs_qcom_phy->ufs_reset);
  93. if (err) {
  94. dev_err(dev, "Failed to deassert UFS PHY reset %d\n", err);
  95. goto out;
  96. }
  97. ufs_qcom_phy_qmp_v4_start_serdes(ufs_qcom_phy);
  98. err = ufs_qcom_phy_qmp_v4_is_pcs_ready(ufs_qcom_phy);
  99. out:
  100. return err;
  101. }
  102. static int ufs_qcom_phy_qmp_v4_init(struct phy *generic_phy)
  103. {
  104. struct ufs_qcom_phy_qmp_v4 *phy = phy_get_drvdata(generic_phy);
  105. struct ufs_qcom_phy *phy_common = &phy->common_cfg;
  106. int err;
  107. err = ufs_qcom_phy_init_clks(phy_common);
  108. if (err) {
  109. dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
  110. __func__, err);
  111. goto out;
  112. }
  113. err = ufs_qcom_phy_init_vregulators(phy_common);
  114. if (err) {
  115. dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
  116. __func__, err);
  117. goto out;
  118. }
  119. /* Optional */
  120. ufs_qcom_phy_get_reset(phy_common);
  121. out:
  122. return err;
  123. }
/*
 * ufs_qcom_phy_qmp_v4_exit() - phy_ops .exit stub.
 * Nothing to tear down here; resources acquired in .init are released
 * elsewhere (devm / common PHY code — presumably; confirm against the
 * generic UFS PHY framework).
 */
static int ufs_qcom_phy_qmp_v4_exit(struct phy *generic_phy)
{
	return 0;
}
  128. static
  129. int ufs_qcom_phy_qmp_v4_set_mode(struct phy *generic_phy,
  130. enum phy_mode mode, int submode)
  131. {
  132. struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
  133. phy_common->mode = PHY_MODE_INVALID;
  134. if (mode > 0)
  135. phy_common->mode = mode;
  136. phy_common->submode = submode;
  137. return 0;
  138. }
  139. static
  140. void ufs_qcom_phy_qmp_v4_power_control(struct ufs_qcom_phy *phy,
  141. bool power_ctrl)
  142. {
  143. if (!power_ctrl) {
  144. /* apply analog power collapse */
  145. writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
  146. /*
  147. * Make sure that PHY knows its analog rail is going to be
  148. * powered OFF.
  149. */
  150. mb();
  151. } else {
  152. /* bring PHY out of analog power collapse */
  153. writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
  154. /*
  155. * Before any transactions involving PHY, ensure PHY knows
  156. * that it's analog rail is powered ON.
  157. */
  158. mb();
  159. }
  160. }
/*
 * ufs_qcom_phy_qmp_v4_set_tx_lane_enable() - intentional no-op.
 * @phy: common PHY context
 * @val: requested lane-enable value (ignored)
 */
static inline
void ufs_qcom_phy_qmp_v4_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
{
	/*
	 * v4 PHY does not have TX_LANE_ENABLE register.
	 * Implement this function so as not to propagate error to caller.
	 */
}
  169. static
  170. void ufs_qcom_phy_qmp_v4_ctrl_rx_linecfg(struct ufs_qcom_phy *phy, bool ctrl)
  171. {
  172. u32 temp;
  173. temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
  174. if (ctrl) /* enable RX LineCfg */
  175. temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
  176. else /* disable RX LineCfg */
  177. temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
  178. writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);
  179. /* make sure that RX LineCfg config applied before we return */
  180. mb();
  181. }
  182. static inline void ufs_qcom_phy_qmp_v4_start_serdes(struct ufs_qcom_phy *phy)
  183. {
  184. u32 tmp;
  185. tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
  186. tmp &= ~MASK_SERDES_START;
  187. tmp |= (1 << OFFSET_SERDES_START);
  188. writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
  189. /* Ensure register value is committed */
  190. mb();
  191. }
  192. static int ufs_qcom_phy_qmp_v4_is_pcs_ready(struct ufs_qcom_phy *phy_common)
  193. {
  194. int err = 0;
  195. u32 val;
  196. err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
  197. val, (val & MASK_PCS_READY), 10, 1000000);
  198. if (err) {
  199. dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
  200. __func__, err);
  201. goto out;
  202. }
  203. out:
  204. return err;
  205. }
  206. static void ufs_qcom_phy_qmp_v4_dbg_register_dump(struct ufs_qcom_phy *phy)
  207. {
  208. ufs_qcom_phy_dump_regs(phy, COM_BASE, COM_SIZE,
  209. "PHY QSERDES COM Registers ");
  210. ufs_qcom_phy_dump_regs(phy, PCS2_BASE, PCS2_SIZE,
  211. "PHY PCS2 Registers ");
  212. ufs_qcom_phy_dump_regs(phy, PHY_BASE, PHY_SIZE,
  213. "PHY Registers ");
  214. ufs_qcom_phy_dump_regs(phy, RX_BASE(0), RX_SIZE,
  215. "PHY RX0 Registers ");
  216. ufs_qcom_phy_dump_regs(phy, TX_BASE(0), TX_SIZE,
  217. "PHY TX0 Registers ");
  218. ufs_qcom_phy_dump_regs(phy, RX_BASE(1), RX_SIZE,
  219. "PHY RX1 Registers ");
  220. ufs_qcom_phy_dump_regs(phy, TX_BASE(1), TX_SIZE,
  221. "PHY TX1 Registers ");
  222. }
/* Generic PHY framework callbacks for this QMP v4 PHY. */
static const struct phy_ops ufs_qcom_phy_qmp_v4_phy_ops = {
	.init = ufs_qcom_phy_qmp_v4_init,
	.exit = ufs_qcom_phy_qmp_v4_exit,
	.power_on = ufs_qcom_phy_power_on,
	.power_off = ufs_qcom_phy_power_off,
	.set_mode = ufs_qcom_phy_qmp_v4_set_mode,
	.calibrate = ufs_qcom_phy_qmp_v4_phy_calibrate,
	.owner = THIS_MODULE,
};
/* QMP v4-specific hooks consumed by the common Qualcomm UFS PHY layer. */
static struct ufs_qcom_phy_specific_ops phy_v4_ops = {
	.start_serdes = ufs_qcom_phy_qmp_v4_start_serdes,
	.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_v4_is_pcs_ready,
	.set_tx_lane_enable = ufs_qcom_phy_qmp_v4_set_tx_lane_enable,
	.ctrl_rx_linecfg = ufs_qcom_phy_qmp_v4_ctrl_rx_linecfg,
	.power_control = ufs_qcom_phy_qmp_v4_power_control,
	.dbg_register_dump = ufs_qcom_phy_qmp_v4_dbg_register_dump,
};
  240. static int ufs_qcom_phy_qmp_v4_probe(struct platform_device *pdev)
  241. {
  242. struct device *dev = &pdev->dev;
  243. struct phy *generic_phy;
  244. struct ufs_qcom_phy_qmp_v4 *phy;
  245. int err = 0;
  246. phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
  247. if (!phy) {
  248. err = -ENOMEM;
  249. goto out;
  250. }
  251. generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
  252. &ufs_qcom_phy_qmp_v4_phy_ops, &phy_v4_ops);
  253. if (!generic_phy) {
  254. dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
  255. __func__);
  256. err = -EIO;
  257. goto out;
  258. }
  259. phy_set_drvdata(generic_phy, phy);
  260. strscpy(phy->common_cfg.name, UFS_PHY_NAME,
  261. sizeof(phy->common_cfg.name));
  262. out:
  263. return err;
  264. }
/* Device-tree match table for this PHY. */
static const struct of_device_id ufs_qcom_phy_qmp_v4_of_match[] = {
	{.compatible = "qcom,ufs-phy-qmp-v4"},
	{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_v4_of_match);

/* Platform driver registration; no .remove — cleanup is device-managed. */
static struct platform_driver ufs_qcom_phy_qmp_v4_driver = {
	.probe = ufs_qcom_phy_qmp_v4_probe,
	.driver = {
		.of_match_table = ufs_qcom_phy_qmp_v4_of_match,
		.name = "ufs_qcom_phy_qmp_v4",
	},
};
module_platform_driver(ufs_qcom_phy_qmp_v4_driver);

MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP v4");
MODULE_LICENSE("GPL");