// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2021, Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "phy-qcom-ufs-i.h"

#define MAX_PROP_NAME			32
#define VDDA_PHY_MIN_UV			880000
#define VDDA_PHY_MAX_UV			950000
#define VDDA_PLL_MIN_UV			1200000
#define VDDA_PLL_MAX_UV			1800000
#define VDDP_REF_CLK_MIN_UV		1200000
#define VDDP_REF_CLK_MAX_UV		1200000
#define VDDA_QREF_MIN_UV		880000
#define VDDA_QREF_MAX_UV		912000
#define UFS_PHY_DEFAULT_LANES_PER_DIRECTION	1

/**
 * struct ufs_qcom_phy_regs - records the info of a UFS QCOM PHY register domain.
 * @list: list head used to link all saved PHY register domains.
 * @prefix: the name of this register domain.
 * @ptr: pointer to the memory that holds the saved register values.
 * @len: the size of this register domain.
 */
struct ufs_qcom_phy_regs {
	struct list_head list;
	const char *prefix;
	u32 *ptr;
	size_t len;
};

static int ufs_qcom_phy_start_serdes(struct ufs_qcom_phy *ufs_qcom_phy);
static int ufs_qcom_phy_is_pcs_ready(struct ufs_qcom_phy *ufs_qcom_phy);

void ufs_qcom_phy_write_tbl(struct ufs_qcom_phy *ufs_qcom_phy,
			    struct ufs_qcom_phy_calibration *tbl,
			    int tbl_size)
{
	int i;

	for (i = 0; i < tbl_size; i++)
		writel_relaxed(tbl[i].cfg_value,
			       ufs_qcom_phy->mmio + tbl[i].reg_offset);
}
EXPORT_SYMBOL(ufs_qcom_phy_write_tbl);

int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
			   struct ufs_qcom_phy_calibration *tbl_A,
			   int tbl_size_A,
			   struct ufs_qcom_phy_calibration *tbl_B,
			   int tbl_size_B, bool is_rate_B)
{
	struct device *dev = ufs_qcom_phy->dev;
	int ret = 0;

	ret = reset_control_assert(ufs_qcom_phy->ufs_reset);
	if (ret) {
		dev_err(dev, "Failed to assert UFS PHY reset %d\n", ret);
		goto out;
	}

	if (!tbl_A) {
		dev_err(dev, "%s: tbl_A is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl_A, tbl_size_A);

	/*
	 * In case we would like to work in rate B, we need
	 * to override the registers that were configured in the rate A
	 * table with the values from the rate B table.
	 */
	if (is_rate_B) {
		if (!tbl_B) {
			dev_err(dev, "%s: tbl_B is NULL\n", __func__);
			ret = -EINVAL;
			goto out;
		}
		ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl_B, tbl_size_B);
	}

	/* flush buffered writes */
	mb();

	ret = reset_control_deassert(ufs_qcom_phy->ufs_reset);
	if (ret)
		dev_err(dev, "Failed to deassert UFS PHY reset %d\n", ret);

	ret = ufs_qcom_phy_start_serdes(ufs_qcom_phy);
	if (ret)
		goto out;

	ret = ufs_qcom_phy_is_pcs_ready(ufs_qcom_phy);
out:
	return ret;
}
EXPORT_SYMBOL(ufs_qcom_phy_calibrate);

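/*
 * Example (a minimal, hypothetical sketch): a device-specific PHY driver
 * would typically call ufs_qcom_phy_calibrate() from one of its phy_ops
 * callbacks, passing its own rate A/rate B tables. The table and function
 * names below are illustrative and are not defined in this file.
 *
 *	static int ufs_qcom_phy_qmp_xxx_calibrate(struct phy *generic_phy)
 *	{
 *		struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
 *		bool is_rate_B = false;	// e.g. derived from the agreed HS rate
 *
 *		return ufs_qcom_phy_calibrate(phy,
 *					      phy_cal_table_rate_A,
 *					      ARRAY_SIZE(phy_cal_table_rate_A),
 *					      phy_cal_table_rate_B,
 *					      ARRAY_SIZE(phy_cal_table_rate_B),
 *					      is_rate_B);
 *	}
 */
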
/*
 * This assumes the embedded phy structure inside generic_phy is of type
 * struct ufs_qcom_phy. In order to function properly it's crucial
 * to keep the embedded struct "struct ufs_qcom_phy common_cfg"
 * as the first member inside generic_phy.
 */
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
{
	return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
}
EXPORT_SYMBOL(get_ufs_qcom_phy);

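/*
 * Example layout (a hypothetical sketch; the struct name is illustrative):
 * a device-specific PHY structure must embed struct ufs_qcom_phy as its
 * first member so that the cast above remains valid:
 *
 *	struct ufs_qcom_phy_qmp_xxx {
 *		struct ufs_qcom_phy common_cfg;	// must remain first
 *		// device-specific fields follow
 *	};
 */
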
static
int ufs_qcom_phy_base_init(struct platform_device *pdev,
			   struct ufs_qcom_phy *phy_common)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err = 0;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
	phy_common->mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR((void const *)phy_common->mmio)) {
		err = PTR_ERR((void const *)phy_common->mmio);
		phy_common->mmio = NULL;
		dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
			__func__, err);
		return err;
	}

	return 0;
}

struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
				       struct ufs_qcom_phy *common_cfg,
				       const struct phy_ops *ufs_qcom_phy_gen_ops,
				       struct ufs_qcom_phy_specific_ops *phy_spec_ops)
{
	int err;
	struct device *dev = &pdev->dev;
	struct phy *generic_phy = NULL;
	struct phy_provider *phy_provider;

	err = ufs_qcom_phy_base_init(pdev, common_cfg);
	if (err) {
		dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
		goto out;
	}

	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
	if (IS_ERR(phy_provider)) {
		err = PTR_ERR(phy_provider);
		dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
		goto out;
	}

	generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
	if (IS_ERR(generic_phy)) {
		err = PTR_ERR(generic_phy);
		dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
		generic_phy = NULL;
		goto out;
	}

	if (of_property_read_u32(dev->of_node, "lanes-per-direction",
				 &common_cfg->lanes_per_direction))
		common_cfg->lanes_per_direction =
			UFS_PHY_DEFAULT_LANES_PER_DIRECTION;

	/*
	 * UFS PHY power management is managed by its parent (the UFS host
	 * controller), hence set the no-runtime-PM-callbacks flag on the
	 * UFS PHY device to avoid any accidental attempt to call the PM
	 * callbacks for the PHY device.
	 */
	pm_runtime_no_callbacks(&generic_phy->dev);

	common_cfg->phy_spec_ops = phy_spec_ops;
	common_cfg->dev = dev;

	/*
	 * Init the PHY register domain list. It is used to manage the memory
	 * in which UFS PHY register values are saved.
	 */
	INIT_LIST_HEAD(&common_cfg->regs_list_head);

out:
	return generic_phy;
}
EXPORT_SYMBOL(ufs_qcom_phy_generic_probe);

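/*
 * Example (a hypothetical sketch; all names are illustrative): a SoC-specific
 * probe allocates its own structure, lets this helper create the generic phy,
 * and then ties the two together with phy_set_drvdata() so that
 * get_ufs_qcom_phy() works:
 *
 *	static int ufs_qcom_phy_qmp_xxx_probe(struct platform_device *pdev)
 *	{
 *		struct ufs_qcom_phy_qmp_xxx *phy;
 *		struct phy *generic_phy;
 *
 *		phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
 *		if (!phy)
 *			return -ENOMEM;
 *
 *		generic_phy = ufs_qcom_phy_generic_probe(pdev,
 *				&phy->common_cfg, &ufs_qcom_phy_qmp_xxx_ops,
 *				&phy_xxx_specific_ops);
 *		if (!generic_phy)
 *			return -EIO;
 *
 *		phy_set_drvdata(generic_phy, phy);
 *		return 0;
 *	}
 */
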
int ufs_qcom_phy_get_reset(struct ufs_qcom_phy *phy_common)
{
	struct reset_control *reset;

	if (phy_common->ufs_reset)
		return 0;

	reset = devm_reset_control_get_exclusive_by_index(phy_common->dev, 0);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	phy_common->ufs_reset = reset;
	return 0;
}
EXPORT_SYMBOL(ufs_qcom_phy_get_reset);

static int __ufs_qcom_phy_clk_get(struct device *dev,
				  const char *name, struct clk **clk_out,
				  bool err_print)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		if (err_print)
			dev_err(dev, "failed to get %s, %d\n", name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}

static int ufs_qcom_phy_clk_get(struct device *dev,
				const char *name, struct clk **clk_out)
{
	return __ufs_qcom_phy_clk_get(dev, name, clk_out, true);
}

int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common)
{
	int err;

	if (of_device_is_compatible(phy_common->dev->of_node,
				    "qcom,msm8996-ufs-phy-qmp-14nm"))
		goto skip_txrx_clk;

	/*
	 * tx_iface_clk does not exist in newer versions of the UFS PHY
	 * hardware, so don't return an error if it is not found.
	 */
	__ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk",
			       &phy_common->tx_iface_clk, false);

	/*
	 * rx_iface_clk does not exist in newer versions of the UFS PHY
	 * hardware, so don't return an error if it is not found.
	 */
	__ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk",
			       &phy_common->rx_iface_clk, false);

skip_txrx_clk:
	err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src",
				   &phy_common->ref_clk_src);
	if (err)
		goto out;

	/*
	 * "ref_clk_parent" is optional, hence don't abort init if it's not
	 * found.
	 */
	__ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
			       &phy_common->ref_clk_parent, false);

	/*
	 * "ref_clk_pad_en" is only required where UFS_PHY and
	 * UFS_REF_CLK_BSM both need to be enabled for the REF clock supply
	 * to the card. Hence don't abort init if it's not found.
	 */
	__ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_pad_en",
			       &phy_common->ref_clk_pad_en, false);

	/*
	 * Some platforms may not have the ON/OFF control for the reference
	 * clock, hence this clock may be optional.
	 */
	__ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
			       &phy_common->ref_clk, false);

	/*
	 * "ref_aux_clk" is optional and only supported by certain
	 * PHY versions, so don't abort init if it's not found.
	 */
	__ufs_qcom_phy_clk_get(phy_common->dev, "ref_aux_clk",
			       &phy_common->ref_aux_clk, false);

	/*
	 * "qref_clk" is optional and only needed on certain platforms.
	 * No need to abort if it's not present.
	 */
	__ufs_qcom_phy_clk_get(phy_common->dev, "qref_clk",
			       &phy_common->qref_clk, false);

	__ufs_qcom_phy_clk_get(phy_common->dev, "rx_sym0_mux_clk",
			       &phy_common->rx_sym0_mux_clk, false);
	__ufs_qcom_phy_clk_get(phy_common->dev, "rx_sym1_mux_clk",
			       &phy_common->rx_sym1_mux_clk, false);
	__ufs_qcom_phy_clk_get(phy_common->dev, "tx_sym0_mux_clk",
			       &phy_common->tx_sym0_mux_clk, false);
	__ufs_qcom_phy_clk_get(phy_common->dev, "rx_sym0_phy_clk",
			       &phy_common->rx_sym0_phy_clk, false);
	__ufs_qcom_phy_clk_get(phy_common->dev, "rx_sym1_phy_clk",
			       &phy_common->rx_sym1_phy_clk, false);
	__ufs_qcom_phy_clk_get(phy_common->dev, "tx_sym0_phy_clk",
			       &phy_common->tx_sym0_phy_clk, false);

	if (!phy_common->rx_sym0_mux_clk ||
	    !phy_common->rx_sym1_mux_clk ||
	    !phy_common->tx_sym0_mux_clk ||
	    !phy_common->ref_clk_src ||
	    !phy_common->rx_sym0_phy_clk ||
	    !phy_common->rx_sym1_phy_clk ||
	    !phy_common->tx_sym0_phy_clk)
		dev_err(phy_common->dev, "%s: null clock\n", __func__);

out:
	return err;
}
EXPORT_SYMBOL(ufs_qcom_phy_init_clks);

static int ufs_qcom_phy_init_vreg(struct device *dev,
				  struct ufs_qcom_phy_vreg *vreg,
				  const char *name)
{
	int err = 0;
	char prop_name[MAX_PROP_NAME];

	if (dev->of_node) {
		snprintf(prop_name, MAX_PROP_NAME, "%s-supply", name);
		if (!of_parse_phandle(dev->of_node, prop_name, 0)) {
			dev_dbg(dev, "No vreg data found for %s\n", prop_name);
			return -ENODATA;
		}
	}

	vreg->name = name;
	vreg->reg = devm_regulator_get(dev, name);
	if (IS_ERR(vreg->reg)) {
		err = PTR_ERR(vreg->reg);
		dev_err(dev, "failed to get %s, %d\n", name, err);
		goto out;
	}

	if (dev->of_node) {
		snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
		err = of_property_read_u32(dev->of_node,
					   prop_name, &vreg->max_uA);
		if (err && err != -EINVAL) {
			dev_err(dev, "%s: failed to read %s\n",
				__func__, prop_name);
			goto out;
		} else if (err == -EINVAL || !vreg->max_uA) {
			if (!vreg->max_uA) {
				err = 0;
			} else if (regulator_count_voltages(vreg->reg) > 0) {
				dev_err(dev, "%s: %s is mandatory\n",
					__func__, prop_name);
				goto out;
			}
			err = 0;
		}
	}

	if (!strcmp(name, "vdda-pll")) {
		vreg->max_uV = VDDA_PLL_MAX_UV;
		vreg->min_uV = VDDA_PLL_MIN_UV;
	} else if (!strcmp(name, "vdda-phy")) {
		vreg->max_uV = VDDA_PHY_MAX_UV;
		vreg->min_uV = VDDA_PHY_MIN_UV;
		snprintf(prop_name, MAX_PROP_NAME, "%s-min-microvolt", name);
		of_property_read_u32(dev->of_node,
				     prop_name, &vreg->min_uV);
		if (vreg->min_uV < VDDA_PHY_MIN_UV ||
		    vreg->min_uV > VDDA_PHY_MAX_UV) {
			dev_err(dev, "%s: ufs vdda-phy invalid min_uV=%duV\n",
				__func__, vreg->min_uV);
			vreg->min_uV = VDDA_PHY_MIN_UV;
		}
	} else if (!strcmp(name, "vddp-ref-clk")) {
		vreg->max_uV = VDDP_REF_CLK_MAX_UV;
		vreg->min_uV = VDDP_REF_CLK_MIN_UV;
	} else if (!strcmp(name, "vdda-qref")) {
		vreg->max_uV = VDDA_QREF_MAX_UV;
		vreg->min_uV = VDDA_QREF_MIN_UV;
	}

out:
	return err;
}

int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common)
{
	int err;

	err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll,
				     "vdda-pll");
	if (err)
		goto out;

	err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy,
				     "vdda-phy");
	if (err)
		goto out;

	ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk,
			       "vddp-ref-clk");
	ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdd_phy_gdsc,
			       "vdd-phy-gdsc");
	ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_qref,
			       "vdda-qref");

out:
	return err;
}
EXPORT_SYMBOL(ufs_qcom_phy_init_vregulators);

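/*
 * Note (illustrative only): for the supplies initialized above, the helper
 * derives device-tree property names from the regulator name, e.g.
 * "vdda-phy-supply", "vdda-phy-max-microamp" and, for vdda-phy only,
 * "vdda-phy-min-microvolt".
 */
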
static int ufs_qcom_phy_cfg_vreg(struct device *dev,
				 struct ufs_qcom_phy_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg = vreg->reg;
	const char *name = vreg->name;
	int min_uV;
	int uA_load;

	if (regulator_count_voltages(reg) > 0) {
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
				__func__, name, ret);
			goto out;
		}
		uA_load = on ? vreg->max_uA : 0;
		ret = regulator_set_load(reg, uA_load);
		if (ret >= 0) {
			/*
			 * regulator_set_load() returns the new regulator
			 * mode upon success.
			 */
			ret = 0;
		} else {
			dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
				__func__, name, uA_load, ret);
			goto out;
		}
	}

out:
	return ret;
}

static int ufs_qcom_phy_enable_vreg(struct device *dev,
				    struct ufs_qcom_phy_vreg *vreg)
{
	int ret = 0;

	if (!vreg || vreg->enabled)
		goto out;

	ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true);
	if (ret) {
		dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
			__func__, ret);
		goto out;
	}

	ret = regulator_enable(vreg->reg);
	if (ret) {
		dev_err(dev, "%s: enable failed, err=%d\n",
			__func__, ret);
		goto out;
	}

	vreg->enabled = true;

out:
	return ret;
}

static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy)
{
	int ret = 0;

	if (phy->is_ref_clk_enabled)
		goto out;

	/*
	 * "ref_clk_pad_en" is only required if UFS_PHY and UFS_REF_CLK_BSM
	 * both need to be enabled. Hence make sure that the clk reference
	 * is available before trying to enable the clock.
	 */
	if (phy->ref_clk_pad_en) {
		ret = clk_prepare_enable(phy->ref_clk_pad_en);
		if (ret) {
			dev_err(phy->dev, "%s: ref_clk_pad_en enable failed %d\n",
				__func__, ret);
			goto out;
		}
	}

	/* the qref clk signal is optional */
	if (phy->qref_clk)
		clk_prepare_enable(phy->qref_clk);

	/*
	 * The reference clock is propagated in a daisy-chained manner from
	 * the source to the PHY, so ungate the clocks at each stage.
	 */
	ret = clk_prepare_enable(phy->ref_clk_src);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
			__func__, ret);
		goto out;
	}

	/*
	 * "ref_clk_parent" is an optional clock, hence make sure that the
	 * clk reference is available before trying to enable the clock.
	 */
	if (phy->ref_clk_parent) {
		ret = clk_prepare_enable(phy->ref_clk_parent);
		if (ret) {
			dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
				__func__, ret);
			goto out_disable_src;
		}
	}

	/*
	 * "ref_clk" is an optional clock, hence make sure that the clk
	 * reference is available before trying to enable the clock.
	 */
	if (phy->ref_clk) {
		ret = clk_prepare_enable(phy->ref_clk);
		if (ret) {
			dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
				__func__, ret);
			goto out_disable_parent;
		}
	}

	/*
	 * "ref_aux_clk" is an optional clock and only supported by certain
	 * PHY versions, hence make sure that the clk reference is available
	 * before trying to enable the clock.
	 */
	if (phy->ref_aux_clk) {
		ret = clk_prepare_enable(phy->ref_aux_clk);
		if (ret) {
			dev_err(phy->dev, "%s: ref_aux_clk enable failed %d\n",
				__func__, ret);
			goto out_disable_ref;
		}
	}

	phy->is_ref_clk_enabled = true;
	goto out;

out_disable_ref:
	if (phy->ref_clk)
		clk_disable_unprepare(phy->ref_clk);
out_disable_parent:
	if (phy->ref_clk_parent)
		clk_disable_unprepare(phy->ref_clk_parent);
out_disable_src:
	clk_disable_unprepare(phy->ref_clk_src);
out:
	return ret;
}

static int ufs_qcom_phy_disable_vreg(struct device *dev,
				     struct ufs_qcom_phy_vreg *vreg)
{
	int ret = 0;

	if (!vreg || !vreg->enabled)
		goto out;

	ret = regulator_disable(vreg->reg);
	if (!ret) {
		/* ignore errors on applying disable config */
		ufs_qcom_phy_cfg_vreg(dev, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
			__func__, vreg->name, ret);
	}

out:
	return ret;
}

static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
{
	if (phy->is_ref_clk_enabled) {
		/*
		 * "ref_aux_clk" is an optional clock and only supported by
		 * certain PHY versions, hence make sure that the clk
		 * reference is available before trying to disable the clock.
		 */
		if (phy->ref_aux_clk)
			clk_disable_unprepare(phy->ref_aux_clk);
		/*
		 * "ref_clk" is an optional clock, hence make sure that the
		 * clk reference is available before trying to disable it.
		 */
		if (phy->ref_clk)
			clk_disable_unprepare(phy->ref_clk);
		/*
		 * "ref_clk_parent" is an optional clock, hence make sure
		 * that the clk reference is available before trying to
		 * disable it.
		 */
		if (phy->ref_clk_parent)
			clk_disable_unprepare(phy->ref_clk_parent);
		clk_disable_unprepare(phy->ref_clk_src);
		/*
		 * "ref_clk_pad_en" is an optional clock, hence make sure
		 * that the clk reference is available before trying to
		 * disable it.
		 */
		if (phy->ref_clk_pad_en)
			clk_disable_unprepare(phy->ref_clk_pad_en);
		/* the qref clk signal is optional */
		if (phy->qref_clk)
			clk_disable_unprepare(phy->qref_clk);

		phy->is_ref_clk_enabled = false;
	}
}

/* Turn ON M-PHY RMMI interface clocks */
static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
{
	int ret = 0;

	if (phy->is_iface_clk_enabled)
		goto out;

	if (!phy->tx_iface_clk)
		goto out;

	ret = clk_prepare_enable(phy->tx_iface_clk);
	if (ret) {
		dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
			__func__, ret);
		goto out;
	}

	ret = clk_prepare_enable(phy->rx_iface_clk);
	if (ret) {
		clk_disable_unprepare(phy->tx_iface_clk);
		dev_err(phy->dev, "%s: rx_iface_clk enable failed %d, also disabling tx_iface_clk\n",
			__func__, ret);
		goto out;
	}

	phy->is_iface_clk_enabled = true;

out:
	return ret;
}

/* Turn OFF M-PHY RMMI interface clocks */
static void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
{
	if (!phy->tx_iface_clk)
		return;

	if (phy->is_iface_clk_enabled) {
		clk_disable_unprepare(phy->tx_iface_clk);
		clk_disable_unprepare(phy->rx_iface_clk);
		phy->is_iface_clk_enabled = false;
	}
}

static int ufs_qcom_phy_start_serdes(struct ufs_qcom_phy *ufs_qcom_phy)
{
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
		dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
			__func__);
		ret = -EOPNOTSUPP;
	} else {
		ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
	}

	return ret;
}

void ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable)
		ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
							       tx_lanes);
}
EXPORT_SYMBOL(ufs_qcom_phy_set_tx_lane_enable);

int ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
					 u8 major, u16 minor, u16 step)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (!ufs_qcom_phy)
		return -EPROBE_DEFER;

	ufs_qcom_phy->host_ctrl_rev_major = major;
	ufs_qcom_phy->host_ctrl_rev_minor = minor;
	ufs_qcom_phy->host_ctrl_rev_step = step;

	return 0;
}
EXPORT_SYMBOL(ufs_qcom_phy_save_controller_version);

void ufs_qcom_phy_set_src_clk_h8_enter(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (!ufs_qcom_phy->rx_sym0_mux_clk || !ufs_qcom_phy->rx_sym1_mux_clk ||
	    !ufs_qcom_phy->tx_sym0_mux_clk || !ufs_qcom_phy->ref_clk_src)
		return;

	/*
	 * Before entering hibernate, select XO as the source of the symbol
	 * clocks according to the UFS Host Controller Hardware Programming
	 * Guide's "Hibernate enter with power collapse".
	 */
	clk_set_parent(ufs_qcom_phy->rx_sym0_mux_clk, ufs_qcom_phy->ref_clk_src);
	clk_set_parent(ufs_qcom_phy->rx_sym1_mux_clk, ufs_qcom_phy->ref_clk_src);
	clk_set_parent(ufs_qcom_phy->tx_sym0_mux_clk, ufs_qcom_phy->ref_clk_src);
}
EXPORT_SYMBOL(ufs_qcom_phy_set_src_clk_h8_enter);

void ufs_qcom_phy_set_src_clk_h8_exit(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (!ufs_qcom_phy->rx_sym0_mux_clk ||
	    !ufs_qcom_phy->rx_sym1_mux_clk ||
	    !ufs_qcom_phy->tx_sym0_mux_clk ||
	    !ufs_qcom_phy->rx_sym0_phy_clk ||
	    !ufs_qcom_phy->rx_sym1_phy_clk ||
	    !ufs_qcom_phy->tx_sym0_phy_clk)
		return;

	/*
	 * Refer to the UFS Host Controller Hardware Programming Guide's
	 * section "Hibernate exit from power collapse". Select the PHY
	 * clocks as the source of the PHY symbol clocks.
	 */
	clk_set_parent(ufs_qcom_phy->rx_sym0_mux_clk, ufs_qcom_phy->rx_sym0_phy_clk);
	clk_set_parent(ufs_qcom_phy->rx_sym1_mux_clk, ufs_qcom_phy->rx_sym1_phy_clk);
	clk_set_parent(ufs_qcom_phy->tx_sym0_mux_clk, ufs_qcom_phy->tx_sym0_phy_clk);
}
EXPORT_SYMBOL(ufs_qcom_phy_set_src_clk_h8_exit);

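/*
 * Example (a hypothetical sketch): the UFS host controller driver would
 * bracket a hibern8-with-power-collapse cycle with these two helpers:
 *
 *	ufs_qcom_phy_set_src_clk_h8_enter(generic_phy);
 *	// ... PHY power collapse and restore ...
 *	ufs_qcom_phy_set_src_clk_h8_exit(generic_phy);
 */
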
static int ufs_qcom_phy_is_pcs_ready(struct ufs_qcom_phy *ufs_qcom_phy)
{
	if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
		dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
			__func__);
		return -EOPNOTSUPP;
	}

	return ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready(ufs_qcom_phy);
}

int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
	struct device *dev = phy_common->dev;
	int err;

	if (phy_common->vdd_phy_gdsc.reg) {
		err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdd_phy_gdsc);
		if (err) {
			dev_err(dev, "%s enable phy_gdsc failed, err=%d\n",
				__func__, err);
			goto out;
		}
	}

	if (phy_common->vdda_qref.reg) {
		err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_qref);
		if (err) {
			dev_err(dev, "%s enable vdda_qref failed, err=%d\n",
				__func__, err);
			goto out;
		}
	}

	err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
	if (err) {
		dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
			__func__, err);
		goto out;
	}

	phy_common->phy_spec_ops->power_control(phy_common, true);

	/* vdda_pll also enables the ref clock LDOs so enable it first */
	err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll);
	if (err) {
		dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
			__func__, err);
		goto out_disable_phy;
	}

	err = ufs_qcom_phy_enable_iface_clk(phy_common);
	if (err) {
		dev_err(dev, "%s enable phy iface clock failed, err=%d\n",
			__func__, err);
		goto out_disable_pll;
	}

	err = ufs_qcom_phy_enable_ref_clk(phy_common);
	if (err) {
		dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
			__func__, err);
		goto out_disable_iface_clk;
	}

	/* enable device PHY ref_clk pad rail */
	if (phy_common->vddp_ref_clk.reg) {
		err = ufs_qcom_phy_enable_vreg(dev,
					       &phy_common->vddp_ref_clk);
		if (err) {
			dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
				__func__, err);
			goto out_disable_ref_clk;
		}
	}

	goto out;

out_disable_ref_clk:
	ufs_qcom_phy_disable_ref_clk(phy_common);
out_disable_iface_clk:
	ufs_qcom_phy_disable_iface_clk(phy_common);
out_disable_pll:
	ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll);
out_disable_phy:
	ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy);
out:
	return err;
}
EXPORT_SYMBOL(ufs_qcom_phy_power_on);

int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);

	phy_common->phy_spec_ops->power_control(phy_common, false);

	if (phy_common->vddp_ref_clk.reg)
		ufs_qcom_phy_disable_vreg(phy_common->dev,
					  &phy_common->vddp_ref_clk);

	ufs_qcom_phy_disable_ref_clk(phy_common);
	ufs_qcom_phy_disable_iface_clk(phy_common);

	ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
	ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);

	if (phy_common->vdda_qref.reg)
		ufs_qcom_phy_disable_vreg(phy_common->dev,
					  &phy_common->vdda_qref);

	return 0;
}
EXPORT_SYMBOL(ufs_qcom_phy_power_off);

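/*
 * Example (a hypothetical sketch; the ops names are illustrative): a
 * device-specific driver typically wires these two helpers straight into
 * its struct phy_ops:
 *
 *	static const struct phy_ops ufs_qcom_phy_qmp_xxx_phy_ops = {
 *		.power_on	= ufs_qcom_phy_power_on,
 *		.power_off	= ufs_qcom_phy_power_off,
 *		.owner		= THIS_MODULE,
 *	};
 */
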
void ufs_qcom_phy_ctrl_rx_linecfg(struct phy *generic_phy, bool ctrl)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (ufs_qcom_phy->phy_spec_ops->ctrl_rx_linecfg)
		ufs_qcom_phy->phy_spec_ops->ctrl_rx_linecfg(ufs_qcom_phy, ctrl);
}
EXPORT_SYMBOL(ufs_qcom_phy_ctrl_rx_linecfg);

int ufs_qcom_phy_get_tx_hs_equalizer(struct phy *generic_phy, u32 gear, u32 *val)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (!ufs_qcom_phy->phy_spec_ops->get_tx_hs_equalizer)
		return -EOPNOTSUPP;

	*val = ufs_qcom_phy->phy_spec_ops->get_tx_hs_equalizer(ufs_qcom_phy, gear);

	return 0;
}
EXPORT_SYMBOL(ufs_qcom_phy_get_tx_hs_equalizer);

int ufs_qcom_phy_dump_regs(struct ufs_qcom_phy *phy, int offset,
			   int len, char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_KERNEL);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4)
		regs[pos / 4] = readl_relaxed(phy->mmio + offset + pos);

	print_hex_dump(KERN_ERR, prefix,
		       len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
		       16, 4, regs, len, false);

	kfree(regs);
	return 0;
}
EXPORT_SYMBOL(ufs_qcom_phy_dump_regs);

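/*
 * Example (illustrative values only): dump 64 bytes of registers starting
 * at a 4-byte-aligned offset, with a recognizable log prefix:
 *
 *	ufs_qcom_phy_dump_regs(phy, 0xc00, 64, "PHY PCS: ");
 */
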
/**
 * ufs_qcom_phy_save_regs - save the specified domain of UFS PHY registers to memory
 * @phy: pointer to the ufs qcom phy
 * @offset: register address offset
 * @len: size of this domain
 * @prefix: name of this domain
 *
 * Return: 0 on success, -EINVAL for an unaligned offset or length,
 * -ENOMEM on allocation failure.
 */
int ufs_qcom_phy_save_regs(struct ufs_qcom_phy *phy, int offset,
			   int len, char *prefix)
{
	struct ufs_qcom_phy_regs *regs = NULL;
	struct list_head *head = &phy->regs_list_head;
	size_t pos;
	unsigned int noio_flag;

	if (offset % 4 != 0 || len % 4 != 0)
		return -EINVAL;

	/* find the node if this register domain has been saved before */
	list_for_each_entry(regs, head, list)
		if (regs->prefix && !strcmp(regs->prefix, prefix))
			break;

	/* create a new node and add it to the list if this domain has never been saved */
	if (&regs->list == head) {
		/*
		 * use memalloc_noio_save() here as GFP_ATOMIC should not be
		 * invoked in an IO error context
		 */
		noio_flag = memalloc_noio_save();
		regs = devm_kzalloc(phy->dev, sizeof(*regs), GFP_ATOMIC);
		if (!regs)
			goto out;
		regs->ptr = devm_kzalloc(phy->dev, len, GFP_ATOMIC);
		if (!regs->ptr)
			goto out;
		memalloc_noio_restore(noio_flag);

		regs->prefix = prefix;
		regs->len = len;
		list_add_tail(&regs->list, &phy->regs_list_head);
	}

	for (pos = 0; pos < len; pos += 4)
		regs->ptr[pos / 4] = readl_relaxed(phy->mmio + offset + pos);

	return 0;

out:
	memalloc_noio_restore(noio_flag);
	return -ENOMEM;
}
EXPORT_SYMBOL(ufs_qcom_phy_save_regs);

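/*
 * Example (illustrative values only): snapshot a register domain from an
 * error handler; repeated calls with the same prefix reuse the node that
 * was allocated on the first call:
 *
 *	ufs_qcom_phy_save_regs(phy, 0xc00, 64, "PHY PCS: ");
 */
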
void ufs_qcom_phy_dbg_register_dump(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (ufs_qcom_phy->phy_spec_ops->dbg_register_dump)
		ufs_qcom_phy->phy_spec_ops->dbg_register_dump(ufs_qcom_phy);
}
EXPORT_SYMBOL(ufs_qcom_phy_dbg_register_dump);

void ufs_qcom_phy_dbg_register_save(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (ufs_qcom_phy->phy_spec_ops->dbg_register_save)
		ufs_qcom_phy->phy_spec_ops->dbg_register_save(ufs_qcom_phy);
}
EXPORT_SYMBOL(ufs_qcom_phy_dbg_register_save);

MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY");
MODULE_LICENSE("GPL v2");