phy-msm-qusb-v2.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/module.h>
  7. #include <linux/kernel.h>
  8. #include <linux/err.h>
  9. #include <linux/slab.h>
  10. #include <linux/clk.h>
  11. #include <linux/delay.h>
  12. #include <linux/io.h>
  13. #include <linux/of.h>
  14. #include <linux/platform_device.h>
  15. #include <linux/power_supply.h>
  16. #include <linux/regulator/consumer.h>
  17. #include <linux/regulator/driver.h>
  18. #include <linux/regulator/machine.h>
  19. #include <linux/usb/phy.h>
  20. #include <linux/usb/dwc3-msm.h>
  21. #include <linux/reset.h>
  22. #include <linux/debugfs.h>
  23. /* QUSB2PHY_PWR_CTRL1 register related bits */
  24. #define PWR_CTRL1_POWR_DOWN BIT(0)
  25. #define CLAMP_N_EN BIT(1)
  26. /* QUSB2PHY_PLL_COMMON_STATUS_ONE register related bits */
  27. #define CORE_READY_STATUS BIT(0)
  28. /* Get TUNE value from efuse bit-mask */
  29. #define TUNE_VAL_MASK(val, pos, mask) ((val >> pos) & mask)
  30. /* QUSB2PHY_INTR_CTRL register related bits */
  31. #define DMSE_INTR_HIGH_SEL BIT(4)
  32. #define DPSE_INTR_HIGH_SEL BIT(3)
  33. #define CHG_DET_INTR_EN BIT(2)
  34. #define DMSE_INTR_EN BIT(1)
  35. #define DPSE_INTR_EN BIT(0)
  36. /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE register related bits */
  37. #define CORE_PLL_RATE BIT(0)
  38. #define CORE_PLL_RATE_MUX BIT(1)
  39. #define CORE_PLL_EN BIT(2)
  40. #define CORE_PLL_EN_MUX BIT(3)
  41. #define CORE_PLL_EN_FROM_RESET BIT(4)
  42. #define CORE_RESET BIT(5)
  43. #define CORE_RESET_MUX BIT(6)
  44. #define QUSB2PHY_1P8_VOL_MIN 1800000 /* uV */
  45. #define QUSB2PHY_1P8_VOL_MAX 1800000 /* uV */
  46. #define QUSB2PHY_1P8_HPM_LOAD 30000 /* uA */
  47. #define QUSB2PHY_3P3_VOL_MIN 3075000 /* uV */
  48. #define QUSB2PHY_3P3_VOL_MAX 3200000 /* uV */
  49. #define QUSB2PHY_3P3_HPM_LOAD 30000 /* uA */
  50. #define QUSB2PHY_REFGEN_VOL_MIN 1200000 /* uV */
  51. #define QUSB2PHY_REFGEN_VOL_MAX 1200000 /* uV */
  52. #define QUSB2PHY_REFGEN_HPM_LOAD 30000 /* uA */
  53. #define LINESTATE_DP BIT(0)
  54. #define LINESTATE_DM BIT(1)
  55. #define BIAS_CTRL_2_OVERRIDE_VAL 0x28
  56. #define DEBUG_CTRL1_OVERRIDE_VAL 0x09
  57. /* PERIPH_SS_PHY_REFGEN_NORTH_BG_CTRL register bits */
  58. #define BANDGAP_BYPASS BIT(0)
  59. /* DEBUG_CTRL2 register value to program VSTATUS MUX for PHY status */
  60. #define DEBUG_CTRL2_MUX_PLL_LOCK_STATUS 0x4
  61. /* STAT5 register bits */
  62. #define VSTATUS_PLL_LOCK_STATUS_MASK BIT(0)
  63. /* DEBUG_CTRL4 register bits */
  64. #define FORCED_UTMI_DPPULLDOWN BIT(2)
  65. #define FORCED_UTMI_DMPULLDOWN BIT(3)
  /*
   * Logical indices into qphy->phy_reg[], the per-SoC table of register
   * offsets (qusb_phy_reg_offset_cnt entries — presumably populated from
   * DT; the parsing code is outside this chunk).  USB2_PHY_REG_MAX is
   * the table size.
   */
  66. enum qusb_phy_reg {
  67. PORT_TUNE1,
  68. PLL_COMMON_STATUS_ONE,
  69. PWR_CTRL1,
  70. INTR_CTRL,
  71. PLL_CORE_INPUT_OVERRIDE,
  72. TEST1,
  73. BIAS_CTRL_2,
  74. DEBUG_CTRL1,
  75. DEBUG_CTRL2,
  76. DEBUG_CTRL3,
  77. DEBUG_CTRL4,
  78. STAT5,
  79. USB2_PHY_REG_MAX,
  80. };
  /*
   * struct qusb_phy - state for one QUSB2 v2 PHY instance.
   *
   * Embeds the generic usb_phy and carries MMIO bases, clock/reset and
   * regulator handles, DT-provided init sequences, efuse tuning state
   * and debugfs tuning overrides.  'lock' serializes the power on/off
   * paths; 'power_enabled'/'clocks_enabled' cache state so those paths
   * stay balanced.
   */
  81. struct qusb_phy {
  82. struct usb_phy phy;
  83. struct mutex lock;
  /* MMIO regions: PHY core, efuse, refgen bandgap ctrl, EUD enable */
  84. void __iomem *base;
  85. void __iomem *efuse_reg;
  86. void __iomem *refgen_north_bg_reg;
  87. void __iomem *eud_enable_reg;
  /* clocks and block reset */
  88. struct clk *ref_clk_src;
  89. struct clk *ref_clk;
  90. struct clk *cfg_ahb_clk;
  91. struct reset_control *phy_reset;
  /* power rails, brought up in this order by qusb_phy_enable_power() */
  92. struct regulator *vdd;
  93. struct regulator *vdda33;
  94. struct regulator *vdda18;
  95. struct regulator *refgen;
  96. int vdd_levels[3]; /* none, low, high */
  /* init sequences: {value, offset} pairs consumed by qusb_phy_write_seq() */
  97. int init_seq_len;
  98. int *qusb_phy_init_seq;
  99. int host_init_seq_len;
  100. int *qusb_phy_host_init_seq;
  /* register offset table indexed by enum qusb_phy_reg */
  101. unsigned int *phy_reg;
  102. int qusb_phy_reg_offset_cnt;
  /* efuse-derived TUNE1 value and where to find it in the fuse word */
  103. u32 tune_val;
  104. int efuse_bit_pos;
  105. int efuse_num_of_bits;
  106. bool cable_connected;
  107. bool suspended;
  /* dpdm pseudo-regulator exposed to the USB stack */
  108. bool dpdm_enable;
  109. struct regulator_desc dpdm_rdesc;
  110. struct regulator_dev *dpdm_rdev;
  111. /* emulation targets specific */
  112. void __iomem *emu_phy_base;
  113. bool emulation;
  114. int *emu_init_seq;
  115. int emu_init_seq_len;
  116. int *phy_pll_reset_seq;
  117. int phy_pll_reset_seq_len;
  118. int *emu_dcm_reset_seq;
  119. int emu_dcm_reset_seq_len;
  120. /* override TUNEX registers value */
  121. struct dentry *root;
  122. u8 tune[5];
  123. u8 bias_ctrl2;
  124. bool override_bias_ctrl2;
  125. bool power_enabled;
  126. bool clocks_enabled;
  127. };
  128. static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
  129. {
  130. dev_dbg(qphy->phy.dev, "%s(): on:%d\n", __func__, on);
  131. if (qphy->clocks_enabled == on)
  132. return;
  133. if (on) {
  134. clk_prepare_enable(qphy->ref_clk_src);
  135. if (qphy->ref_clk)
  136. clk_prepare_enable(qphy->ref_clk);
  137. if (qphy->cfg_ahb_clk)
  138. clk_prepare_enable(qphy->cfg_ahb_clk);
  139. } else {
  140. if (qphy->cfg_ahb_clk)
  141. clk_disable_unprepare(qphy->cfg_ahb_clk);
  142. if (qphy->ref_clk)
  143. clk_disable_unprepare(qphy->ref_clk);
  144. clk_disable_unprepare(qphy->ref_clk_src);
  145. }
  146. qphy->clocks_enabled = on;
  147. }
  148. static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
  149. {
  150. int min, ret;
  151. min = high ? 1 : 0; /* low or none? */
  152. ret = regulator_set_voltage(qphy->vdd, qphy->vdd_levels[min],
  153. qphy->vdd_levels[2]);
  154. if (ret) {
  155. dev_err(qphy->phy.dev, "unable to set voltage for qusb vdd\n");
  156. return ret;
  157. }
  158. dev_dbg(qphy->phy.dev, "min_vol:%d max_vol:%d\n",
  159. qphy->vdd_levels[min], qphy->vdd_levels[2]);
  160. return ret;
  161. }
  /*
   * Turn off all PHY supplies in reverse bring-up order:
   * refgen -> vdda33 -> vdda18 -> vdd.  For each rail the voltage/load
   * are dropped to zero only once regulator_is_enabled() reports the
   * (possibly shared) supply is fully off.  Individual failures are
   * logged but teardown continues best-effort.
   *
   * NOTE(review): 'ret' is overwritten by every call below, so the
   * return value reflects only the last operation — a mid-sequence
   * failure can still yield 0.  Confirm callers only need best-effort
   * semantics.
   *
   * Serialized by qphy->lock; no-op when power is already off.
   */
  162. static int qusb_phy_disable_power(struct qusb_phy *qphy)
  163. {
  164. int ret = 0;
  165. mutex_lock(&qphy->lock);
  166. if (!qphy->power_enabled) {
  167. mutex_unlock(&qphy->lock);
  168. return 0;
  169. }
  170. dev_dbg(qphy->phy.dev, "%s:req to turn off regulators\n",
  171. __func__);
  /* refgen: disable, then zero voltage/load once fully off */
  172. ret = regulator_disable(qphy->refgen);
  173. if (ret)
  174. dev_err(qphy->phy.dev, "Unable to disable refgen:%d\n", ret);
  175. if (!regulator_is_enabled(qphy->refgen)) {
  176. ret = regulator_set_voltage(qphy->refgen, 0,
  177. QUSB2PHY_REFGEN_VOL_MAX);
  178. if (ret)
  179. dev_err(qphy->phy.dev,
  180. "Unable to set (0) voltage for refgen:%d\n",
  181. ret);
  182. ret = regulator_set_load(qphy->refgen, 0);
  183. if (ret < 0)
  184. dev_err(qphy->phy.dev,
  185. "Unable to set (0) HPM of refgen\n");
  186. }
  /* vdda33: same pattern */
  187. ret = regulator_disable(qphy->vdda33);
  188. if (ret)
  189. dev_err(qphy->phy.dev, "Unable to disable vdda33:%d\n", ret);
  190. if (!regulator_is_enabled(qphy->vdda33)) {
  191. ret = regulator_set_voltage(qphy->vdda33, 0,
  192. QUSB2PHY_3P3_VOL_MAX);
  193. if (ret)
  194. dev_err(qphy->phy.dev,
  195. "Unable to set (0) voltage for vdda33:%d\n",
  196. ret);
  197. ret = regulator_set_load(qphy->vdda33, 0);
  198. if (ret < 0)
  199. dev_err(qphy->phy.dev,
  200. "Unable to set (0) HPM of vdda33\n");
  201. }
  /* vdda18: same pattern */
  202. ret = regulator_disable(qphy->vdda18);
  203. if (ret)
  204. dev_err(qphy->phy.dev, "Unable to disable vdda18:%d\n", ret);
  205. if (!regulator_is_enabled(qphy->vdda18)) {
  206. ret = regulator_set_voltage(qphy->vdda18, 0,
  207. QUSB2PHY_1P8_VOL_MAX);
  208. if (ret)
  209. dev_err(qphy->phy.dev,
  210. "Unable to set (0) voltage for vdda18:%d\n", ret);
  211. ret = regulator_set_load(qphy->vdda18, 0);
  212. if (ret < 0)
  213. dev_err(qphy->phy.dev,
  214. "Unable to set LPM of vdda18\n");
  215. }
  /* vdd last: drop the corner back to "none" once it is off */
  216. ret = regulator_disable(qphy->vdd);
  217. if (ret)
  218. dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n", ret);
  219. if (!regulator_is_enabled(qphy->vdd)) {
  220. ret = qusb_phy_config_vdd(qphy, false);
  221. if (ret)
  222. dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
  223. ret);
  224. }
  225. pr_debug("%s(): QUSB PHY's regulators are turned OFF.\n", __func__);
  226. qphy->power_enabled = false;
  227. mutex_unlock(&qphy->lock);
  228. return ret;
  229. }
  230. static int qusb_phy_enable_power(struct qusb_phy *qphy)
  231. {
  232. int ret = 0;
  233. mutex_lock(&qphy->lock);
  234. if (qphy->power_enabled) {
  235. mutex_unlock(&qphy->lock);
  236. return 0;
  237. }
  238. dev_dbg(qphy->phy.dev, "%s:req to turn on regulators\n",
  239. __func__);
  240. ret = qusb_phy_config_vdd(qphy, true);
  241. if (ret) {
  242. dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
  243. ret);
  244. goto err_vdd;
  245. }
  246. ret = regulator_enable(qphy->vdd);
  247. if (ret) {
  248. dev_err(qphy->phy.dev, "Unable to enable VDD\n");
  249. goto unconfig_vdd;
  250. }
  251. ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
  252. if (ret < 0) {
  253. dev_err(qphy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret);
  254. goto disable_vdd;
  255. }
  256. ret = regulator_set_voltage(qphy->vdda18, QUSB2PHY_1P8_VOL_MIN,
  257. QUSB2PHY_1P8_VOL_MAX);
  258. if (ret) {
  259. dev_err(qphy->phy.dev,
  260. "Unable to set voltage for vdda18:%d\n", ret);
  261. goto put_vdda18_lpm;
  262. }
  263. ret = regulator_enable(qphy->vdda18);
  264. if (ret) {
  265. dev_err(qphy->phy.dev, "Unable to enable vdda18:%d\n", ret);
  266. goto unset_vdda18;
  267. }
  268. ret = regulator_set_load(qphy->vdda33, QUSB2PHY_3P3_HPM_LOAD);
  269. if (ret < 0) {
  270. dev_err(qphy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret);
  271. goto disable_vdda18;
  272. }
  273. ret = regulator_set_voltage(qphy->vdda33, QUSB2PHY_3P3_VOL_MIN,
  274. QUSB2PHY_3P3_VOL_MAX);
  275. if (ret) {
  276. dev_err(qphy->phy.dev,
  277. "Unable to set voltage for vdda33:%d\n", ret);
  278. goto put_vdda33_lpm;
  279. }
  280. ret = regulator_enable(qphy->vdda33);
  281. if (ret) {
  282. dev_err(qphy->phy.dev, "Unable to enable vdda33:%d\n", ret);
  283. goto unset_vdd33;
  284. }
  285. ret = regulator_set_load(qphy->refgen, QUSB2PHY_REFGEN_HPM_LOAD);
  286. if (ret < 0) {
  287. dev_err(qphy->phy.dev, "Unable to set HPM of refgen:%d\n", ret);
  288. goto disable_vdd33;
  289. }
  290. ret = regulator_set_voltage(qphy->refgen, QUSB2PHY_REFGEN_VOL_MIN,
  291. QUSB2PHY_REFGEN_VOL_MAX);
  292. if (ret) {
  293. dev_err(qphy->phy.dev,
  294. "Unable to set voltage for refgen:%d\n", ret);
  295. goto put_refgen_lpm;
  296. }
  297. ret = regulator_enable(qphy->refgen);
  298. if (ret) {
  299. dev_err(qphy->phy.dev, "Unable to enable refgen\n");
  300. goto unset_refgen;
  301. }
  302. pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
  303. qphy->power_enabled = true;
  304. mutex_unlock(&qphy->lock);
  305. return ret;
  306. unset_refgen:
  307. ret = regulator_set_voltage(qphy->refgen, 0, QUSB2PHY_REFGEN_VOL_MAX);
  308. if (ret)
  309. dev_err(qphy->phy.dev,
  310. "Unable to set (0) voltage for refgen:%d\n", ret);
  311. put_refgen_lpm:
  312. ret = regulator_set_load(qphy->refgen, 0);
  313. if (ret < 0)
  314. dev_err(qphy->phy.dev, "Unable to set (0) HPM of refgen\n");
  315. disable_vdd33:
  316. ret = regulator_disable(qphy->vdda33);
  317. if (ret)
  318. dev_err(qphy->phy.dev, "Unable to disable vdda33:%d\n", ret);
  319. unset_vdd33:
  320. ret = regulator_set_voltage(qphy->vdda33, 0, QUSB2PHY_3P3_VOL_MAX);
  321. if (ret)
  322. dev_err(qphy->phy.dev,
  323. "Unable to set (0) voltage for vdda33:%d\n", ret);
  324. put_vdda33_lpm:
  325. ret = regulator_set_load(qphy->vdda33, 0);
  326. if (ret < 0)
  327. dev_err(qphy->phy.dev, "Unable to set (0) HPM of vdda33\n");
  328. disable_vdda18:
  329. ret = regulator_disable(qphy->vdda18);
  330. if (ret)
  331. dev_err(qphy->phy.dev, "Unable to disable vdda18:%d\n", ret);
  332. unset_vdda18:
  333. ret = regulator_set_voltage(qphy->vdda18, 0, QUSB2PHY_1P8_VOL_MAX);
  334. if (ret)
  335. dev_err(qphy->phy.dev,
  336. "Unable to set (0) voltage for vdda18:%d\n", ret);
  337. put_vdda18_lpm:
  338. ret = regulator_set_load(qphy->vdda18, 0);
  339. if (ret < 0)
  340. dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
  341. disable_vdd:
  342. ret = regulator_disable(qphy->vdd);
  343. if (ret)
  344. dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
  345. ret);
  346. unconfig_vdd:
  347. ret = qusb_phy_config_vdd(qphy, false);
  348. if (ret)
  349. dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
  350. ret);
  351. err_vdd:
  352. mutex_unlock(&qphy->lock);
  353. return ret;
  354. }
  355. static void qusb_phy_get_tune1_param(struct qusb_phy *qphy)
  356. {
  357. u8 reg;
  358. u32 bit_mask = 1;
  359. pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
  360. qphy->efuse_num_of_bits,
  361. qphy->efuse_bit_pos);
  362. /* get bit mask based on number of bits to use with efuse reg */
  363. bit_mask = (bit_mask << qphy->efuse_num_of_bits) - 1;
  364. /*
  365. * For 8nm zero is treated as a valid efuse value and driver
  366. * should program the tune1 reg based on efuse value
  367. */
  368. qphy->tune_val = readl_relaxed(qphy->efuse_reg);
  369. pr_debug("%s(): bit_mask:%d efuse based tune1 value:%d\n",
  370. __func__, bit_mask, qphy->tune_val);
  371. qphy->tune_val = TUNE_VAL_MASK(qphy->tune_val,
  372. qphy->efuse_bit_pos, bit_mask);
  373. reg = readb_relaxed(qphy->base + qphy->phy_reg[PORT_TUNE1]);
  374. reg = reg & 0x0f;
  375. reg |= (qphy->tune_val << 4);
  376. qphy->tune_val = reg;
  377. }
  378. static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
  379. unsigned long delay)
  380. {
  381. int i;
  382. pr_debug("Seq count:%d\n", cnt);
  383. for (i = 0; i < cnt; i = i+2) {
  384. pr_debug("write 0x%02x to 0x%02x\n", seq[i], seq[i+1]);
  385. writel_relaxed(seq[i], base + seq[i+1]);
  386. if (delay)
  387. usleep_range(delay, (delay + 2000));
  388. }
  389. }
  390. static void msm_usb_write_readback(void __iomem *base, u32 offset,
  391. const u32 mask, u32 val)
  392. {
  393. u32 write_val, tmp = readl_relaxed(base + offset);
  394. tmp &= ~mask; /* retain other bits */
  395. write_val = tmp | val;
  396. writel_relaxed(write_val, base + offset);
  397. /* Read back to see if val was written */
  398. tmp = readl_relaxed(base + offset);
  399. tmp &= mask; /* clear other bits */
  400. if (tmp != val)
  401. pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
  402. __func__, val, offset);
  403. }
  404. static void qusb_phy_reset(struct qusb_phy *qphy)
  405. {
  406. int ret;
  407. ret = reset_control_assert(qphy->phy_reset);
  408. if (ret)
  409. dev_err(qphy->phy.dev, "%s: phy_reset assert failed\n",
  410. __func__);
  411. usleep_range(100, 150);
  412. ret = reset_control_deassert(qphy->phy_reset);
  413. if (ret)
  414. dev_err(qphy->phy.dev, "%s: phy_reset deassert failed\n",
  415. __func__);
  416. }
  417. static bool qusb_phy_pll_locked(struct qusb_phy *qphy)
  418. {
  419. u32 val;
  420. writel_relaxed(DEBUG_CTRL2_MUX_PLL_LOCK_STATUS,
  421. qphy->base + qphy->phy_reg[DEBUG_CTRL2]);
  422. val = readl_relaxed(qphy->base + qphy->phy_reg[STAT5]);
  423. return (val & VSTATUS_PLL_LOCK_STATUS_MASK);
  424. }
  /*
   * Host-mode PHY init: play the DT host init sequence, program TUNE1
   * (from efuse when fused, otherwise keep the DT/reset value), apply
   * DEBUG_CTRL1 and debugfs/bias overrides, then poll
   * PLL_COMMON_STATUS_ONE once for CORE_READY_STATUS.  A lock failure
   * is only logged.  Called from qusb_phy_init() with power, clocks
   * and reset already handled.
   */
  425. static void qusb_phy_host_init(struct usb_phy *phy)
  426. {
  427. u8 reg;
  428. int p_index;
  429. struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
  430. qusb_phy_write_seq(qphy->base, qphy->qusb_phy_host_init_seq,
  431. qphy->host_init_seq_len, 0);
  /* tune_val is cached; the efuse is only read on the first init */
  432. if (qphy->efuse_reg) {
  433. if (!qphy->tune_val)
  434. qusb_phy_get_tune1_param(qphy);
  435. } else {
  436. /* For non fused chips we need to write the TUNE1 param as
  437. * specified in DT otherwise we will end up writing 0 to
  438. * TUNE1
  439. */
  440. qphy->tune_val = readb_relaxed(qphy->base +
  441. qphy->phy_reg[PORT_TUNE1]);
  442. }
  /* BIT(7) is set on top of tune_val in host mode only — note the
   * device-mode path in qusb_phy_init() writes tune_val alone.
   */
  443. writel_relaxed(qphy->tune_val | BIT(7),
  444. qphy->base + qphy->phy_reg[PORT_TUNE1]);
  445. pr_debug("%s(): Programming TUNE1 parameter as:%x\n",
  446. __func__, readb_relaxed(qphy->base +
  447. qphy->phy_reg[PORT_TUNE1]));
  448. writel_relaxed(DEBUG_CTRL1_OVERRIDE_VAL,
  449. qphy->base + qphy->phy_reg[DEBUG_CTRL1]);
  450. /* if debugfs based tunex params are set, use that value. */
  451. for (p_index = 0; p_index < 5; p_index++) {
  452. if (qphy->tune[p_index])
  453. writel_relaxed(qphy->tune[p_index],
  454. qphy->base + qphy->phy_reg[PORT_TUNE1] +
  455. (4 * p_index));
  456. }
  /* Bias override applies only when the refgen bandgap is bypassed */
  457. if (qphy->refgen_north_bg_reg && qphy->override_bias_ctrl2)
  458. if (readl_relaxed(qphy->refgen_north_bg_reg) & BANDGAP_BYPASS)
  459. writel_relaxed(BIAS_CTRL_2_OVERRIDE_VAL,
  460. qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
  461. if (qphy->bias_ctrl2)
  462. writel_relaxed(qphy->bias_ctrl2,
  463. qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
  464. /* Ensure above write is completed before turning ON ref clk */
  465. wmb();
  466. /* Require to get phy pll lock successfully */
  467. usleep_range(150, 160);
  468. reg = readb_relaxed(qphy->base + qphy->phy_reg[PLL_COMMON_STATUS_ONE]);
  469. dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
  470. if (!(reg & CORE_READY_STATUS))
  471. dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
  472. }
  /*
   * Main PHY init entry point (usb_phy .init).
   *
   * Flow: bail out if EUD owns the PHY; power + clocks + block reset;
   * then one of three paths: host-mode init (delegates to
   * qusb_phy_host_init), emulation/RUMI sequences, or the normal
   * device-mode path: power the PHY down, play the DT init sequence,
   * program TUNE1 (efuse or debugfs overrides), apply bias overrides,
   * power the PHY back up and check CORE_READY_STATUS once.
   * Always returns 0 — PLL-lock failure only logs and WARNs.
   */
  473. static int qusb_phy_init(struct usb_phy *phy)
  474. {
  475. struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
  476. int p_index;
  477. u8 reg;
  /* When the Embedded USB Debugger owns the PHY, leave it untouched */
  478. if (qphy->eud_enable_reg && readl_relaxed(qphy->eud_enable_reg)) {
  479. dev_err(qphy->phy.dev, "eud is enabled\n");
  480. return 0;
  481. }
  /* NOTE(review): qusb_phy_enable_power() return value is ignored here */
  482. qusb_phy_enable_power(qphy);
  483. qusb_phy_enable_clocks(qphy, true);
  484. qusb_phy_reset(qphy);
  485. if (qphy->qusb_phy_host_init_seq && qphy->phy.flags & PHY_HOST_MODE) {
  486. qusb_phy_host_init(phy);
  487. return 0;
  488. }
  489. if (qphy->emulation) {
  490. if (qphy->emu_init_seq)
  491. qusb_phy_write_seq(qphy->emu_phy_base + 0x8000,
  492. qphy->emu_init_seq,
  493. qphy->emu_init_seq_len, 10000);
  494. if (qphy->qusb_phy_init_seq)
  495. qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
  496. qphy->init_seq_len, 0);
  497. /* Wait for 5ms as per QUSB2 RUMI sequence */
  498. usleep_range(5000, 7000);
  499. if (qphy->phy_pll_reset_seq)
  500. qusb_phy_write_seq(qphy->base, qphy->phy_pll_reset_seq,
  501. qphy->phy_pll_reset_seq_len, 10000);
  502. if (qphy->emu_dcm_reset_seq)
  503. qusb_phy_write_seq(qphy->emu_phy_base,
  504. qphy->emu_dcm_reset_seq,
  505. qphy->emu_dcm_reset_seq_len, 10000);
  506. return 0;
  507. }
  508. /* Disable the PHY */
  509. writel_relaxed(readl_relaxed(qphy->base + qphy->phy_reg[PWR_CTRL1]) |
  510. PWR_CTRL1_POWR_DOWN,
  511. qphy->base + qphy->phy_reg[PWR_CTRL1]);
  512. if (qphy->qusb_phy_init_seq)
  513. qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
  514. qphy->init_seq_len, 0);
  /* efuse-derived TUNE1 (cached after first read) */
  515. if (qphy->efuse_reg) {
  516. if (!qphy->tune_val)
  517. qusb_phy_get_tune1_param(qphy);
  518. pr_debug("%s(): Programming TUNE1 parameter as:%x\n", __func__,
  519. qphy->tune_val);
  520. writel_relaxed(qphy->tune_val,
  521. qphy->base + qphy->phy_reg[PORT_TUNE1]);
  522. }
  523. /* if debugfs based tunex params are set, use that value. */
  524. for (p_index = 0; p_index < 5; p_index++) {
  525. if (qphy->tune[p_index])
  526. writel_relaxed(qphy->tune[p_index],
  527. qphy->base + qphy->phy_reg[PORT_TUNE1] +
  528. (4 * p_index));
  529. }
  /* Bias override applies only when the refgen bandgap is bypassed */
  530. if (qphy->refgen_north_bg_reg && qphy->override_bias_ctrl2)
  531. if (readl_relaxed(qphy->refgen_north_bg_reg) & BANDGAP_BYPASS)
  532. writel_relaxed(BIAS_CTRL_2_OVERRIDE_VAL,
  533. qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
  534. if (qphy->bias_ctrl2)
  535. writel_relaxed(qphy->bias_ctrl2,
  536. qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
  537. /* ensure above writes are completed before re-enabling PHY */
  538. wmb();
  539. /* Enable the PHY */
  540. writel_relaxed(readl_relaxed(qphy->base + qphy->phy_reg[PWR_CTRL1]) &
  541. ~PWR_CTRL1_POWR_DOWN,
  542. qphy->base + qphy->phy_reg[PWR_CTRL1]);
  543. /* Ensure above write is completed before turning ON ref clk */
  544. wmb();
  545. /* Require to get phy pll lock successfully */
  546. usleep_range(150, 160);
  547. reg = readb_relaxed(qphy->base + qphy->phy_reg[PLL_COMMON_STATUS_ONE]);
  548. dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
  549. if (!(reg & CORE_READY_STATUS)) {
  550. dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
  551. WARN_ON(1);
  552. }
  553. return 0;
  554. }
  555. static void qusb_phy_shutdown(struct usb_phy *phy)
  556. {
  557. struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
  558. qusb_phy_disable_power(qphy);
  559. }
  560. static u32 qusb_phy_get_linestate(struct qusb_phy *qphy)
  561. {
  562. u32 linestate = 0;
  563. if (qphy->cable_connected) {
  564. if (qphy->phy.flags & PHY_HSFS_MODE)
  565. linestate |= LINESTATE_DP;
  566. else if (qphy->phy.flags & PHY_LS_MODE)
  567. linestate |= LINESTATE_DM;
  568. }
  569. return linestate;
  570. }
  571. /**
  572. * Performs QUSB2 PHY suspend/resume functionality.
  573. *
  574. * @uphy - usb phy pointer.
  575. * @suspend - to enable suspend or not. 1 - suspend, 0 - resume
  576. *
  577. */
  578. static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
  579. {
  580. struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
  581. u32 linestate = 0, intr_mask = 0;
  /* Already in the requested state: only PHY_SUS_OVERRIDE forces the
   * sequence to run again.  NOTE(review): 'suspended' is a bool
   * compared against an int — callers are expected to pass 0/1 only.
   */
  582. if (qphy->suspended == suspend) {
  583. if (qphy->phy.flags & PHY_SUS_OVERRIDE)
  584. goto suspend;
  585. dev_dbg(phy->dev, "%s: USB PHY is already suspended\n",
  586. __func__);
  587. return 0;
  588. }
  589. suspend:
  590. if (suspend) { /* Bus suspend case */
  591. /*
  592. * The HUB class drivers calls usb_phy_notify_disconnect() upon a device
  593. * disconnect. Consider a scenario where a USB device is disconnected without
  594. * detaching the OTG cable. phy->cable_connected is marked false due to above
  595. * mentioned call path. Now, while entering low power mode (host bus suspend),
  596. * we come here and turn off regulators thinking no cable is connected. Prevent
  597. * this by not turning off regulators while in host mode.
  598. */
  599. if (qphy->cable_connected || (qphy->phy.flags & PHY_HOST_MODE)) {
  600. /* Disable all interrupts */
  601. writel_relaxed(0x00,
  602. qphy->base + qphy->phy_reg[INTR_CTRL]);
  603. linestate = qusb_phy_get_linestate(qphy);
  604. /*
  605. * D+/D- interrupts are level-triggered, but we are
  606. * only interested if the line state changes, so enable
  607. * the high/low trigger based on current state. In
  608. * other words, enable the triggers _opposite_ of what
  609. * the current D+/D- levels are.
  610. * e.g. if currently D+ high, D- low (HS 'J'/Suspend),
  611. * configure the mask to trigger on D+ low OR D- high
  612. */
  613. intr_mask = DPSE_INTR_EN | DMSE_INTR_EN;
  614. if (!(linestate & LINESTATE_DP)) /* D+ low */
  615. intr_mask |= DPSE_INTR_HIGH_SEL;
  616. if (!(linestate & LINESTATE_DM)) /* D- low */
  617. intr_mask |= DMSE_INTR_HIGH_SEL;
  618. writel_relaxed(intr_mask,
  619. qphy->base + qphy->phy_reg[INTR_CTRL]);
  /* A driven line means a device is present: arm auto-resume
   * via the 0x91/0x90 TEST1 write pair.
   */
  620. if (linestate & (LINESTATE_DP | LINESTATE_DM)) {
  621. /* enable phy auto-resume */
  622. writel_relaxed(0x91,
  623. qphy->base + qphy->phy_reg[TEST1]);
  624. /* Delay recommended between TEST1 writes */
  625. usleep_range(10, 20);
  626. writel_relaxed(0x90,
  627. qphy->base + qphy->phy_reg[TEST1]);
  628. }
  629. dev_dbg(phy->dev, "%s: intr_mask = %x\n",
  630. __func__, intr_mask);
  631. /* Makes sure that above write goes through */
  632. wmb();
  633. qusb_phy_enable_clocks(qphy, false);
  634. } else { /* Cable disconnect case */
  635. /* Disable all interrupts */
  636. dev_dbg(phy->dev, "%s: phy->flags:0x%x\n",
  637. __func__, qphy->phy.flags);
  /* During an EUD spoofed disconnect the PHY must stay powered */
  638. if (!(qphy->phy.flags & EUD_SPOOF_DISCONNECT)) {
  639. dev_dbg(phy->dev, "turning off clocks/ldo\n");
  640. writel_relaxed(0x00,
  641. qphy->base + qphy->phy_reg[INTR_CTRL]);
  642. qusb_phy_reset(qphy);
  643. qusb_phy_enable_clocks(qphy, false);
  644. qusb_phy_disable_power(qphy);
  645. }
  646. }
  647. qphy->suspended = true;
  648. } else {
  649. /* Bus resume case */
  650. if (qphy->cable_connected) {
  651. qusb_phy_enable_clocks(qphy, true);
  652. /* Clear all interrupts on resume */
  653. writel_relaxed(0x00,
  654. qphy->base + qphy->phy_reg[INTR_CTRL]);
  655. /* Reset PLL if needed */
  656. if (!qusb_phy_pll_locked(qphy)) {
  657. dev_dbg(phy->dev, "%s: reset PLL\n", __func__);
  658. /* hold core PLL into reset */
  659. writel_relaxed(CORE_PLL_EN_FROM_RESET |
  660. CORE_RESET | CORE_RESET_MUX,
  661. qphy->base +
  662. qphy->phy_reg[PLL_CORE_INPUT_OVERRIDE]);
  663. /* Wait for PLL to get reset */
  664. usleep_range(10, 20);
  665. /* bring core PLL out of reset */
  666. writel_relaxed(CORE_PLL_EN_FROM_RESET,
  667. qphy->base +
  668. qphy->phy_reg[PLL_CORE_INPUT_OVERRIDE]);
  669. /* Makes sure that above write goes through */
  670. wmb();
  671. }
  672. } else { /* Cable connect case */
  673. qusb_phy_enable_power(qphy);
  674. qusb_phy_enable_clocks(qphy, true);
  675. }
  676. qphy->suspended = false;
  677. }
  678. return 0;
  679. }
  680. static int qusb_phy_notify_connect(struct usb_phy *phy,
  681. enum usb_device_speed speed)
  682. {
  683. struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
  684. qphy->cable_connected = true;
  685. dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
  686. qphy->cable_connected);
  687. return 0;
  688. }
  689. static int qusb_phy_notify_disconnect(struct usb_phy *phy,
  690. enum usb_device_speed speed)
  691. {
  692. struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
  693. qphy->cable_connected = false;
  694. dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
  695. qphy->cable_connected);
  696. return 0;
  697. }
  698. #define DP_PULSE_WIDTH_MSEC 200
  /*
   * Drive a 200ms D+ pulse (used for charger signaling): power/clock the
   * PHY, un-powerdown it, force the UTMI D- pulldown while releasing the
   * D+ pulldown, assert the DEBUG_CTRL3 0xd1 pattern and clamp, hold for
   * DP_PULSE_WIDTH_MSEC, then undo every override in reverse and power
   * back down.
   *
   * NOTE(review): both exits 'return 0' despite the enum
   * usb_charger_type return type — presumably 0 maps to the "unknown"
   * charger value; confirm against the enum definition.
   */
  699. static enum usb_charger_type usb_phy_drive_dp_pulse(struct usb_phy *phy)
  700. {
  701. struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
  702. int ret;
  703. ret = qusb_phy_enable_power(qphy);
  704. if (ret < 0) {
  705. dev_dbg(qphy->phy.dev,
  706. "dpdm regulator enable failed:%d\n", ret);
  707. return 0;
  708. }
  709. qusb_phy_enable_clocks(qphy, true);
  /* bring the PHY out of powerdown and set up the pulldown overrides */
  710. msm_usb_write_readback(qphy->base, qphy->phy_reg[PWR_CTRL1],
  711. PWR_CTRL1_POWR_DOWN, 0x00);
  712. msm_usb_write_readback(qphy->base, qphy->phy_reg[DEBUG_CTRL4],
  713. FORCED_UTMI_DPPULLDOWN, 0x00);
  714. msm_usb_write_readback(qphy->base, qphy->phy_reg[DEBUG_CTRL4],
  715. FORCED_UTMI_DMPULLDOWN,
  716. FORCED_UTMI_DMPULLDOWN);
  717. msm_usb_write_readback(qphy->base, qphy->phy_reg[DEBUG_CTRL3],
  718. 0xd1, 0xd1);
  719. msm_usb_write_readback(qphy->base, qphy->phy_reg[PWR_CTRL1],
  720. CLAMP_N_EN, CLAMP_N_EN);
  /* arm the D+ state-change interrupt while the pulse is held */
  721. msm_usb_write_readback(qphy->base, qphy->phy_reg[INTR_CTRL],
  722. DPSE_INTR_HIGH_SEL, 0x00);
  723. msm_usb_write_readback(qphy->base, qphy->phy_reg[INTR_CTRL],
  724. DPSE_INTR_EN, DPSE_INTR_EN);
  725. msleep(DP_PULSE_WIDTH_MSEC);
  /* tear down the overrides in reverse order */
  726. msm_usb_write_readback(qphy->base, qphy->phy_reg[INTR_CTRL],
  727. DPSE_INTR_HIGH_SEL |
  728. DPSE_INTR_EN, 0x00);
  729. msm_usb_write_readback(qphy->base, qphy->phy_reg[DEBUG_CTRL3],
  730. 0xd1, 0x00);
  731. msm_usb_write_readback(qphy->base, qphy->phy_reg[DEBUG_CTRL4],
  732. FORCED_UTMI_DPPULLDOWN |
  733. FORCED_UTMI_DMPULLDOWN, 0x00);
  734. msm_usb_write_readback(qphy->base, qphy->phy_reg[PWR_CTRL1],
  735. PWR_CTRL1_POWR_DOWN |
  736. CLAMP_N_EN, 0x00);
  737. msleep(20);
  738. qusb_phy_enable_clocks(qphy, false);
  739. ret = qusb_phy_disable_power(qphy);
  740. if (ret < 0) {
  741. dev_dbg(qphy->phy.dev,
  742. "dpdm regulator disable failed:%d\n", ret);
  743. }
  744. return 0;
  745. }
  746. static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
  747. {
  748. int ret = 0;
  749. struct qusb_phy *qphy = rdev_get_drvdata(rdev);
  750. dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
  751. __func__, qphy->dpdm_enable);
  752. if (qphy->eud_enable_reg && readl_relaxed(qphy->eud_enable_reg)) {
  753. dev_err(qphy->phy.dev, "eud is enabled\n");
  754. return 0;
  755. }
  756. if (!qphy->dpdm_enable) {
  757. ret = qusb_phy_enable_power(qphy);
  758. if (ret < 0) {
  759. dev_dbg(qphy->phy.dev,
  760. "dpdm regulator enable failed:%d\n", ret);
  761. return ret;
  762. }
  763. qphy->dpdm_enable = true;
  764. qusb_phy_reset(qphy);
  765. }
  766. return ret;
  767. }
  768. static int qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev)
  769. {
  770. int ret = 0;
  771. struct qusb_phy *qphy = rdev_get_drvdata(rdev);
  772. dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
  773. __func__, qphy->dpdm_enable);
  774. if (qphy->dpdm_enable) {
  775. ret = qusb_phy_disable_power(qphy);
  776. if (ret < 0) {
  777. dev_dbg(qphy->phy.dev,
  778. "dpdm regulator disable failed:%d\n", ret);
  779. return ret;
  780. }
  781. qphy->dpdm_enable = false;
  782. }
  783. return ret;
  784. }
  785. static int qusb_phy_dpdm_regulator_is_enabled(struct regulator_dev *rdev)
  786. {
  787. struct qusb_phy *qphy = rdev_get_drvdata(rdev);
  788. dev_dbg(qphy->phy.dev, "%s qphy->dpdm_enable = %d\n", __func__,
  789. qphy->dpdm_enable);
  790. return qphy->dpdm_enable;
  791. }
/* Ops for the DP/DM pseudo-regulator exposed to the charger/USB stack. */
static const struct regulator_ops qusb_phy_dpdm_regulator_ops = {
	.enable		= qusb_phy_dpdm_regulator_enable,
	.disable	= qusb_phy_dpdm_regulator_disable,
	.is_enabled	= qusb_phy_dpdm_regulator_is_enabled,
};
  797. static int qusb_phy_regulator_init(struct qusb_phy *qphy)
  798. {
  799. struct device *dev = qphy->phy.dev;
  800. struct regulator_config cfg = {};
  801. struct regulator_init_data *init_data;
  802. init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
  803. if (!init_data)
  804. return -ENOMEM;
  805. init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS;
  806. qphy->dpdm_rdesc.owner = THIS_MODULE;
  807. qphy->dpdm_rdesc.type = REGULATOR_VOLTAGE;
  808. qphy->dpdm_rdesc.ops = &qusb_phy_dpdm_regulator_ops;
  809. qphy->dpdm_rdesc.name = kbasename(dev->of_node->full_name);
  810. cfg.dev = dev;
  811. cfg.init_data = init_data;
  812. cfg.driver_data = qphy;
  813. cfg.of_node = dev->of_node;
  814. qphy->dpdm_rdev = devm_regulator_register(dev, &qphy->dpdm_rdesc, &cfg);
  815. return PTR_ERR_OR_ZERO(qphy->dpdm_rdev);
  816. }
  817. static int qusb_phy_create_debugfs(struct qusb_phy *qphy)
  818. {
  819. int ret = 0, i;
  820. char name[6];
  821. qphy->root = debugfs_create_dir(dev_name(qphy->phy.dev), NULL);
  822. if (IS_ERR_OR_NULL(qphy->root)) {
  823. dev_err(qphy->phy.dev,
  824. "can't create debugfs root for %s\n",
  825. dev_name(qphy->phy.dev));
  826. ret = -ENOMEM;
  827. goto create_err;
  828. }
  829. for (i = 0; i < 5; i++) {
  830. snprintf(name, sizeof(name), "tune%d", (i + 1));
  831. debugfs_create_x8(name, 0644, qphy->root,
  832. &qphy->tune[i]);
  833. }
  834. debugfs_create_x8("bias_ctrl2", 0644, qphy->root,
  835. &qphy->bias_ctrl2);
  836. create_err:
  837. return ret;
  838. }
  839. static int qusb2_get_regulators(struct qusb_phy *qphy)
  840. {
  841. struct device *dev = qphy->phy.dev;
  842. qphy->vdd = devm_regulator_get(dev, "vdd");
  843. if (IS_ERR(qphy->vdd)) {
  844. dev_err(dev, "unable to get vdd supply\n");
  845. return PTR_ERR(qphy->vdd);
  846. }
  847. qphy->vdda33 = devm_regulator_get(dev, "vdda33");
  848. if (IS_ERR(qphy->vdda33)) {
  849. dev_err(dev, "unable to get vdda33 supply\n");
  850. return PTR_ERR(qphy->vdda33);
  851. }
  852. qphy->vdda18 = devm_regulator_get(dev, "vdda18");
  853. if (IS_ERR(qphy->vdda18)) {
  854. dev_err(dev, "unable to get vdda18 supply\n");
  855. return PTR_ERR(qphy->vdda18);
  856. }
  857. qphy->refgen = devm_regulator_get(dev, "refgen");
  858. if (IS_ERR(qphy->refgen)) {
  859. dev_err(dev, "unable to get refgen supply\n");
  860. return PTR_ERR(qphy->refgen);
  861. }
  862. return 0;
  863. }
  864. static int qusb_phy_probe(struct platform_device *pdev)
  865. {
  866. struct qusb_phy *qphy;
  867. struct device *dev = &pdev->dev;
  868. struct resource *res;
  869. int ret = 0, size = 0;
  870. qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
  871. if (!qphy)
  872. return -ENOMEM;
  873. qphy->phy.dev = dev;
  874. res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  875. "qusb_phy_base");
  876. qphy->base = devm_ioremap_resource(dev, res);
  877. if (IS_ERR(qphy->base))
  878. return PTR_ERR(qphy->base);
  879. res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  880. "emu_phy_base");
  881. if (res) {
  882. qphy->emu_phy_base = devm_ioremap_resource(dev, res);
  883. if (IS_ERR(qphy->emu_phy_base)) {
  884. dev_dbg(dev, "couldn't ioremap emu_phy_base\n");
  885. qphy->emu_phy_base = NULL;
  886. }
  887. }
  888. res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  889. "efuse_addr");
  890. if (res) {
  891. qphy->efuse_reg = devm_ioremap(dev, res->start,
  892. resource_size(res));
  893. if (!IS_ERR_OR_NULL(qphy->efuse_reg)) {
  894. ret = of_property_read_u32(dev->of_node,
  895. "qcom,efuse-bit-pos",
  896. &qphy->efuse_bit_pos);
  897. if (!ret) {
  898. ret = of_property_read_u32(dev->of_node,
  899. "qcom,efuse-num-bits",
  900. &qphy->efuse_num_of_bits);
  901. }
  902. if (ret) {
  903. dev_err(dev,
  904. "DT Value for efuse is invalid.\n");
  905. return -EINVAL;
  906. }
  907. }
  908. }
  909. res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  910. "refgen_north_bg_reg_addr");
  911. if (res)
  912. qphy->refgen_north_bg_reg = devm_ioremap(dev, res->start,
  913. resource_size(res));
  914. res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  915. "eud_enable_reg");
  916. if (res) {
  917. qphy->eud_enable_reg = devm_ioremap_resource(dev, res);
  918. if (IS_ERR(qphy->eud_enable_reg)) {
  919. dev_err(dev, "err getting eud_enable_reg address\n");
  920. return PTR_ERR(qphy->eud_enable_reg);
  921. }
  922. }
  923. /* ref_clk_src is needed irrespective of SE_CLK or DIFF_CLK usage */
  924. qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
  925. if (IS_ERR(qphy->ref_clk_src)) {
  926. dev_dbg(dev, "clk get failed for ref_clk_src\n");
  927. ret = PTR_ERR(qphy->ref_clk_src);
  928. return ret;
  929. }
  930. /* ref_clk is needed only for DIFF_CLK case, hence make it optional. */
  931. if (of_property_match_string(pdev->dev.of_node,
  932. "clock-names", "ref_clk") >= 0) {
  933. qphy->ref_clk = devm_clk_get(dev, "ref_clk");
  934. if (IS_ERR(qphy->ref_clk)) {
  935. ret = PTR_ERR(qphy->ref_clk);
  936. if (ret != -EPROBE_DEFER)
  937. dev_dbg(dev,
  938. "clk get failed for ref_clk\n");
  939. return ret;
  940. }
  941. clk_set_rate(qphy->ref_clk, 19200000);
  942. }
  943. if (of_property_match_string(pdev->dev.of_node,
  944. "clock-names", "cfg_ahb_clk") >= 0) {
  945. qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
  946. if (IS_ERR(qphy->cfg_ahb_clk)) {
  947. ret = PTR_ERR(qphy->cfg_ahb_clk);
  948. if (ret != -EPROBE_DEFER)
  949. dev_err(dev,
  950. "clk get failed for cfg_ahb_clk ret %d\n", ret);
  951. return ret;
  952. }
  953. }
  954. qphy->phy_reset = devm_reset_control_get(dev, "phy_reset");
  955. if (IS_ERR(qphy->phy_reset))
  956. return PTR_ERR(qphy->phy_reset);
  957. qphy->emulation = of_property_read_bool(dev->of_node,
  958. "qcom,emulation");
  959. of_get_property(dev->of_node, "qcom,emu-init-seq", &size);
  960. if (size) {
  961. qphy->emu_init_seq = devm_kzalloc(dev,
  962. size, GFP_KERNEL);
  963. if (qphy->emu_init_seq) {
  964. qphy->emu_init_seq_len =
  965. (size / sizeof(*qphy->emu_init_seq));
  966. if (qphy->emu_init_seq_len % 2) {
  967. dev_err(dev, "invalid emu_init_seq_len\n");
  968. return -EINVAL;
  969. }
  970. of_property_read_u32_array(dev->of_node,
  971. "qcom,emu-init-seq",
  972. qphy->emu_init_seq,
  973. qphy->emu_init_seq_len);
  974. } else {
  975. dev_dbg(dev,
  976. "error allocating memory for emu_init_seq\n");
  977. }
  978. }
  979. size = 0;
  980. of_get_property(dev->of_node, "qcom,phy-pll-reset-seq", &size);
  981. if (size) {
  982. qphy->phy_pll_reset_seq = devm_kzalloc(dev,
  983. size, GFP_KERNEL);
  984. if (qphy->phy_pll_reset_seq) {
  985. qphy->phy_pll_reset_seq_len =
  986. (size / sizeof(*qphy->phy_pll_reset_seq));
  987. if (qphy->phy_pll_reset_seq_len % 2) {
  988. dev_err(dev, "invalid phy_pll_reset_seq_len\n");
  989. return -EINVAL;
  990. }
  991. of_property_read_u32_array(dev->of_node,
  992. "qcom,phy-pll-reset-seq",
  993. qphy->phy_pll_reset_seq,
  994. qphy->phy_pll_reset_seq_len);
  995. } else {
  996. dev_dbg(dev,
  997. "error allocating memory for phy_pll_reset_seq\n");
  998. }
  999. }
  1000. size = 0;
  1001. of_get_property(dev->of_node, "qcom,emu-dcm-reset-seq", &size);
  1002. if (size) {
  1003. qphy->emu_dcm_reset_seq = devm_kzalloc(dev,
  1004. size, GFP_KERNEL);
  1005. if (qphy->emu_dcm_reset_seq) {
  1006. qphy->emu_dcm_reset_seq_len =
  1007. (size / sizeof(*qphy->emu_dcm_reset_seq));
  1008. if (qphy->emu_dcm_reset_seq_len % 2) {
  1009. dev_err(dev, "invalid emu_dcm_reset_seq_len\n");
  1010. return -EINVAL;
  1011. }
  1012. of_property_read_u32_array(dev->of_node,
  1013. "qcom,emu-dcm-reset-seq",
  1014. qphy->emu_dcm_reset_seq,
  1015. qphy->emu_dcm_reset_seq_len);
  1016. } else {
  1017. dev_dbg(dev,
  1018. "error allocating memory for emu_dcm_reset_seq\n");
  1019. }
  1020. }
  1021. size = 0;
  1022. of_get_property(dev->of_node, "qcom,qusb-phy-reg-offset", &size);
  1023. if (size) {
  1024. qphy->phy_reg = devm_kzalloc(dev, size, GFP_KERNEL);
  1025. if (qphy->phy_reg) {
  1026. qphy->qusb_phy_reg_offset_cnt =
  1027. size / sizeof(*qphy->phy_reg);
  1028. if (qphy->qusb_phy_reg_offset_cnt != USB2_PHY_REG_MAX) {
  1029. dev_err(dev, "invalid reg offset count\n");
  1030. return -EINVAL;
  1031. }
  1032. of_property_read_u32_array(dev->of_node,
  1033. "qcom,qusb-phy-reg-offset",
  1034. qphy->phy_reg,
  1035. qphy->qusb_phy_reg_offset_cnt);
  1036. } else {
  1037. dev_err(dev, "err mem alloc for qusb_phy_reg_offset\n");
  1038. return -ENOMEM;
  1039. }
  1040. } else {
  1041. dev_err(dev, "err provide qcom,qmp-phy-reg-offset\n");
  1042. return -EINVAL;
  1043. }
  1044. size = 0;
  1045. of_get_property(dev->of_node, "qcom,qusb-phy-init-seq", &size);
  1046. if (size) {
  1047. qphy->qusb_phy_init_seq = devm_kzalloc(dev,
  1048. size, GFP_KERNEL);
  1049. if (qphy->qusb_phy_init_seq) {
  1050. qphy->init_seq_len =
  1051. (size / sizeof(*qphy->qusb_phy_init_seq));
  1052. if (qphy->init_seq_len % 2) {
  1053. dev_err(dev, "invalid init_seq_len\n");
  1054. return -EINVAL;
  1055. }
  1056. of_property_read_u32_array(dev->of_node,
  1057. "qcom,qusb-phy-init-seq",
  1058. qphy->qusb_phy_init_seq,
  1059. qphy->init_seq_len);
  1060. } else {
  1061. dev_err(dev,
  1062. "error allocating memory for phy_init_seq\n");
  1063. }
  1064. }
  1065. qphy->host_init_seq_len = of_property_count_elems_of_size(dev->of_node,
  1066. "qcom,qusb-phy-host-init-seq",
  1067. sizeof(*qphy->qusb_phy_host_init_seq));
  1068. if (qphy->host_init_seq_len > 0) {
  1069. qphy->qusb_phy_host_init_seq = devm_kcalloc(dev,
  1070. qphy->host_init_seq_len,
  1071. sizeof(*qphy->qusb_phy_host_init_seq),
  1072. GFP_KERNEL);
  1073. if (qphy->qusb_phy_host_init_seq)
  1074. of_property_read_u32_array(dev->of_node,
  1075. "qcom,qusb-phy-host-init-seq",
  1076. qphy->qusb_phy_host_init_seq,
  1077. qphy->host_init_seq_len);
  1078. else
  1079. return -ENOMEM;
  1080. }
  1081. qphy->override_bias_ctrl2 = of_property_read_bool(dev->of_node,
  1082. "qcom,override-bias-ctrl2");
  1083. ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
  1084. (u32 *) qphy->vdd_levels,
  1085. ARRAY_SIZE(qphy->vdd_levels));
  1086. if (ret) {
  1087. dev_err(dev, "error reading qcom,vdd-voltage-level property\n");
  1088. return ret;
  1089. }
  1090. ret = qusb2_get_regulators(qphy);
  1091. if (ret)
  1092. return ret;
  1093. mutex_init(&qphy->lock);
  1094. platform_set_drvdata(pdev, qphy);
  1095. qphy->phy.label = "msm-qusb-phy-v2";
  1096. qphy->phy.init = qusb_phy_init;
  1097. qphy->phy.set_suspend = qusb_phy_set_suspend;
  1098. qphy->phy.shutdown = qusb_phy_shutdown;
  1099. qphy->phy.type = USB_PHY_TYPE_USB2;
  1100. qphy->phy.notify_connect = qusb_phy_notify_connect;
  1101. qphy->phy.notify_disconnect = qusb_phy_notify_disconnect;
  1102. qphy->phy.charger_detect = usb_phy_drive_dp_pulse;
  1103. ret = usb_add_phy_dev(&qphy->phy);
  1104. if (ret)
  1105. return ret;
  1106. ret = qusb_phy_regulator_init(qphy);
  1107. if (ret)
  1108. usb_remove_phy(&qphy->phy);
  1109. qphy->suspended = true;
  1110. qusb_phy_create_debugfs(qphy);
  1111. /*
  1112. * EUD may be enable in boot loader and to keep EUD session alive across
  1113. * kernel boot till USB phy driver is initialized based on cable status,
  1114. * keep LDOs on here.
  1115. */
  1116. if (qphy->eud_enable_reg && readl_relaxed(qphy->eud_enable_reg))
  1117. qusb_phy_enable_power(qphy);
  1118. return ret;
  1119. }
/*
 * qusb_phy_remove() - unbind the PHY device.
 *
 * Unregisters the PHY from the USB framework first, then forces the
 * suspended/powered-down state (with cable state cleared so the suspend
 * path takes the disconnected branch) and removes the debugfs tree.
 * Teardown order matters: the PHY must be out of the framework before it
 * is suspended.
 */
static int qusb_phy_remove(struct platform_device *pdev)
{
	struct qusb_phy *qphy = platform_get_drvdata(pdev);

	usb_remove_phy(&qphy->phy);
	qphy->cable_connected = false;
	qusb_phy_set_suspend(&qphy->phy, true);
	debugfs_remove_recursive(qphy->root);

	return 0;
}
/* Device-tree match table: binds this driver to "qcom,qusb2phy-v2" nodes. */
static const struct of_device_id qusb_phy_id_table[] = {
	{ .compatible = "qcom,qusb2phy-v2", },
	{ },
};
MODULE_DEVICE_TABLE(of, qusb_phy_id_table);
/* Platform driver glue; probe/remove are defined above. */
static struct platform_driver qusb_phy_driver = {
	.probe		= qusb_phy_probe,
	.remove		= qusb_phy_remove,
	.driver = {
		.name	= "msm-qusb-phy-v2",
		.of_match_table = of_match_ptr(qusb_phy_id_table),
	},
};

module_platform_driver(qusb_phy_driver);

MODULE_DESCRIPTION("MSM QUSB2 PHY v2 driver");
MODULE_LICENSE("GPL");