// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/debug-regulator.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/proxy-consumer.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/qmp.h>
#include <linux/interconnect.h>

#include "../../regulator/internal.h"
#include "gdsc-debug.h"

#define CREATE_TRACE_POINTS
#include "trace-gdsc.h"

/* GDSCR */
#define PWR_ON_MASK		BIT(31)
#define CLK_DIS_WAIT_MASK	(0xF << 12)
#define CLK_DIS_WAIT_SHIFT	(12)
#define RETAIN_FF_ENABLE_MASK	BIT(11)
#define SW_OVERRIDE_MASK	BIT(2)
#define HW_CONTROL_MASK		BIT(1)
#define SW_COLLAPSE_MASK	BIT(0)

/* CFG_GDSCR */
#define POWER_DOWN_COMPLETE_MASK	BIT(15)
#define POWER_UP_COMPLETE_MASK		BIT(16)

/* Domain Address */
#define GMEM_CLAMP_IO_MASK	BIT(0)
#define GMEM_RESET_MASK		BIT(4)

/* SW Reset */
#define BCR_BLK_ARES_BIT	BIT(0)

/* Register Offset */
#define REG_OFFSET		0x0
#define CFG_GDSCR_OFFSET	(REG_OFFSET + 0x4)

/* Timeout Delay */
#define TIMEOUT_US		1500

#define MBOX_TOUT_MS		500

struct collapse_vote {
	struct regmap **regmap;
	u32 vote_bit;
};

struct gdsc {
	struct regulator_dev *rdev;
	struct regulator_desc rdesc;
	void __iomem *gdscr;
	struct regmap *regmap;
	struct regmap *domain_addr;
	struct regmap *hw_ctrl;
	struct regmap **sw_resets;
	struct regmap *acd_reset;
	struct regmap *acd_misc_reset;
	struct collapse_vote collapse_vote;
	struct clk **clocks;
	struct mbox_client mbox_client;
	struct mbox_chan *mbox;
	struct reset_control **reset_clocks;
	struct icc_path **paths;
	bool toggle_logic;
	bool retain_ff_enable;
	bool resets_asserted;
	bool root_en;
	bool force_root_en;
	bool no_status_check_on_disable;
	bool is_gdsc_enabled;
	bool is_gdsc_hw_ctrl_mode;
	bool is_root_clk_voted;
	bool reset_aon;
	int clock_count;
	int reset_count;
	int root_clk_idx;
	int sw_reset_count;
	int path_count;
	int collapse_count;
	u32 gds_timeout;
	bool skip_disable_before_enable;
	bool skip_disable;
	bool bypass_skip_disable;
	bool cfg_gdscr;
};

enum gdscr_status {
	DISABLED,
	ENABLED,
};
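
/*
 * Read back the GDSCR to ensure that preceding register writes have
 * reached the hardware before any subsequent delay begins; this acts
 * as a write barrier.
 */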
static inline u32 gdsc_mb(struct gdsc *gds)
{
	u32 reg;

	regmap_read(gds->regmap, REG_OFFSET, &reg);
	return reg;
}
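
/*
 * Poll the GDSC status register, checking roughly once per microsecond,
 * until the requested power state is reached or sc->gds_timeout
 * microseconds have elapsed.
 */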
static int poll_gdsc_status(struct gdsc *sc, enum gdscr_status status)
{
	struct regmap *regmap;
	int count = sc->gds_timeout;
	u32 val, reg_offset;

	if (sc->hw_ctrl && !sc->cfg_gdscr)
		regmap = sc->hw_ctrl;
	else
		regmap = sc->regmap;

	if (sc->cfg_gdscr)
		reg_offset = CFG_GDSCR_OFFSET;
	else
		reg_offset = REG_OFFSET;

	for (; count > 0; count--) {
		regmap_read(regmap, reg_offset, &val);

		switch (status) {
		case ENABLED:
			if (sc->cfg_gdscr)
				val &= POWER_UP_COMPLETE_MASK;
			else
				val &= PWR_ON_MASK;
			break;
		case DISABLED:
			if (sc->cfg_gdscr) {
				val &= POWER_DOWN_COMPLETE_MASK;
			} else {
				val &= PWR_ON_MASK;
				val = !val;
			}
			break;
		}

		if (val) {
			trace_gdsc_time(sc->rdesc.name, status,
					sc->gds_timeout - count, 0);
			return 0;
		}

		/*
		 * There is no guarantee about the delay needed for the enable
		 * bit in the GDSCR to be set or reset after the GDSC state
		 * changes. Hence, keep on checking for a reasonable number
		 * of times until the bit is set with the least possible delay
		 * between successive tries.
		 */
		udelay(1);
	}

	trace_gdsc_time(sc->rdesc.name, status,
			sc->gds_timeout - count, 1);

	return -ETIMEDOUT;
}
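
/*
 * Wrapper around poll_gdsc_status() that logs diagnostic register dumps
 * on a timeout and, when a separate hw_ctrl status register is present,
 * re-polls once before reporting failure.
 */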
static int check_gdsc_status(struct gdsc *sc, struct device *pdev,
			     enum gdscr_status state)
{
	u32 regval, hw_ctrl_regval;
	int ret;
	static const char * const gdsc_states[] = {
		"disable",
		"enable",
	};

	if (!sc || !sc->regmap || !pdev)
		return -EINVAL;

	ret = poll_gdsc_status(sc, state);
	if (ret) {
		regmap_read(sc->regmap, REG_OFFSET, &regval);

		if (sc->hw_ctrl) {
			regmap_read(sc->hw_ctrl, REG_OFFSET, &hw_ctrl_regval);
			dev_warn(pdev, "%s %s state (after %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x. Re-polling.\n",
				 sc->rdesc.name, gdsc_states[state],
				 sc->gds_timeout, regval, hw_ctrl_regval);

			ret = poll_gdsc_status(sc, state);
			if (ret) {
				regmap_read(sc->regmap, REG_OFFSET, &regval);
				regmap_read(sc->hw_ctrl, REG_OFFSET,
					    &hw_ctrl_regval);
				dev_err(pdev, "%s %s final state (after additional %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x\n",
					sc->rdesc.name, gdsc_states[state],
					sc->gds_timeout, regval,
					hw_ctrl_regval);
				return ret;
			}
		} else {
			dev_err(pdev, "%s %s timed out: 0x%x\n",
				sc->rdesc.name, gdsc_states[state], regval);
			udelay(sc->gds_timeout);

			regmap_read(sc->regmap, REG_OFFSET, &regval);
			dev_err(pdev, "%s %s final state: 0x%x (%d us timeout)\n",
				sc->rdesc.name, gdsc_states[state], regval,
				sc->gds_timeout);
			return ret;
		}
	}

	return ret;
}
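
/*
 * Determine the initial logical enable state of the GDSC from hardware
 * at probe time so that the regulator framework starts in sync with the
 * power domain.
 */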
static int gdsc_init_is_enabled(struct gdsc *sc)
{
	struct regmap *regmap;
	uint32_t regval, mask;
	int ret, i;

	if (!sc->toggle_logic) {
		sc->is_gdsc_enabled = !sc->resets_asserted;
		return 0;
	}

	if (sc->collapse_count) {
		for (i = 0; i < sc->collapse_count; i++)
			regmap = sc->collapse_vote.regmap[i];
		mask = BIT(sc->collapse_vote.vote_bit);
	} else {
		regmap = sc->regmap;
		mask = SW_COLLAPSE_MASK;
	}

	ret = regmap_read(regmap, REG_OFFSET, &regval);
	if (ret < 0)
		return ret;

	sc->is_gdsc_enabled = !(regval & mask);

	if (sc->is_gdsc_enabled && sc->retain_ff_enable)
		regmap_update_bits(sc->regmap, REG_OFFSET,
				   RETAIN_FF_ENABLE_MASK,
				   RETAIN_FF_ENABLE_MASK);

	return 0;
}

static int gdsc_is_enabled(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);

	/*
	 * Return the logical GDSC enable state given that it will only be
	 * physically disabled by AOP during system sleep.
	 */
	if (sc->skip_disable)
		return sc->is_gdsc_enabled;

	if (!sc->toggle_logic)
		return !sc->resets_asserted;

	if (sc->skip_disable_before_enable)
		return false;

	return sc->is_gdsc_enabled;
}

#define MAX_LEN 96
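
/*
 * Enable the GDSC via a QMP mailbox request to AOP instead of clearing
 * SW_COLLAPSE directly; used for domains whose collapse state is owned
 * by AOP firmware.
 */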
static int gdsc_qmp_enable(struct gdsc *sc)
{
	char buf[MAX_LEN] = "{class: clock, res: gpu_noc_wa}";
	struct qmp_pkt pkt;
	uint32_t regval;
	int ret;

	regmap_read(sc->regmap, REG_OFFSET, &regval);
	if (!(regval & SW_COLLAPSE_MASK)) {
		/*
		 * Do not enable via a QMP request if the GDSC is already
		 * enabled by software.
		 */
		return 0;
	}

	pkt.size = MAX_LEN;
	pkt.data = buf;

	ret = mbox_send_message(sc->mbox, &pkt);
	if (ret < 0)
		dev_err(&sc->rdev->dev, "qmp message send failed, ret=%d\n",
			ret);

	return ret;
}

static int gdsc_enable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	u32 regval;
	int i, ret = 0;

	if (sc->skip_disable_before_enable)
		return 0;

	if (sc->root_en || sc->force_root_en) {
		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
		sc->is_root_clk_voted = true;
	}

	regmap_read(sc->regmap, REG_OFFSET, &regval);
	if (regval & HW_CONTROL_MASK) {
		dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
			 sc->rdesc.name);
		return -EBUSY;
	}

	if (sc->toggle_logic) {
		for (i = 0; i < sc->path_count; i++) {
			ret = icc_set_bw(sc->paths[i], 1, 1);
			if (ret) {
				dev_err(&rdev->dev, "Failed to vote BW for %d, ret=%d\n",
					i, ret);
				return ret;
			}
		}

		if (sc->sw_reset_count) {
			for (i = 0; i < sc->sw_reset_count; i++)
				regmap_set_bits(sc->sw_resets[i], REG_OFFSET,
						BCR_BLK_ARES_BIT);

			if (sc->acd_reset)
				regmap_set_bits(sc->acd_reset, REG_OFFSET,
						BCR_BLK_ARES_BIT);
			if (sc->acd_misc_reset)
				regmap_set_bits(sc->acd_misc_reset, REG_OFFSET,
						BCR_BLK_ARES_BIT);

			/*
			 * BLK_ARES should be kept asserted for at least 100 us
			 * before being de-asserted. This is necessary because
			 * the hardware has 3 demet cells on the sleep clock
			 * which synchronize BLK_ARES.
			 */
			gdsc_mb(sc);
			udelay(100);

			for (i = 0; i < sc->sw_reset_count; i++)
				regmap_clear_bits(sc->sw_resets[i], REG_OFFSET,
						  BCR_BLK_ARES_BIT);

			if (sc->acd_reset)
				regmap_clear_bits(sc->acd_reset, REG_OFFSET,
						  BCR_BLK_ARES_BIT);
			if (sc->acd_misc_reset)
				regmap_clear_bits(sc->acd_misc_reset, REG_OFFSET,
						  BCR_BLK_ARES_BIT);

			/* Make sure de-assert goes through before continuing */
			gdsc_mb(sc);
		}

		if (sc->domain_addr) {
			if (sc->reset_aon) {
				regmap_read(sc->domain_addr, REG_OFFSET,
					    &regval);
				regval |= GMEM_RESET_MASK;
				regmap_write(sc->domain_addr, REG_OFFSET,
					     regval);
				/*
				 * Keep reset asserted for at least 1 us
				 * before continuing.
				 */
				gdsc_mb(sc);
				udelay(1);

				regval &= ~GMEM_RESET_MASK;
				regmap_write(sc->domain_addr, REG_OFFSET,
					     regval);
				/*
				 * Make sure GMEM_RESET is de-asserted before
				 * continuing.
				 */
				gdsc_mb(sc);
			}

			regmap_read(sc->domain_addr, REG_OFFSET, &regval);
			regval &= ~GMEM_CLAMP_IO_MASK;
			regmap_write(sc->domain_addr, REG_OFFSET, regval);

			/* Make sure CLAMP_IO is de-asserted before continuing. */
			gdsc_mb(sc);
		}

		/* Enable gdsc */
		if (sc->mbox) {
			ret = gdsc_qmp_enable(sc);
			if (ret < 0)
				return ret;
		} else if (sc->collapse_count) {
			for (i = 0; i < sc->collapse_count; i++)
				regmap_update_bits(sc->collapse_vote.regmap[i],
						   REG_OFFSET,
						   BIT(sc->collapse_vote.vote_bit),
						   ~BIT(sc->collapse_vote.vote_bit));
		} else {
			regmap_read(sc->regmap, REG_OFFSET, &regval);
			regval &= ~SW_COLLAPSE_MASK;
			regmap_write(sc->regmap, REG_OFFSET, regval);
		}

		/* Wait for 8 XO cycles before polling the status bit. */
		gdsc_mb(sc);
		udelay(1);

		ret = check_gdsc_status(sc, &rdev->dev, ENABLED);
		if (ret)
			return ret;

		if (sc->retain_ff_enable && !(regval & RETAIN_FF_ENABLE_MASK)) {
			regval |= RETAIN_FF_ENABLE_MASK;
			regmap_write(sc->regmap, REG_OFFSET, regval);
		}
	} else {
		for (i = 0; i < sc->reset_count; i++)
			reset_control_deassert(sc->reset_clocks[i]);
		sc->resets_asserted = false;
	}

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the rail is enabled.
	 * Delay to account for this. A delay is also needed to ensure clocks
	 * are not enabled within 400 ns of enabling power to the memories.
	 */
	udelay(1);

	/* Delay to account for staggered memory powerup. */
	udelay(1);

	if (sc->force_root_en) {
		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
		sc->is_root_clk_voted = false;
	}

	sc->is_gdsc_enabled = true;

	return ret;
}

static int gdsc_disable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	struct regulator_dev *parent_rdev;
	uint32_t regval;
	int i, ret = 0;
	bool lock = false;

	if (rdev->supply) {
		parent_rdev = rdev->supply->rdev;

		/*
		 * At this point, it can be assumed that the parent supply's
		 * mutex is always locked by the regulator framework before
		 * this callback is invoked, but there are code paths where it
		 * isn't locked (e.g. regulator_late_cleanup()).
		 *
		 * If the parent supply is not locked, lock the parent supply
		 * mutex before checking its enable count, so that it won't
		 * get disabled while in the middle of GDSC operations.
		 */
		if (ww_mutex_trylock(&parent_rdev->mutex, NULL))
			lock = true;

		if (!parent_rdev->use_count) {
			dev_err(&rdev->dev, "%s cannot disable GDSC while parent is disabled\n",
				sc->rdesc.name);
			ret = -EIO;
			goto done;
		}
	}

	if (sc->force_root_en) {
		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
		sc->is_root_clk_voted = true;
	}

	/* Delay to account for staggered memory powerdown. */
	udelay(1);

	if (sc->skip_disable && !sc->bypass_skip_disable) {
		/*
		 * Don't change the GDSCR register state on disable. AOP will
		 * handle this during system sleep.
		 */
	} else if (sc->toggle_logic) {
		if (sc->sw_reset_count) {
			if (sc->acd_misc_reset)
				regmap_set_bits(sc->acd_misc_reset, REG_OFFSET,
						BCR_BLK_ARES_BIT);
		}

		/* Disable gdsc */
		if (sc->collapse_count) {
			for (i = 0; i < sc->collapse_count; i++)
				regmap_update_bits(sc->collapse_vote.regmap[i],
						   REG_OFFSET,
						   BIT(sc->collapse_vote.vote_bit),
						   BIT(sc->collapse_vote.vote_bit));
		} else {
			regmap_read(sc->regmap, REG_OFFSET, &regval);
			regval |= SW_COLLAPSE_MASK;
			regmap_write(sc->regmap, REG_OFFSET, regval);
		}

		/* Wait for 8 XO cycles before polling the status bit. */
		gdsc_mb(sc);
		udelay(1);

		if (sc->no_status_check_on_disable) {
			/*
			 * Add a short delay here to ensure that gdsc_enable
			 * right after it was disabled does not put it in a
			 * weird state.
			 */
			udelay(100);
		} else {
			ret = check_gdsc_status(sc, &rdev->dev, DISABLED);
			if (ret)
				goto done;
		}

		if (sc->domain_addr) {
			regmap_read(sc->domain_addr, REG_OFFSET, &regval);
			regval |= GMEM_CLAMP_IO_MASK;
			regmap_write(sc->domain_addr, REG_OFFSET, regval);
		}

		for (i = 0; i < sc->path_count; i++) {
			ret = icc_set_bw(sc->paths[i], 0, 0);
			if (ret) {
				dev_err(&rdev->dev, "Failed to unvote BW for %d: %d\n",
					i, ret);
				goto done;
			}
		}
	} else {
		for (i = sc->reset_count - 1; i >= 0; i--)
			reset_control_assert(sc->reset_clocks[i]);
		sc->resets_asserted = true;
	}

	/*
	 * Check if gdsc_enable was called for this GDSC. If not, the root
	 * clock will not have been enabled prior to this.
	 */
	if ((sc->is_root_clk_voted && sc->root_en) || sc->force_root_en) {
		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
		sc->is_root_clk_voted = false;
	}

	sc->is_gdsc_enabled = false;

done:
	if (rdev->supply && lock)
		ww_mutex_unlock(&parent_rdev->mutex);

	return ret;
}
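
/* Cache whether the GDSC is currently in HW trigger mode at probe time. */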
static int gdsc_init_hw_ctrl_mode(struct gdsc *sc)
{
	uint32_t regval;
	int ret;

	ret = regmap_read(sc->regmap, REG_OFFSET, &regval);
	if (ret < 0)
		return ret;

	sc->is_gdsc_hw_ctrl_mode = regval & HW_CONTROL_MASK;

	return 0;
}

static unsigned int gdsc_get_mode(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);

	if (sc->skip_disable) {
		if (sc->bypass_skip_disable)
			return REGULATOR_MODE_IDLE;
		return REGULATOR_MODE_NORMAL;
	}

	return sc->is_gdsc_hw_ctrl_mode ? REGULATOR_MODE_FAST
					: REGULATOR_MODE_NORMAL;
}

static int gdsc_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	struct regulator_dev *parent_rdev;
	uint32_t regval;
	int ret = 0;

	if (sc->skip_disable) {
		switch (mode) {
		case REGULATOR_MODE_IDLE:
			sc->bypass_skip_disable = true;
			break;
		case REGULATOR_MODE_NORMAL:
			sc->bypass_skip_disable = false;
			break;
		default:
			ret = -EINVAL;
			break;
		}

		return ret;
	}

	if (rdev->supply) {
		parent_rdev = rdev->supply->rdev;

		/*
		 * Ensure that the GDSC parent supply is enabled before
		 * continuing. This is needed to avoid an unclocked access
		 * of the GDSC control register for GDSCs whose register access
		 * is gated by the parent supply enable state in hardware.
		 */
		ww_mutex_lock(&parent_rdev->mutex, NULL);

		if (!parent_rdev->use_count) {
			dev_err(&rdev->dev,
				"%s cannot change GDSC HW/SW control mode while parent is disabled\n",
				sc->rdesc.name);
			ret = -EIO;
			goto done;
		}
	}

	ret = regmap_read(sc->regmap, REG_OFFSET, &regval);
	if (ret < 0)
		goto done;

	switch (mode) {
	case REGULATOR_MODE_FAST:
		/* Turn on HW trigger mode */
		regval |= HW_CONTROL_MASK;
		ret = regmap_write(sc->regmap, REG_OFFSET, regval);
		if (ret < 0)
			goto done;

		/*
		 * There may be a race with the internal HW trigger signal
		 * that results in the GDSC going through a power down and
		 * up cycle. If the HW trigger signal is controlled by
		 * firmware that polls the same status bits as we do, the FW
		 * might read an 'on' status before the GDSC can finish the
		 * power cycle. Wait 1 us before returning to ensure the FW
		 * can't immediately poll the status bit.
		 */
		gdsc_mb(sc);
		udelay(1);

		sc->is_gdsc_hw_ctrl_mode = true;
		break;
	case REGULATOR_MODE_NORMAL:
		/* Turn off HW trigger mode */
		regval &= ~HW_CONTROL_MASK;
		ret = regmap_write(sc->regmap, REG_OFFSET, regval);
		if (ret < 0)
			goto done;

		/*
		 * There may be a race with the internal HW trigger signal
		 * that results in the GDSC going through a power down and
		 * up cycle. Account for this case by waiting 1 us before
		 * proceeding.
		 */
		gdsc_mb(sc);
		udelay(1);

		/*
		 * While switching from HW to SW mode, HW may be busy
		 * updating internal required signals. Polling for PWR_ON
		 * ensures that the GDSC switches to SW mode before software
		 * starts to use SW mode.
		 */
		if (sc->is_gdsc_enabled) {
			ret = check_gdsc_status(sc, &rdev->dev, ENABLED);
			if (ret)
				goto done;
		}

		sc->is_gdsc_hw_ctrl_mode = false;
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (rdev->supply)
		ww_mutex_unlock(&parent_rdev->mutex);

	return ret;
}

static const struct regulator_ops gdsc_ops = {
	.is_enabled = gdsc_is_enabled,
	.enable = gdsc_enable,
	.disable = gdsc_disable,
	.set_mode = gdsc_set_mode,
	.get_mode = gdsc_get_mode,
};

static struct regmap_config gdsc_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.max_register = 0x8,
	.fast_io = true,
};
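
/*
 * Log the enable state of each consumer of this GDSC along with the
 * GDSCR, CFG and CFG2 register values for debugging.
 */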
void gdsc_debug_print_regs(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;
	struct gdsc *sc = rdev_get_drvdata(rdev);
	struct regulator *reg;
	const char *supply_name;
	uint32_t regvals[3] = {0};
	int ret;

	if (!sc) {
		pr_err("Failed to get GDSC Handle\n");
		return;
	}

	ww_mutex_lock(&rdev->mutex, NULL);

	if (rdev->open_count)
		pr_info("%-32s EN\n", "Device-Supply");

	list_for_each_entry(reg, &rdev->consumer_list, list) {
		if (reg->supply_name)
			supply_name = reg->supply_name;
		else
			supply_name = "(null)-(null)";

		pr_info("%-32s %c\n", supply_name,
			(reg->enable_count ? 'Y' : 'N'));
	}

	ww_mutex_unlock(&rdev->mutex);

	ret = regmap_bulk_read(sc->regmap, REG_OFFSET, regvals,
			       gdsc_regmap_config.max_register ? 3 : 1);
	if (ret) {
		pr_err("Failed to read %s registers\n", sc->rdesc.name);
		return;
	}

	pr_info("Dumping %s Registers:\n", sc->rdesc.name);
	pr_info("GDSCR: 0x%.8x CFG: 0x%.8x CFG2: 0x%.8x\n",
		regvals[0], regvals[1], regvals[2]);
}
EXPORT_SYMBOL(gdsc_debug_print_regs);

static int gdsc_parse_resets(struct gdsc *sc, struct device *dev)
{
	struct device_node *np;
	int i;

	if (of_find_property(dev->of_node, "sw-reset", NULL)) {
		sc->sw_reset_count = of_count_phandle_with_args(dev->of_node,
								"sw-reset", NULL);
		sc->sw_resets = devm_kmalloc_array(dev, sc->sw_reset_count,
						   sizeof(*sc->sw_resets),
						   GFP_KERNEL);
		if (!sc->sw_resets)
			return -ENOMEM;

		for (i = 0; i < sc->sw_reset_count; i++) {
			np = of_parse_phandle(dev->of_node, "sw-reset", i);
			if (!np)
				return -ENODEV;

			sc->sw_resets[i] = syscon_node_to_regmap(np);
			of_node_put(np);
			if (IS_ERR(sc->sw_resets[i]))
				return PTR_ERR(sc->sw_resets[i]);
		}
	}

	if (of_find_property(dev->of_node, "acd-reset", NULL)) {
		sc->acd_reset = syscon_regmap_lookup_by_phandle(dev->of_node,
								"acd-reset");
		if (IS_ERR(sc->acd_reset))
			return PTR_ERR(sc->acd_reset);
	}

	if (of_find_property(dev->of_node, "acd-misc-reset", NULL)) {
		sc->acd_misc_reset = syscon_regmap_lookup_by_phandle(dev->of_node,
								     "acd-misc-reset");
		if (IS_ERR(sc->acd_misc_reset))
			return PTR_ERR(sc->acd_misc_reset);
	}

	return 0;
}
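
/* Parse the optional GDSC device tree properties into the gdsc struct. */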
static int gdsc_parse_dt_data(struct gdsc *sc, struct device *dev,
			      struct regulator_init_data **init_data)
{
	struct device_node *np;
	int ret, i;

	*init_data = of_get_regulator_init_data(dev, dev->of_node, &sc->rdesc);
	if (*init_data == NULL)
		return -ENOMEM;

	if (of_get_property(dev->of_node, "parent-supply", NULL))
		(*init_data)->supply_regulator = "parent";

	ret = of_property_read_string(dev->of_node, "regulator-name",
				      &sc->rdesc.name);
	if (ret)
		return ret;

	if (of_find_property(dev->of_node, "domain-addr", NULL)) {
		sc->domain_addr = syscon_regmap_lookup_by_phandle(dev->of_node,
								  "domain-addr");
		if (IS_ERR(sc->domain_addr))
			return PTR_ERR(sc->domain_addr);
	}

	ret = gdsc_parse_resets(sc, dev);
	if (ret)
		return ret;

	if (of_find_property(dev->of_node, "hw-ctrl-addr", NULL)) {
		sc->hw_ctrl = syscon_regmap_lookup_by_phandle(dev->of_node,
							      "hw-ctrl-addr");
		if (IS_ERR(sc->hw_ctrl))
			return PTR_ERR(sc->hw_ctrl);
	}

	sc->gds_timeout = TIMEOUT_US;
	of_property_read_u32(dev->of_node, "qcom,gds-timeout",
			     &sc->gds_timeout);

	sc->clock_count = of_property_count_strings(dev->of_node,
						    "clock-names");
	if (sc->clock_count == -EINVAL) {
		sc->clock_count = 0;
	} else if (sc->clock_count < 0) {
		dev_err(dev, "Failed to get clock names, ret=%d\n",
			sc->clock_count);
		return sc->clock_count;
	}

	sc->path_count = of_property_count_strings(dev->of_node,
						   "interconnect-names");
	if (sc->path_count == -EINVAL) {
		sc->path_count = 0;
	} else if (sc->path_count < 0) {
		dev_err(dev, "Failed to get interconnect names, ret=%d\n",
			sc->path_count);
		return sc->path_count;
	}

	sc->root_en = of_property_read_bool(dev->of_node,
					    "qcom,enable-root-clk");
	sc->force_root_en = of_property_read_bool(dev->of_node,
					    "qcom,force-enable-root-clk");
	sc->reset_aon = of_property_read_bool(dev->of_node,
					    "qcom,reset-aon-logic");
	sc->no_status_check_on_disable = of_property_read_bool(dev->of_node,
					    "qcom,no-status-check-on-disable");
	sc->retain_ff_enable = of_property_read_bool(dev->of_node,
					    "qcom,retain-regs");
	sc->skip_disable_before_enable = of_property_read_bool(dev->of_node,
					    "qcom,skip-disable-before-sw-enable");
	sc->cfg_gdscr = of_property_read_bool(dev->of_node,
					    "qcom,support-cfg-gdscr");

	if (of_find_property(dev->of_node, "qcom,collapse-vote", NULL)) {
		/*
		 * The last element of qcom,collapse-vote is the vote bit,
		 * not a regmap phandle, so exclude it from the count.
		 */
		sc->collapse_count = of_property_count_u32_elems(dev->of_node,
						"qcom,collapse-vote") - 1;
		sc->collapse_vote.regmap = devm_kmalloc_array(dev,
						sc->collapse_count,
						sizeof(*(sc->collapse_vote).regmap),
						GFP_KERNEL);
		if (!sc->collapse_vote.regmap)
			return -ENOMEM;

		for (i = 0; i < sc->collapse_count; i++) {
			np = of_parse_phandle(dev->of_node,
					      "qcom,collapse-vote", i);
			if (!np)
				return -ENODEV;

			sc->collapse_vote.regmap[i] = syscon_node_to_regmap(np);
			of_node_put(np);
			if (IS_ERR(sc->collapse_vote.regmap[i]))
				return PTR_ERR(sc->collapse_vote.regmap[i]);
		}

		ret = of_property_read_u32_index(dev->of_node,
						 "qcom,collapse-vote",
						 sc->collapse_count,
						 &sc->collapse_vote.vote_bit);
		if (ret || sc->collapse_vote.vote_bit > 31) {
			dev_err(dev, "qcom,collapse-vote vote_bit error\n");
			return ret ? ret : -EINVAL;
		}
	}

	sc->skip_disable = of_property_read_bool(dev->of_node,
						 "qcom,skip-disable");
	if (sc->skip_disable) {
		/*
		 * If the disable skipping feature is allowed, then use mode
		 * control to enable and disable the feature at runtime instead
		 * of using it to enable and disable hardware triggering.
		 */
		(*init_data)->constraints.valid_ops_mask |=
						REGULATOR_CHANGE_MODE;
		(*init_data)->constraints.valid_modes_mask =
				REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
	}

	sc->toggle_logic = !of_property_read_bool(dev->of_node,
						  "qcom,skip-logic-collapse");
	if (!sc->toggle_logic) {
		sc->reset_count = of_property_count_strings(dev->of_node,
							    "reset-names");
		if (sc->reset_count == -EINVAL) {
			sc->reset_count = 0;
		} else if (sc->reset_count < 0) {
			dev_err(dev, "Failed to get reset clock names\n");
			return sc->reset_count;
		}
	}

	if (of_find_property(dev->of_node, "qcom,support-hw-trigger", NULL)) {
		(*init_data)->constraints.valid_ops_mask |=
						REGULATOR_CHANGE_MODE;
		(*init_data)->constraints.valid_modes_mask |=
				REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
	}

	return 0;
}
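
/*
 * Map the GDSCR register region and acquire the clocks, interconnect
 * paths, and reset controls named in the device tree.
 */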
static int gdsc_get_resources(struct gdsc *sc, struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, i;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(dev, "Failed to get address resource\n");
		return -EINVAL;
	}

	sc->gdscr = devm_ioremap(dev, res->start, resource_size(res));
	if (sc->gdscr == NULL)
		return -ENOMEM;

	if (of_property_read_bool(dev->of_node, "qcom,no-config-gdscr"))
		gdsc_regmap_config.max_register = 0;

	sc->regmap = devm_regmap_init_mmio(dev, sc->gdscr, &gdsc_regmap_config);
	if (IS_ERR(sc->regmap)) {
		dev_err(dev, "Couldn't get regmap\n");
		return PTR_ERR(sc->regmap);
	}

	sc->clocks = devm_kcalloc(dev, sc->clock_count, sizeof(*sc->clocks),
				  GFP_KERNEL);
	if (sc->clock_count && !sc->clocks)
		return -ENOMEM;

	sc->root_clk_idx = -1;
	for (i = 0; i < sc->clock_count; i++) {
		const char *clock_name;

		of_property_read_string_index(dev->of_node, "clock-names", i,
					      &clock_name);

		sc->clocks[i] = devm_clk_get(dev, clock_name);
		if (IS_ERR(sc->clocks[i])) {
			ret = PTR_ERR(sc->clocks[i]);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s, ret=%d\n",
					clock_name, ret);
			return ret;
		}

		if (!strcmp(clock_name, "core_root_clk"))
			sc->root_clk_idx = i;
	}

	sc->paths = devm_kcalloc(dev, sc->path_count, sizeof(*sc->paths),
				 GFP_KERNEL);
	if (sc->path_count && !sc->paths)
		return -ENOMEM;

	for (i = 0; i < sc->path_count; i++) {
		const char *name;

		of_property_read_string_index(dev->of_node,
					      "interconnect-names", i, &name);

		sc->paths[i] = of_icc_get(dev, name);
		if (IS_ERR(sc->paths[i])) {
			ret = PTR_ERR(sc->paths[i]);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get path %s, ret=%d\n",
					name, ret);
			return ret;
		}
	}

	if ((sc->root_en || sc->force_root_en) && (sc->root_clk_idx == -1)) {
		dev_err(dev, "Failed to get root clock name\n");
		return -EINVAL;
	}

	if (!sc->toggle_logic) {
		sc->reset_clocks = devm_kcalloc(&pdev->dev, sc->reset_count,
						sizeof(*sc->reset_clocks),
						GFP_KERNEL);
		if (sc->reset_count && !sc->reset_clocks)
			return -ENOMEM;

		for (i = 0; i < sc->reset_count; i++) {
			const char *reset_name;

			of_property_read_string_index(pdev->dev.of_node,
						      "reset-names", i,
						      &reset_name);

			sc->reset_clocks[i] = devm_reset_control_get(&pdev->dev,
								     reset_name);
			if (IS_ERR(sc->reset_clocks[i])) {
				ret = PTR_ERR(sc->reset_clocks[i]);
				if (ret != -EPROBE_DEFER)
					dev_err(&pdev->dev, "Failed to get %s, ret=%d\n",
						reset_name, ret);
				return ret;
			}
		}
	}

	return 0;
}

static int gdsc_probe(struct platform_device *pdev)
{
	static atomic_t gdsc_count = ATOMIC_INIT(-1);
	struct regulator_config reg_config = {};
	struct regulator_init_data *init_data = NULL;
	struct device *dev = &pdev->dev;
	struct gdsc *sc;
	uint32_t regval, clk_dis_wait_val = 0;
	int ret;

	sc = devm_kzalloc(dev, sizeof(*sc), GFP_KERNEL);
	if (sc == NULL)
		return -ENOMEM;

	ret = gdsc_parse_dt_data(sc, dev, &init_data);
	if (ret)
		return ret;

	ret = gdsc_get_resources(sc, pdev);
	if (ret)
		return ret;

	/*
	 * Disable HW trigger: collapse/restore occur based on register writes.
	 * Disable SW override: use the hardware state machine for sequencing.
	 */
	regmap_read(sc->regmap, REG_OFFSET, &regval);
	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);

	if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) {
		sc->mbox_client.dev = &pdev->dev;
		sc->mbox_client.tx_block = true;
		sc->mbox_client.tx_tout = MBOX_TOUT_MS;
		sc->mbox_client.knows_txdone = false;

		sc->mbox = mbox_request_channel(&sc->mbox_client, 0);
		if (IS_ERR(sc->mbox)) {
			ret = PTR_ERR(sc->mbox);
			dev_err(&pdev->dev, "mailbox channel request failed, ret=%d\n",
				ret);
			if (ret == -EAGAIN)
				ret = -EPROBE_DEFER;
			sc->mbox = NULL;
			goto err;
		}
	}

	if (!of_property_read_u32(pdev->dev.of_node, "qcom,clk-dis-wait-val",
				  &clk_dis_wait_val)) {
		clk_dis_wait_val = clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;

		/* Configure wait time between states. */
		regval &= ~(CLK_DIS_WAIT_MASK);
		regval |= clk_dis_wait_val;
	}

	regmap_write(sc->regmap, REG_OFFSET, regval);

	if (!sc->toggle_logic) {
		regval &= ~SW_COLLAPSE_MASK;
		regmap_write(sc->regmap, REG_OFFSET, regval);

		ret = check_gdsc_status(sc, dev, ENABLED);
		if (ret)
			goto err;
	}

	ret = gdsc_init_is_enabled(sc);
	if (ret) {
		dev_err(dev, "%s failed to get initial enable state, ret=%d\n",
			sc->rdesc.name, ret);
		goto err;
	}

	ret = gdsc_init_hw_ctrl_mode(sc);
	if (ret) {
		dev_err(dev, "%s failed to get initial hw_ctrl state, ret=%d\n",
			sc->rdesc.name, ret);
		goto err;
	}

	sc->rdesc.id = atomic_inc_return(&gdsc_count);
	sc->rdesc.ops = &gdsc_ops;
	sc->rdesc.type = REGULATOR_VOLTAGE;
	sc->rdesc.owner = THIS_MODULE;

	reg_config.dev = dev;
	reg_config.init_data = init_data;
	reg_config.driver_data = sc;
	reg_config.of_node = dev->of_node;
	reg_config.regmap = sc->regmap;

	sc->rdev = devm_regulator_register(dev, &sc->rdesc, &reg_config);
	if (IS_ERR(sc->rdev)) {
		ret = PTR_ERR(sc->rdev);
		dev_err(dev, "regulator_register(\"%s\") failed, ret=%d\n",
			sc->rdesc.name, ret);
		goto err;
	}

	ret = devm_regulator_proxy_consumer_register(dev, dev->of_node);
	if (ret) {
		dev_err(dev, "failed to register proxy consumer, ret=%d\n",
			ret);
		goto err;
	}

	ret = devm_regulator_debug_register(dev, sc->rdev);
	if (ret)
		dev_err(dev, "failed to register debug regulator, ret=%d\n",
			ret);

	platform_set_drvdata(pdev, sc);

	return 0;

err:
	if (sc->mbox)
		mbox_free_channel(sc->mbox);

	return ret;
}

static const struct of_device_id gdsc_match_table[] = {
	{ .compatible = "qcom,gdsc" },
	{}
};

static struct platform_driver gdsc_driver = {
	.probe = gdsc_probe,
	.driver = {
		.name = "gdsc",
		.of_match_table = gdsc_match_table,
		.sync_state = regulator_proxy_consumer_sync_state,
	},
};

static int __init gdsc_init(void)
{
	return platform_driver_register(&gdsc_driver);
}
subsys_initcall(gdsc_init);

static void __exit gdsc_exit(void)
{
	platform_driver_unregister(&gdsc_driver);
}
module_exit(gdsc_exit);

MODULE_DESCRIPTION("GDSC regulator control library");
MODULE_LICENSE("GPL v2");