common.c 16 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2013-2014, 2017-2021, The Linux Foundation.
  4. * All rights reserved.
  5. * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  6. */
  7. #include <linux/export.h>
  8. #include <linux/module.h>
  9. #include <linux/regmap.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/clk-provider.h>
  12. #include <linux/reset-controller.h>
  13. #include <linux/of.h>
  14. #include <linux/clk/qcom.h>
  15. #include <linux/clk.h>
  16. #include <linux/interconnect.h>
  17. #include <linux/pm_clock.h>
  18. #include <linux/pm_runtime.h>
  19. #include <linux/mfd/syscon.h>
  20. #include "common.h"
  21. #include "clk-opp.h"
  22. #include "clk-rcg.h"
  23. #include "clk-regmap.h"
  24. #include "reset.h"
  25. #include "gdsc.h"
  26. #include "vdd-level.h"
  27. #include "clk-debug.h"
/*
 * Per-controller runtime state built by qcom_cc_really_probe(): the reset
 * controller plus the clock tables handed to the OF clock-provider callback.
 */
struct qcom_cc {
	struct qcom_reset_controller reset;
	/* regmap-backed clocks, indexed by the DT clock specifier */
	struct clk_regmap **rclks;
	size_t num_rclks;
	/* plain clk_hw clocks sharing the same index space (checked first) */
	struct clk_hw **clk_hws;
	size_t num_clk_hws;
};
  35. int qcom_clk_crm_init(struct device *dev, struct clk_crm *crm)
  36. {
  37. char prop_name[32];
  38. if (!crm)
  39. return -EINVAL;
  40. if (!crm->initialized) {
  41. snprintf(prop_name, sizeof(prop_name), "qcom,%s-crmc", crm->name);
  42. if (of_find_property(dev->of_node, prop_name, NULL)) {
  43. crm->regmap_crmc =
  44. syscon_regmap_lookup_by_phandle(dev->of_node,
  45. prop_name);
  46. if (IS_ERR(crm->regmap_crmc)) {
  47. dev_err(dev, "%s regmap error\n", prop_name);
  48. return PTR_ERR(crm->regmap_crmc);
  49. }
  50. }
  51. if (crm->name) {
  52. crm->dev = crm_get_device(crm->name);
  53. if (IS_ERR(crm->dev)) {
  54. pr_err("%s Failed to get crm dev=%s, ret=%d\n",
  55. __func__, crm->name, PTR_ERR(crm->dev));
  56. return PTR_ERR(crm->dev);
  57. }
  58. }
  59. crm->initialized = true;
  60. }
  61. return 0;
  62. }
  63. EXPORT_SYMBOL(qcom_clk_crm_init);
  64. static int qcom_find_freq_index(const struct freq_tbl *f, unsigned long rate)
  65. {
  66. int index;
  67. for (index = 0; f->freq; f++, index++) {
  68. if (rate <= f->freq)
  69. return index;
  70. }
  71. return index - 1;
  72. }
  73. int qcom_find_crm_freq_index(const struct freq_tbl *f, unsigned long rate)
  74. {
  75. if (!f || !f->freq)
  76. return -EINVAL;
  77. /*
  78. * If rate is 0 return PERF_OL 0 index
  79. */
  80. if (!rate)
  81. return 0;
  82. /*
  83. * Return PERF_OL index + 1 as PERF_OL 0 is
  84. * treated as CLK OFF as per LUT population
  85. */
  86. return qcom_find_freq_index(f, rate) + 1;
  87. }
  88. EXPORT_SYMBOL(qcom_find_crm_freq_index);
  89. const
  90. struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
  91. {
  92. if (!f)
  93. return NULL;
  94. if (!f->freq)
  95. return f;
  96. for (; f->freq; f++)
  97. if (rate <= f->freq)
  98. return f;
  99. /* Default to our fastest rate */
  100. return f - 1;
  101. }
  102. EXPORT_SYMBOL_GPL(qcom_find_freq);
  103. const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
  104. unsigned long rate)
  105. {
  106. const struct freq_tbl *best = NULL;
  107. for ( ; f->freq; f++) {
  108. if (rate >= f->freq)
  109. best = f;
  110. else
  111. break;
  112. }
  113. return best;
  114. }
  115. EXPORT_SYMBOL_GPL(qcom_find_freq_floor);
  116. int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
  117. {
  118. int i, num_parents = clk_hw_get_num_parents(hw);
  119. for (i = 0; i < num_parents; i++)
  120. if (src == map[i].src)
  121. return i;
  122. return -ENOENT;
  123. }
  124. EXPORT_SYMBOL_GPL(qcom_find_src_index);
  125. int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg)
  126. {
  127. int i, num_parents = clk_hw_get_num_parents(hw);
  128. for (i = 0; i < num_parents; i++)
  129. if (cfg == map[i].cfg)
  130. return i;
  131. return -ENOENT;
  132. }
  133. EXPORT_SYMBOL_GPL(qcom_find_cfg_index);
  134. struct regmap *
  135. qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
  136. {
  137. void __iomem *base;
  138. struct device *dev = &pdev->dev;
  139. base = devm_platform_ioremap_resource(pdev, 0);
  140. if (IS_ERR(base))
  141. return ERR_CAST(base);
  142. return devm_regmap_init_mmio(dev, base, desc->config);
  143. }
  144. EXPORT_SYMBOL_GPL(qcom_cc_map);
  145. void
  146. qcom_pll_set_fsm_mode(struct regmap *map, u32 reg, u8 bias_count, u8 lock_count)
  147. {
  148. u32 val;
  149. u32 mask;
  150. /* De-assert reset to FSM */
  151. regmap_update_bits(map, reg, PLL_VOTE_FSM_RESET, 0);
  152. /* Program bias count and lock count */
  153. val = bias_count << PLL_BIAS_COUNT_SHIFT |
  154. lock_count << PLL_LOCK_COUNT_SHIFT;
  155. mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT;
  156. mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT;
  157. regmap_update_bits(map, reg, mask, val);
  158. /* Enable PLL FSM voting */
  159. regmap_update_bits(map, reg, PLL_VOTE_FSM_ENA, PLL_VOTE_FSM_ENA);
  160. }
  161. EXPORT_SYMBOL_GPL(qcom_pll_set_fsm_mode);
  162. static void qcom_cc_gdsc_unregister(void *data)
  163. {
  164. gdsc_unregister(data);
  165. }
  166. /*
  167. * Backwards compatibility with old DTs. Register a pass-through factor 1/1
  168. * clock to translate 'path' clk into 'name' clk and register the 'path'
  169. * clk as a fixed rate clock if it isn't present.
  170. */
  171. static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
  172. const char *name, unsigned long rate,
  173. bool add_factor)
  174. {
  175. struct device_node *node = NULL;
  176. struct device_node *clocks_node;
  177. struct clk_fixed_factor *factor;
  178. struct clk_fixed_rate *fixed;
  179. struct clk_init_data init_data = { };
  180. int ret;
  181. clocks_node = of_find_node_by_path("/clocks");
  182. if (clocks_node) {
  183. node = of_get_child_by_name(clocks_node, path);
  184. of_node_put(clocks_node);
  185. }
  186. if (!node) {
  187. fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL);
  188. if (!fixed)
  189. return -EINVAL;
  190. fixed->fixed_rate = rate;
  191. fixed->hw.init = &init_data;
  192. init_data.name = path;
  193. init_data.ops = &clk_fixed_rate_ops;
  194. ret = devm_clk_hw_register(dev, &fixed->hw);
  195. if (ret)
  196. return ret;
  197. }
  198. of_node_put(node);
  199. if (add_factor) {
  200. factor = devm_kzalloc(dev, sizeof(*factor), GFP_KERNEL);
  201. if (!factor)
  202. return -EINVAL;
  203. factor->mult = factor->div = 1;
  204. factor->hw.init = &init_data;
  205. init_data.name = name;
  206. init_data.parent_names = &path;
  207. init_data.num_parents = 1;
  208. init_data.flags = 0;
  209. init_data.ops = &clk_fixed_factor_ops;
  210. ret = devm_clk_hw_register(dev, &factor->hw);
  211. if (ret)
  212. return ret;
  213. }
  214. return 0;
  215. }
  216. int qcom_cc_register_board_clk(struct device *dev, const char *path,
  217. const char *name, unsigned long rate)
  218. {
  219. bool add_factor = true;
  220. /*
  221. * TODO: The RPM clock driver currently does not support the xo clock.
  222. * When xo is added to the RPM clock driver, we should change this
  223. * function to skip registration of xo factor clocks.
  224. */
  225. return _qcom_cc_register_board_clk(dev, path, name, rate, add_factor);
  226. }
  227. EXPORT_SYMBOL_GPL(qcom_cc_register_board_clk);
  228. int qcom_cc_register_sleep_clk(struct device *dev)
  229. {
  230. return _qcom_cc_register_board_clk(dev, "sleep_clk", "sleep_clk_src",
  231. 32768, true);
  232. }
  233. EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk);
/* Drop 'protected-clocks' from the list of clocks to register */
static void qcom_cc_drop_protected(struct device *dev, struct qcom_cc *cc)
{
	struct device_node *np = dev->of_node;
	struct property *prop;
	const __be32 *p;
	u32 i;

	/*
	 * Each u32 in "protected-clocks" is a clock index. NULL-ing the slot
	 * keeps the clock from being registered and makes the provider
	 * callback return NULL for it, stubbing client requests.
	 */
	of_property_for_each_u32(np, "protected-clocks", prop, p, i) {
		if (i >= cc->num_rclks)
			continue;

		cc->rclks[i] = NULL;
	}
}
  247. /* Set QCOM_CLK_IS_CRITICAL on clocks specified in dt */
  248. static void qcom_cc_set_critical(struct device *dev, struct qcom_cc *cc)
  249. {
  250. struct of_phandle_args args;
  251. struct device_node *np;
  252. struct property *prop;
  253. const __be32 *p;
  254. u32 clock_idx;
  255. u32 i;
  256. int cnt;
  257. of_property_for_each_u32(dev->of_node, "qcom,critical-clocks", prop, p, i) {
  258. if (i >= cc->num_rclks)
  259. continue;
  260. if (cc->rclks[i])
  261. cc->rclks[i]->flags |= QCOM_CLK_IS_CRITICAL;
  262. }
  263. of_property_for_each_u32(dev->of_node, "qcom,critical-devices", prop, p, i) {
  264. for (np = of_find_node_by_phandle(i); np; np = of_get_parent(np)) {
  265. if (!of_property_read_bool(np, "clocks")) {
  266. of_node_put(np);
  267. continue;
  268. }
  269. cnt = of_count_phandle_with_args(np, "clocks", "#clock-cells");
  270. for (i = 0; i < cnt; i++) {
  271. of_parse_phandle_with_args(np, "clocks", "#clock-cells",
  272. i, &args);
  273. clock_idx = args.args[0];
  274. if (args.np != dev->of_node || clock_idx >= cc->num_rclks)
  275. continue;
  276. if (cc->rclks[clock_idx])
  277. cc->rclks[clock_idx]->flags |= QCOM_CLK_IS_CRITICAL;
  278. of_node_put(args.np);
  279. }
  280. of_node_put(np);
  281. }
  282. }
  283. }
  284. static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec,
  285. void *data)
  286. {
  287. struct qcom_cc *cc = data;
  288. unsigned int idx = clkspec->args[0];
  289. if (idx < cc->num_clk_hws && cc->clk_hws[idx])
  290. return cc->clk_hws[idx];
  291. if (idx >= cc->num_rclks) {
  292. pr_err("%s: invalid index %u\n", __func__, idx);
  293. return ERR_PTR(-EINVAL);
  294. }
  295. return cc->rclks[idx] ? &cc->rclks[idx]->hw : NULL;
  296. }
/*
 * qcom_cc_really_probe - register all resources described by a qcom_cc_desc
 * @pdev:   owning platform device
 * @desc:   static controller description (clocks, resets, GDSCs, regulators)
 * @regmap: MMIO regmap the clocks and resets operate on
 *
 * Registers, in order: vdd-class regulators, a proxy voltage vote, the
 * reset controller, GDSC power domains, plain clk_hws, regmap clocks and
 * finally the OF clock provider. Registrations are devm-managed, so the
 * error path only unwinds the proxy vote and the regulator init.
 *
 * Returns 0 on success or a negative errno.
 */
int qcom_cc_really_probe(struct platform_device *pdev,
			 const struct qcom_cc_desc *desc, struct regmap *regmap)
{
	int i, ret;
	struct device *dev = &pdev->dev;
	struct qcom_reset_controller *reset;
	struct qcom_cc *cc;
	struct gdsc_desc *scd;
	size_t num_clks = desc->num_clks;
	struct clk_regmap **rclks = desc->clks;
	size_t num_clk_hws = desc->num_clk_hws;
	struct clk_hw **clk_hws = desc->clk_hws;

	cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL);
	if (!cc)
		return -ENOMEM;

	reset = &cc->reset;
	reset->rcdev.of_node = dev->of_node;
	reset->rcdev.ops = &qcom_reset_ops;
	reset->rcdev.owner = dev->driver->owner;
	reset->rcdev.nr_resets = desc->num_resets;
	reset->regmap = regmap;
	reset->reset_map = desc->resets;

	ret = clk_regulator_init(&pdev->dev, desc);
	if (ret)
		return ret;

	/* Proxy vote holds the rails up until qcom_cc_sync_state() unvotes */
	ret = clk_vdd_proxy_vote(&pdev->dev, desc);
	if (ret)
		goto deinit_clk_regulator;

	if (desc->num_resets) {
		ret = devm_reset_controller_register(dev, &reset->rcdev);
		if (ret)
			goto proxy_unvote;
	}

	if (desc->gdscs && desc->num_gdscs) {
		scd = devm_kzalloc(dev, sizeof(*scd), GFP_KERNEL);
		if (!scd) {
			ret = -ENOMEM;
			goto proxy_unvote;
		}
		scd->dev = dev;
		scd->scs = desc->gdscs;
		scd->num = desc->num_gdscs;
		ret = gdsc_register(scd, &reset->rcdev, regmap);
		if (ret)
			goto proxy_unvote;
		/* Pair gdsc_register() with a devm-managed unregister */
		ret = devm_add_action_or_reset(dev, qcom_cc_gdsc_unregister,
					       scd);
		if (ret)
			goto proxy_unvote;
	}

	cc->rclks = rclks;
	cc->num_rclks = num_clks;
	cc->clk_hws = clk_hws;
	cc->num_clk_hws = num_clk_hws;

	/* Apply DT-driven opt-outs/opt-ins before registering any clock */
	qcom_cc_drop_protected(dev, cc);
	qcom_cc_set_critical(dev, cc);

	for (i = 0; i < num_clk_hws; i++) {
		if (!clk_hws[i])
			continue;

		ret = devm_clk_hw_register(dev, clk_hws[i]);
		if (ret)
			goto proxy_unvote;
	}

	for (i = 0; i < num_clks; i++) {
		if (!rclks[i])
			continue;

		ret = devm_clk_register_regmap(dev, rclks[i]);
		if (ret)
			goto proxy_unvote;

		clk_hw_populate_clock_opp_table(dev->of_node, &rclks[i]->hw);

		/*
		 * Critical clocks are enabled by devm_clk_register_regmap()
		 * and registration skipped. So remove from rclks so that the
		 * get() callback returns NULL and client requests are stubbed.
		 */
		if (rclks[i]->flags & QCOM_CLK_IS_CRITICAL)
			rclks[i] = NULL;
	}

	ret = devm_of_clk_add_hw_provider(dev, qcom_cc_clk_hw_get, cc);
	if (ret)
		goto proxy_unvote;

	return 0;

proxy_unvote:
	clk_vdd_proxy_unvote(dev, desc);
deinit_clk_regulator:
	clk_regulator_deinit(desc);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
  386. int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc)
  387. {
  388. struct regmap *regmap;
  389. regmap = qcom_cc_map(pdev, desc);
  390. if (IS_ERR(regmap))
  391. return PTR_ERR(regmap);
  392. return qcom_cc_really_probe(pdev, desc, regmap);
  393. }
  394. EXPORT_SYMBOL_GPL(qcom_cc_probe);
  395. int qcom_cc_probe_by_index(struct platform_device *pdev, int index,
  396. const struct qcom_cc_desc *desc)
  397. {
  398. struct regmap *regmap;
  399. void __iomem *base;
  400. base = devm_platform_ioremap_resource(pdev, index);
  401. if (IS_ERR(base))
  402. return -ENOMEM;
  403. regmap = devm_regmap_init_mmio(&pdev->dev, base, desc->config);
  404. if (IS_ERR(regmap))
  405. return PTR_ERR(regmap);
  406. return qcom_cc_really_probe(pdev, desc, regmap);
  407. }
  408. EXPORT_SYMBOL_GPL(qcom_cc_probe_by_index);
  409. void qcom_cc_sync_state(struct device *dev, const struct qcom_cc_desc *desc)
  410. {
  411. dev_info(dev, "sync-state\n");
  412. clk_sync_state(dev);
  413. clk_vdd_proxy_unvote(dev, desc);
  414. }
  415. EXPORT_SYMBOL(qcom_cc_sync_state);
  416. int qcom_clk_crm_set_rate(struct clk *clk,
  417. enum crm_drv_type client_type, u32 client_idx,
  418. u32 pwr_st, unsigned long rate)
  419. {
  420. struct clk_hw *hw;
  421. int ret;
  422. if (!clk)
  423. return -EINVAL;
  424. do {
  425. hw = __clk_get_hw(clk);
  426. if (clk_is_regmap_clk(hw)) {
  427. struct clk_regmap *rclk = to_clk_regmap(hw);
  428. if (rclk->ops && rclk->ops->set_crm_rate) {
  429. ret = rclk->ops->set_crm_rate(hw, client_type,
  430. client_idx, pwr_st, rate);
  431. return ret;
  432. }
  433. }
  434. } while ((clk = clk_get_parent(hw->clk)));
  435. return -EINVAL;
  436. }
  437. EXPORT_SYMBOL(qcom_clk_crm_set_rate);
  438. int qcom_clk_get_voltage(struct clk *clk, unsigned long rate)
  439. {
  440. struct clk_regmap *rclk;
  441. struct clk_hw *hw = __clk_get_hw(clk);
  442. int vdd_level;
  443. if (!clk_is_regmap_clk(hw))
  444. return -EINVAL;
  445. rclk = to_clk_regmap(hw);
  446. vdd_level = clk_find_vdd_level(hw, &rclk->vdd_data, rate);
  447. if (vdd_level < 0)
  448. return vdd_level;
  449. return clk_get_vdd_voltage(&rclk->vdd_data, vdd_level);
  450. }
  451. EXPORT_SYMBOL(qcom_clk_get_voltage);
  452. int qcom_clk_set_flags(struct clk *clk, unsigned long flags)
  453. {
  454. struct clk_regmap *rclk;
  455. struct clk_hw *hw;
  456. if (IS_ERR_OR_NULL(clk))
  457. return 0;
  458. hw = __clk_get_hw(clk);
  459. if (IS_ERR_OR_NULL(hw))
  460. return -EINVAL;
  461. if (!clk_is_regmap_clk(hw))
  462. return -EINVAL;
  463. rclk = to_clk_regmap(hw);
  464. if (rclk->ops && rclk->ops->set_flags)
  465. return rclk->ops->set_flags(hw, flags);
  466. return 0;
  467. }
  468. EXPORT_SYMBOL(qcom_clk_set_flags);
/*
 * qcom_cc_runtime_init - set up runtime-PM resources for a clock controller
 * @pdev: platform device
 * @desc: controller description; ->path receives the interconnect handle
 *
 * Checks that the "iface" clock is available (probe-defer friendly),
 * initializes the vdd-class regulators, fetches the interconnect path and
 * hooks the "iface" clock into pm_clk so the runtime-PM callbacks below can
 * gate it. On failure everything acquired so far is released in reverse
 * order. Returns 0 on success or a negative errno.
 */
int qcom_cc_runtime_init(struct platform_device *pdev,
			 struct qcom_cc_desc *desc)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;
	int ret;

	/* Availability check only; pm_clk_add() re-acquires the clock below */
	clk = clk_get_optional(dev, "iface");
	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) != -EPROBE_DEFER)
			dev_err(dev, "unable to get iface clock\n");
		return PTR_ERR(clk);
	}
	clk_put(clk);

	ret = clk_regulator_init(dev, desc);
	if (ret)
		return ret;

	desc->path = of_icc_get(dev, NULL);
	if (IS_ERR(desc->path)) {
		if (PTR_ERR(desc->path) != -EPROBE_DEFER)
			dev_err(dev, "error getting path\n");
		ret = PTR_ERR(desc->path);
		goto deinit_clk_regulator;
	}

	/* resume/suspend callbacks read desc back via drvdata */
	platform_set_drvdata(pdev, desc);
	pm_runtime_enable(dev);
	ret = pm_clk_create(dev);
	if (ret)
		goto disable_pm_runtime;

	ret = pm_clk_add(dev, "iface");
	if (ret < 0) {
		dev_err(dev, "failed to acquire iface clock\n");
		goto destroy_pm_clk;
	}

	return 0;

destroy_pm_clk:
	pm_clk_destroy(dev);

disable_pm_runtime:
	pm_runtime_disable(dev);
	icc_put(desc->path);

deinit_clk_regulator:
	clk_regulator_deinit(desc);

	return ret;
}
EXPORT_SYMBOL(qcom_cc_runtime_init);
  513. int qcom_cc_runtime_resume(struct device *dev)
  514. {
  515. struct qcom_cc_desc *desc = dev_get_drvdata(dev);
  516. struct clk_vdd_class_data vdd_data = {0};
  517. int ret;
  518. int i;
  519. for (i = 0; i < desc->num_clk_regulators; i++) {
  520. vdd_data.vdd_class = desc->clk_regulators[i];
  521. if (!vdd_data.vdd_class)
  522. continue;
  523. ret = clk_vote_vdd_level(&vdd_data, 1);
  524. if (ret) {
  525. dev_warn(dev, "%s: failed to vote voltage\n", __func__);
  526. return ret;
  527. }
  528. }
  529. if (desc->path) {
  530. ret = icc_set_bw(desc->path, 0, 1);
  531. if (ret) {
  532. dev_warn(dev, "%s: failed to vote bw\n", __func__);
  533. return ret;
  534. }
  535. }
  536. ret = pm_clk_resume(dev);
  537. if (ret)
  538. dev_warn(dev, "%s: failed to enable clocks\n", __func__);
  539. return ret;
  540. }
  541. EXPORT_SYMBOL(qcom_cc_runtime_resume);
  542. int qcom_cc_runtime_suspend(struct device *dev)
  543. {
  544. struct qcom_cc_desc *desc = dev_get_drvdata(dev);
  545. struct clk_vdd_class_data vdd_data = {0};
  546. int ret;
  547. int i;
  548. ret = pm_clk_suspend(dev);
  549. if (ret)
  550. dev_warn(dev, "%s: failed to disable clocks\n", __func__);
  551. if (desc->path) {
  552. ret = icc_set_bw(desc->path, 0, 0);
  553. if (ret)
  554. dev_warn(dev, "%s: failed to unvote bw\n", __func__);
  555. }
  556. for (i = 0; i < desc->num_clk_regulators; i++) {
  557. vdd_data.vdd_class = desc->clk_regulators[i];
  558. if (!vdd_data.vdd_class)
  559. continue;
  560. ret = clk_unvote_vdd_level(&vdd_data, 1);
  561. if (ret)
  562. dev_warn(dev, "%s: failed to unvote voltage\n",
  563. __func__);
  564. }
  565. return 0;
  566. }
  567. EXPORT_SYMBOL(qcom_cc_runtime_suspend);
/* Module exit: tear down the clock debugfs state set up by clk-debug. */
static void __exit qcom_clk_exit(void)
{
	clk_debug_exit();
}
module_exit(qcom_clk_exit);

MODULE_DESCRIPTION("Common QCOM clock control library");
MODULE_LICENSE("GPL v2");