  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
  4. *
  5. * Copyright (c) 2011 Rafael J. Wysocki <[email protected]>, Renesas Electronics Corp.
  6. */
  7. #include <linux/kernel.h>
  8. #include <linux/device.h>
  9. #include <linux/io.h>
  10. #include <linux/pm.h>
  11. #include <linux/pm_clock.h>
  12. #include <linux/clk.h>
  13. #include <linux/clkdev.h>
  14. #include <linux/of_clk.h>
  15. #include <linux/slab.h>
  16. #include <linux/err.h>
  17. #include <linux/pm_domain.h>
  18. #include <linux/pm_runtime.h>
  19. #ifdef CONFIG_PM_CLK
/*
 * Status of a PM clock entry, tracking how far the clock has been taken
 * through the clk_get()/clk_prepare()/clk_enable() sequence.
 */
enum pce_status {
	PCE_STATUS_NONE = 0,	/* entry allocated, clock not yet acquired */
	PCE_STATUS_ACQUIRED,	/* clk_get() done, clk_prepare() deferred */
	PCE_STATUS_PREPARED,	/* clk_prepare() done, clock not enabled */
	PCE_STATUS_ENABLED,	/* clock prepared and enabled */
	PCE_STATUS_ERROR,	/* acquiring or preparing the clock failed */
};

/* One clock managed on behalf of a device for runtime PM. */
struct pm_clock_entry {
	struct list_head node;		/* link in pm_subsys_data clock_list */
	char *con_id;			/* connection ID (kstrdup()'d), may be NULL */
	struct clk *clk;		/* clock handle; may hold an ERR_PTR() on error */
	enum pce_status status;		/* current stage in the clk sequence */
	bool enabled_when_prepared;	/* clk_prepare() also enables (may sleep) */
};
/**
 * pm_clk_list_lock - ensure exclusive access for modifying the PM clock
 * entry list.
 * @psd: pm_subsys_data instance corresponding to the PM clock entry list
 * and clk_op_might_sleep count to be modified.
 *
 * Get exclusive access before modifying the PM clock entry list and the
 * clock_op_might_sleep count to guard against concurrent modifications.
 * This also protects against a concurrent clock_op_might_sleep and PM clock
 * entry list usage in pm_clk_suspend()/pm_clk_resume() that may or may not
 * happen in atomic context, hence both the mutex and the spinlock must be
 * taken here.
 */
static void pm_clk_list_lock(struct pm_subsys_data *psd)
	__acquires(&psd->lock)
{
	/* Lock order: mutex first, then spinlock (irqs off). */
	mutex_lock(&psd->clock_mutex);
	spin_lock_irq(&psd->lock);
}
/**
 * pm_clk_list_unlock - counterpart to pm_clk_list_lock().
 * @psd: the same pm_subsys_data instance previously passed to
 * pm_clk_list_lock().
 *
 * Releases the spinlock and the mutex in the reverse of the order in
 * which pm_clk_list_lock() took them.
 */
static void pm_clk_list_unlock(struct pm_subsys_data *psd)
	__releases(&psd->lock)
{
	spin_unlock_irq(&psd->lock);
	mutex_unlock(&psd->clock_mutex);
}
  64. /**
  65. * pm_clk_op_lock - ensure exclusive access for performing clock operations.
  66. * @psd: pm_subsys_data instance corresponding to the PM clock entry list
  67. * and clk_op_might_sleep count being used.
  68. * @flags: stored irq flags.
  69. * @fn: string for the caller function's name.
  70. *
  71. * This is used by pm_clk_suspend() and pm_clk_resume() to guard
  72. * against concurrent modifications to the clock entry list and the
  73. * clock_op_might_sleep count. If clock_op_might_sleep is != 0 then
  74. * only the mutex can be locked and those functions can only be used in
  75. * non atomic context. If clock_op_might_sleep == 0 then these functions
  76. * may be used in any context and only the spinlock can be locked.
  77. * Returns -EINVAL if called in atomic context when clock ops might sleep.
  78. */
  79. static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
  80. const char *fn)
  81. /* sparse annotations don't work here as exit state isn't static */
  82. {
  83. bool atomic_context = in_atomic() || irqs_disabled();
  84. try_again:
  85. spin_lock_irqsave(&psd->lock, *flags);
  86. if (!psd->clock_op_might_sleep) {
  87. /* the __release is there to work around sparse limitations */
  88. __release(&psd->lock);
  89. return 0;
  90. }
  91. /* bail out if in atomic context */
  92. if (atomic_context) {
  93. pr_err("%s: atomic context with clock_ops_might_sleep = %d",
  94. fn, psd->clock_op_might_sleep);
  95. spin_unlock_irqrestore(&psd->lock, *flags);
  96. might_sleep();
  97. return -EPERM;
  98. }
  99. /* we must switch to the mutex */
  100. spin_unlock_irqrestore(&psd->lock, *flags);
  101. mutex_lock(&psd->clock_mutex);
  102. /*
  103. * There was a possibility for psd->clock_op_might_sleep
  104. * to become 0 above. Keep the mutex only if not the case.
  105. */
  106. if (likely(psd->clock_op_might_sleep))
  107. return 0;
  108. mutex_unlock(&psd->clock_mutex);
  109. goto try_again;
  110. }
/**
 * pm_clk_op_unlock - counterpart to pm_clk_op_lock().
 * @psd: the same pm_subsys_data instance previously passed to
 * pm_clk_op_lock().
 * @flags: irq flags provided by pm_clk_op_lock().
 *
 * Drops whichever lock pm_clk_op_lock() ended up holding: the mutex when
 * clock_op_might_sleep is non-zero, the spinlock otherwise.
 */
static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
	/* sparse annotations don't work here as entry state isn't static */
{
	if (psd->clock_op_might_sleep) {
		mutex_unlock(&psd->clock_mutex);
	} else {
		/* the __acquire is there to work around sparse limitations */
		__acquire(&psd->lock);
		spin_unlock_irqrestore(&psd->lock, *flags);
	}
}
  128. /**
  129. * __pm_clk_enable - Enable a clock, reporting any errors
  130. * @dev: The device for the given clock
  131. * @ce: PM clock entry corresponding to the clock.
  132. */
  133. static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
  134. {
  135. int ret;
  136. switch (ce->status) {
  137. case PCE_STATUS_ACQUIRED:
  138. ret = clk_prepare_enable(ce->clk);
  139. break;
  140. case PCE_STATUS_PREPARED:
  141. ret = clk_enable(ce->clk);
  142. break;
  143. default:
  144. return;
  145. }
  146. if (!ret)
  147. ce->status = PCE_STATUS_ENABLED;
  148. else
  149. dev_err(dev, "%s: failed to enable clk %p, error %d\n",
  150. __func__, ce->clk, ret);
  151. }
/**
 * pm_clk_acquire - Acquire a device clock.
 * @dev: Device whose clock is to be acquired.
 * @ce: PM clock entry corresponding to the clock.
 *
 * Look up the clock (unless a handle was already supplied through
 * pm_clk_add_clk()) and prepare it, except when clk_prepare() would also
 * enable the clock — preparing is then deferred to __pm_clk_enable()
 * because it may sleep.  On failure ce->status is set to PCE_STATUS_ERROR
 * and ce->clk may be left holding an ERR_PTR().
 */
static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
{
	/* ce->clk is already set when the caller passed a clk handle in. */
	if (!ce->clk)
		ce->clk = clk_get(dev, ce->con_id);
	if (IS_ERR(ce->clk)) {
		ce->status = PCE_STATUS_ERROR;
		return;
	} else if (clk_is_enabled_when_prepared(ce->clk)) {
		/* we defer preparing the clock in that case */
		ce->status = PCE_STATUS_ACQUIRED;
		ce->enabled_when_prepared = true;
	} else if (clk_prepare(ce->clk)) {
		ce->status = PCE_STATUS_ERROR;
		dev_err(dev, "clk_prepare() failed\n");
		return;
	} else {
		ce->status = PCE_STATUS_PREPARED;
	}
	dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
		ce->clk, ce->con_id);
}
/*
 * __pm_clk_add - Common worker for adding a clock to a device's PM clock list.
 * @dev: Device whose PM clock list the clock is added to.
 * @con_id: Connection ID of the clock, may be NULL.
 * @clk: Caller-supplied clock handle; only consulted when @con_id is NULL.
 *
 * Allocates a pm_clock_entry, acquires the clock and links the entry into
 * the device's pm_subsys_data clock list under the list lock.  Returns 0 on
 * success or a negative error code.
 */
static int __pm_clk_add(struct device *dev, const char *con_id,
			struct clk *clk)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;

	if (!psd)
		return -EINVAL;

	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
	if (!ce)
		return -ENOMEM;

	if (con_id) {
		ce->con_id = kstrdup(con_id, GFP_KERNEL);
		if (!ce->con_id) {
			kfree(ce);
			return -ENOMEM;
		}
	} else {
		if (IS_ERR(clk)) {
			kfree(ce);
			return -ENOENT;
		}
		ce->clk = clk;
	}

	pm_clk_acquire(dev, ce);

	pm_clk_list_lock(psd);
	list_add_tail(&ce->node, &psd->clock_list);
	/* Entries that may sleep when enabled force the mutex lock path. */
	if (ce->enabled_when_prepared)
		psd->clock_op_might_sleep++;
	pm_clk_list_unlock(psd);
	return 0;
}
/**
 * pm_clk_add - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @con_id: Connection ID of the clock.
 *
 * Add the clock represented by @con_id to the list of clocks used for
 * the power management of @dev.
 *
 * Returns 0 on success or a negative error code from __pm_clk_add().
 */
int pm_clk_add(struct device *dev, const char *con_id)
{
	return __pm_clk_add(dev, con_id, NULL);
}
EXPORT_SYMBOL_GPL(pm_clk_add);
/**
 * pm_clk_add_clk - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @clk: Clock pointer
 *
 * Add the clock to the list of clocks used for the power management of @dev.
 * The power-management code will take control of the clock reference, so
 * callers should not call clk_put() on @clk after this function successfully
 * returned.
 */
int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
	return __pm_clk_add(dev, NULL, clk);
}
EXPORT_SYMBOL_GPL(pm_clk_add_clk);
  237. /**
  238. * of_pm_clk_add_clk - Start using a device clock for power management.
  239. * @dev: Device whose clock is going to be used for power management.
  240. * @name: Name of clock that is going to be used for power management.
  241. *
  242. * Add the clock described in the 'clocks' device-tree node that matches
  243. * with the 'name' provided, to the list of clocks used for the power
  244. * management of @dev. On success, returns 0. Returns a negative error
  245. * code if the clock is not found or cannot be added.
  246. */
  247. int of_pm_clk_add_clk(struct device *dev, const char *name)
  248. {
  249. struct clk *clk;
  250. int ret;
  251. if (!dev || !dev->of_node || !name)
  252. return -EINVAL;
  253. clk = of_clk_get_by_name(dev->of_node, name);
  254. if (IS_ERR(clk))
  255. return PTR_ERR(clk);
  256. ret = pm_clk_add_clk(dev, clk);
  257. if (ret) {
  258. clk_put(clk);
  259. return ret;
  260. }
  261. return 0;
  262. }
  263. EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
/**
 * of_pm_clk_add_clks - Start using device clock(s) for power management.
 * @dev: Device whose clock(s) is going to be used for power management.
 *
 * Add a series of clocks described in the 'clocks' device-tree node for
 * a device to the list of clocks used for the power management of @dev.
 * On success, returns the number of clocks added. Returns a negative
 * error code if there are no clocks in the device node for the device
 * or if adding a clock fails.
 */
int of_pm_clk_add_clks(struct device *dev)
{
	struct clk **clks;
	int i, count;
	int ret;

	if (!dev || !dev->of_node)
		return -EINVAL;

	count = of_clk_get_parent_count(dev->of_node);
	if (count <= 0)
		return -ENODEV;

	clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		clks[i] = of_clk_get(dev->of_node, i);
		if (IS_ERR(clks[i])) {
			ret = PTR_ERR(clks[i]);
			goto error;
		}

		ret = pm_clk_add_clk(dev, clks[i]);
		if (ret) {
			/* pm_clk_add_clk() did not take the reference here */
			clk_put(clks[i]);
			goto error;
		}
	}

	kfree(clks);

	return i;

error:
	/* Unwind only the clocks that were successfully added. */
	while (i--)
		pm_clk_remove_clk(dev, clks[i]);
	kfree(clks);

	return ret;
}
EXPORT_SYMBOL_GPL(of_pm_clk_add_clks);
/**
 * __pm_clk_remove - Destroy PM clock entry.
 * @ce: PM clock entry to destroy.
 *
 * Undo whatever stage the entry reached — disable, unprepare, then put the
 * clock — and free the entry.  Must only be called after the entry has been
 * removed from its clock list.
 */
static void __pm_clk_remove(struct pm_clock_entry *ce)
{
	if (!ce)
		return;

	/* Each case falls through to undo the earlier stages as well. */
	switch (ce->status) {
	case PCE_STATUS_ENABLED:
		clk_disable(ce->clk);
		fallthrough;
	case PCE_STATUS_PREPARED:
		clk_unprepare(ce->clk);
		fallthrough;
	case PCE_STATUS_ACQUIRED:
	case PCE_STATUS_ERROR:
		/* In the ERROR case ce->clk may hold an ERR_PTR(). */
		if (!IS_ERR(ce->clk))
			clk_put(ce->clk);
		break;
	default:
		break;
	}

	kfree(ce->con_id);
	kfree(ce);
}
  334. /**
  335. * pm_clk_remove - Stop using a device clock for power management.
  336. * @dev: Device whose clock should not be used for PM any more.
  337. * @con_id: Connection ID of the clock.
  338. *
  339. * Remove the clock represented by @con_id from the list of clocks used for
  340. * the power management of @dev.
  341. */
  342. void pm_clk_remove(struct device *dev, const char *con_id)
  343. {
  344. struct pm_subsys_data *psd = dev_to_psd(dev);
  345. struct pm_clock_entry *ce;
  346. if (!psd)
  347. return;
  348. pm_clk_list_lock(psd);
  349. list_for_each_entry(ce, &psd->clock_list, node) {
  350. if (!con_id && !ce->con_id)
  351. goto remove;
  352. else if (!con_id || !ce->con_id)
  353. continue;
  354. else if (!strcmp(con_id, ce->con_id))
  355. goto remove;
  356. }
  357. pm_clk_list_unlock(psd);
  358. return;
  359. remove:
  360. list_del(&ce->node);
  361. if (ce->enabled_when_prepared)
  362. psd->clock_op_might_sleep--;
  363. pm_clk_list_unlock(psd);
  364. __pm_clk_remove(ce);
  365. }
  366. EXPORT_SYMBOL_GPL(pm_clk_remove);
  367. /**
  368. * pm_clk_remove_clk - Stop using a device clock for power management.
  369. * @dev: Device whose clock should not be used for PM any more.
  370. * @clk: Clock pointer
  371. *
  372. * Remove the clock pointed to by @clk from the list of clocks used for
  373. * the power management of @dev.
  374. */
  375. void pm_clk_remove_clk(struct device *dev, struct clk *clk)
  376. {
  377. struct pm_subsys_data *psd = dev_to_psd(dev);
  378. struct pm_clock_entry *ce;
  379. if (!psd || !clk)
  380. return;
  381. pm_clk_list_lock(psd);
  382. list_for_each_entry(ce, &psd->clock_list, node) {
  383. if (clk == ce->clk)
  384. goto remove;
  385. }
  386. pm_clk_list_unlock(psd);
  387. return;
  388. remove:
  389. list_del(&ce->node);
  390. if (ce->enabled_when_prepared)
  391. psd->clock_op_might_sleep--;
  392. pm_clk_list_unlock(psd);
  393. __pm_clk_remove(ce);
  394. }
  395. EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
  396. /**
  397. * pm_clk_init - Initialize a device's list of power management clocks.
  398. * @dev: Device to initialize the list of PM clocks for.
  399. *
  400. * Initialize the lock and clock_list members of the device's pm_subsys_data
  401. * object, set the count of clocks that might sleep to 0.
  402. */
  403. void pm_clk_init(struct device *dev)
  404. {
  405. struct pm_subsys_data *psd = dev_to_psd(dev);
  406. if (psd) {
  407. INIT_LIST_HEAD(&psd->clock_list);
  408. mutex_init(&psd->clock_mutex);
  409. psd->clock_op_might_sleep = 0;
  410. }
  411. }
  412. EXPORT_SYMBOL_GPL(pm_clk_init);
/**
 * pm_clk_create - Create and initialize a device's list of PM clocks.
 * @dev: Device to create and initialize the list of PM clocks for.
 *
 * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
 * members and make the @dev's power.subsys_data field point to it.
 *
 * Returns 0 on success or a negative error code from
 * dev_pm_get_subsys_data().
 */
int pm_clk_create(struct device *dev)
{
	return dev_pm_get_subsys_data(dev);
}
EXPORT_SYMBOL_GPL(pm_clk_create);
/**
 * pm_clk_destroy - Destroy a device's list of power management clocks.
 * @dev: Device to destroy the list of PM clocks for.
 *
 * Clear the @dev's power.subsys_data field, remove the list of clock entries
 * from the struct pm_subsys_data object pointed to by it before and free
 * that object.
 */
void pm_clk_destroy(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce, *c;
	struct list_head list;

	if (!psd)
		return;

	INIT_LIST_HEAD(&list);

	pm_clk_list_lock(psd);

	/*
	 * Move all entries onto a private list first, so the (possibly
	 * sleeping) clock teardown in __pm_clk_remove() can run below
	 * without the list locks held.
	 */
	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
		list_move(&ce->node, &list);
	psd->clock_op_might_sleep = 0;

	pm_clk_list_unlock(psd);

	dev_pm_put_subsys_data(dev);

	list_for_each_entry_safe_reverse(ce, c, &list, node) {
		list_del(&ce->node);
		__pm_clk_remove(ce);
	}
}
EXPORT_SYMBOL_GPL(pm_clk_destroy);
/* devres action callback: tear down the PM clock list created for @data. */
static void pm_clk_destroy_action(void *data)
{
	pm_clk_destroy(data);
}
/**
 * devm_pm_clk_create - Managed counterpart of pm_clk_create().
 * @dev: Device to create and initialize the list of PM clocks for.
 *
 * Like pm_clk_create(), but additionally registers a devres action so the
 * PM clock list is destroyed automatically when @dev is released.
 *
 * Returns 0 on success or a negative error code.
 */
int devm_pm_clk_create(struct device *dev)
{
	int ret;

	ret = pm_clk_create(dev);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, pm_clk_destroy_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_clk_create);
/**
 * pm_clk_suspend - Disable clocks in a device's PM clock list.
 * @dev: Device to disable the clocks for.
 *
 * Returns 0 on success (including when @dev has no pm_subsys_data), or
 * the error from pm_clk_op_lock() when called in atomic context while
 * clock operations might sleep.
 */
int pm_clk_suspend(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;
	unsigned long flags;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (!psd)
		return 0;

	ret = pm_clk_op_lock(psd, &flags, __func__);
	if (ret)
		return ret;

	/* Disable in reverse of the order in which the clocks were added. */
	list_for_each_entry_reverse(ce, &psd->clock_list, node) {
		if (ce->status == PCE_STATUS_ENABLED) {
			if (ce->enabled_when_prepared) {
				/*
				 * This clock was enabled by clk_prepare(),
				 * so it must be unprepared to actually stop.
				 */
				clk_disable_unprepare(ce->clk);
				ce->status = PCE_STATUS_ACQUIRED;
			} else {
				clk_disable(ce->clk);
				ce->status = PCE_STATUS_PREPARED;
			}
		}
	}

	pm_clk_op_unlock(psd, &flags);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_suspend);
/**
 * pm_clk_resume - Enable clocks in a device's PM clock list.
 * @dev: Device to enable the clocks for.
 *
 * Returns 0 on success (including when @dev has no pm_subsys_data), or
 * the error from pm_clk_op_lock() when called in atomic context while
 * clock operations might sleep.
 */
int pm_clk_resume(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;
	unsigned long flags;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (!psd)
		return 0;

	ret = pm_clk_op_lock(psd, &flags, __func__);
	if (ret)
		return ret;

	/* Re-enable in the order the clocks were added. */
	list_for_each_entry(ce, &psd->clock_list, node)
		__pm_clk_enable(dev, ce);

	pm_clk_op_unlock(psd, &flags);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_resume);
/**
 * pm_clk_notify - Notify routine for device addition and removal.
 * @nb: Notifier block object this function is a member of.
 * @action: Operation being carried out by the caller.
 * @data: Device the routine is being run for.
 *
 * For this function to work, @nb must be a member of an object of type
 * struct pm_clk_notifier_block containing all of the requisite data.
 * Specifically, the pm_domain member of that object is copied to the device's
 * pm_domain field and its con_ids member is used to populate the device's list
 * of PM clocks, depending on @action.
 *
 * If the device's pm_domain field is already populated with a value different
 * from the one stored in the struct pm_clk_notifier_block object, the function
 * does nothing.
 */
static int pm_clk_notify(struct notifier_block *nb,
			 unsigned long action, void *data)
{
	struct pm_clk_notifier_block *clknb;
	struct device *dev = data;
	char **con_id;
	int error;

	dev_dbg(dev, "%s() %ld\n", __func__, action);

	clknb = container_of(nb, struct pm_clk_notifier_block, nb);

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		/* Do not override an already assigned PM domain. */
		if (dev->pm_domain)
			break;

		error = pm_clk_create(dev);
		if (error)
			break;

		dev_pm_domain_set(dev, clknb->pm_domain);
		if (clknb->con_ids[0]) {
			for (con_id = clknb->con_ids; *con_id; con_id++)
				pm_clk_add(dev, *con_id);
		} else {
			/* No con_ids: manage the device's default clock. */
			pm_clk_add(dev, NULL);
		}

		break;
	case BUS_NOTIFY_DEL_DEVICE:
		/* Only undo what this notifier set up itself. */
		if (dev->pm_domain != clknb->pm_domain)
			break;

		dev_pm_domain_set(dev, NULL);
		pm_clk_destroy(dev);
		break;
	}

	return 0;
}
/**
 * pm_clk_runtime_suspend - Runtime-suspend a device and its PM clocks.
 * @dev: Device to suspend.
 *
 * Run the device's generic runtime-suspend callback first, then switch
 * its PM clocks off; the device is resumed again if the clocks cannot
 * be suspended.  Returns 0 on success or a negative error code.
 */
int pm_clk_runtime_suspend(struct device *dev)
{
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = pm_generic_runtime_suspend(dev);
	if (ret) {
		dev_err(dev, "failed to suspend device\n");
		return ret;
	}

	ret = pm_clk_suspend(dev);
	if (ret) {
		dev_err(dev, "failed to suspend clock\n");
		/* Undo the generic suspend to keep the state consistent. */
		pm_generic_runtime_resume(dev);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend);
/**
 * pm_clk_runtime_resume - Runtime-resume a device and its PM clocks.
 * @dev: Device to resume.
 *
 * Switch the device's PM clocks back on first, then run its generic
 * runtime-resume callback.  Returns 0 on success or a negative error code.
 */
int pm_clk_runtime_resume(struct device *dev)
{
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = pm_clk_resume(dev);
	if (ret) {
		dev_err(dev, "failed to resume clock\n");
		return ret;
	}

	return pm_generic_runtime_resume(dev);
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_resume);
  598. #else /* !CONFIG_PM_CLK */
  599. /**
  600. * enable_clock - Enable a device clock.
  601. * @dev: Device whose clock is to be enabled.
  602. * @con_id: Connection ID of the clock.
  603. */
  604. static void enable_clock(struct device *dev, const char *con_id)
  605. {
  606. struct clk *clk;
  607. clk = clk_get(dev, con_id);
  608. if (!IS_ERR(clk)) {
  609. clk_prepare_enable(clk);
  610. clk_put(clk);
  611. dev_info(dev, "Runtime PM disabled, clock forced on.\n");
  612. }
  613. }
  614. /**
  615. * disable_clock - Disable a device clock.
  616. * @dev: Device whose clock is to be disabled.
  617. * @con_id: Connection ID of the clock.
  618. */
  619. static void disable_clock(struct device *dev, const char *con_id)
  620. {
  621. struct clk *clk;
  622. clk = clk_get(dev, con_id);
  623. if (!IS_ERR(clk)) {
  624. clk_disable_unprepare(clk);
  625. clk_put(clk);
  626. dev_info(dev, "Runtime PM disabled, clock forced off.\n");
  627. }
  628. }
/**
 * pm_clk_notify - Notify routine for device addition and removal.
 * @nb: Notifier block object this function is a member of.
 * @action: Operation being carried out by the caller.
 * @data: Device the routine is being run for.
 *
 * For this function to work, @nb must be a member of an object of type
 * struct pm_clk_notifier_block containing all of the requisite data.
 * Specifically, the con_ids member of that object is used to enable or disable
 * the device's clocks, depending on @action.
 */
static int pm_clk_notify(struct notifier_block *nb,
			 unsigned long action, void *data)
{
	struct pm_clk_notifier_block *clknb;
	struct device *dev = data;
	char **con_id;

	dev_dbg(dev, "%s() %ld\n", __func__, action);

	clknb = container_of(nb, struct pm_clk_notifier_block, nb);

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		/* Force the clocks on while a driver is bound. */
		if (clknb->con_ids[0]) {
			for (con_id = clknb->con_ids; *con_id; con_id++)
				enable_clock(dev, *con_id);
		} else {
			enable_clock(dev, NULL);
		}
		break;
	case BUS_NOTIFY_DRIVER_NOT_BOUND:
	case BUS_NOTIFY_UNBOUND_DRIVER:
		/* Force the clocks back off when no driver is bound. */
		if (clknb->con_ids[0]) {
			for (con_id = clknb->con_ids; *con_id; con_id++)
				disable_clock(dev, *con_id);
		} else {
			disable_clock(dev, NULL);
		}
		break;
	}

	return 0;
}
  669. #endif /* !CONFIG_PM_CLK */
/**
 * pm_clk_add_notifier - Add bus type notifier for power management clocks.
 * @bus: Bus type to add the notifier to.
 * @clknb: Notifier to be added to the given bus type.
 *
 * The nb member of @clknb is not expected to be initialized and its
 * notifier_call member will be replaced with pm_clk_notify(). However,
 * the remaining members of @clknb should be populated prior to calling this
 * routine.
 */
void pm_clk_add_notifier(struct bus_type *bus,
			 struct pm_clk_notifier_block *clknb)
{
	if (!bus || !clknb)
		return;

	clknb->nb.notifier_call = pm_clk_notify;
	bus_register_notifier(bus, &clknb->nb);
}
EXPORT_SYMBOL_GPL(pm_clk_add_notifier);