/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/include/linux/clk.h
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *  Copyright (C) 2011-2012 Linaro Ltd <[email protected]>
 */
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct device;
struct clk;
struct device_node;
struct of_phandle_args;

/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 *     to indicate that the rate change will proceed. Drivers must
 *     immediately terminate any operations that will be affected by the
 *     rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
 *     NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 *     after PRE_RATE_CHANGE. In this case, all registered notifiers on
 *     the clk will be called with ABORT_RATE_CHANGE. Callbacks must
 *     always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 *     completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 *
 */
#define PRE_RATE_CHANGE BIT(0)
#define POST_RATE_CHANGE BIT(1)
#define ABORT_RATE_CHANGE BIT(2)

/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: an srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk. Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
        struct clk *clk;
        struct srcu_notifier_head notifier_head;
        struct list_head node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future. For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
        struct clk *clk;
        unsigned long old_rate;
        unsigned long new_rate;
};
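
/*
 * Editor's example (illustrative sketch, not part of the original header):
 * a rate-change notifier callback receives a pointer to a struct
 * clk_notifier_data via @data and dispatches on the event type. The callback
 * name and the 100 MHz limit below are hypothetical.
 *
 *      static int foo_clk_notify(struct notifier_block *nb,
 *              unsigned long event, void *data)
 *      {
 *              struct clk_notifier_data *ndata = data;
 *
 *              if (event == PRE_RATE_CHANGE && ndata->new_rate > 100000000)
 *                      return NOTIFY_BAD;
 *              return NOTIFY_OK;
 *      }
 */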

/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_*() API calls as
 * a convenience to consumers which require multiple clks. This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
        const char *id;
        struct clk *clk;
};
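
/*
 * Editor's example (illustrative sketch, not part of the original header):
 * consumers typically build a table of clk_bulk_data entries and hand it to
 * the clk_bulk_*() calls declared below. The "ahb" and "baud" clock names
 * are hypothetical.
 *
 *      static struct clk_bulk_data foo_clks[] = {
 *              { .id = "ahb" },
 *              { .id = "baud" },
 *      };
 *
 *      ret = clk_bulk_get(dev, ARRAY_SIZE(foo_clks), foo_clks);
 */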

#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating. Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);

/**
 * clk_notifier_unregister - unregister a clock rate-change notifier callback
 * @clk: clock whose rate we are no longer interested in
 * @nb: notifier block which will be unregistered
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);

/**
 * devm_clk_notifier_register - register a managed rate-change notifier callback
 * @dev: device for clock "consumer"
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * Returns 0 on success, -EERROR otherwise
 */
int devm_clk_notifier_register(struct device *dev, struct clk *clk,
        struct notifier_block *nb);

/**
 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
 *                    for a clock source.
 * @clk: clock source
 *
 * This gets the clock source accuracy expressed in ppb.
 * A perfect clock returns 0.
 */
long clk_get_accuracy(struct clk *clk);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_phase(struct clk *clk, int degrees);

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * -EERROR.
 */
int clk_get_phase(struct clk *clk);

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
 * returns -EERROR.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
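
/*
 * Editor's example (illustrative sketch, not part of the original header):
 * requesting a 50% duty cycle and reading it back as a percentage. The clk
 * pointer is assumed to have been obtained with clk_get() already.
 *
 *      ret = clk_set_duty_cycle(clk, 1, 2);
 *      pct = clk_get_scaled_duty_cycle(clk, 100);
 *
 * With a scale of 100, a ratio of 1/2 reads back as roughly 50.
 */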

/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if @p and @q
 * share the same &struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q);

/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 *                          producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider. It prevents any other consumer from executing, even indirectly,
 * any operation which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
 */
int clk_rate_exclusive_get(struct clk *clk);

/**
 * clk_rate_exclusive_put - release exclusivity over the rate control of a
 *                          producer
 * @clk: clock source
 *
 * This function allows drivers to release the exclusivity they previously
 * claimed with clk_rate_exclusive_get().
 *
 * The caller must balance the number of clk_rate_exclusive_get() and
 * clk_rate_exclusive_put() calls.
 *
 * Must not be called from within atomic context.
 */
void clk_rate_exclusive_put(struct clk *clk);
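
/*
 * Editor's example (illustrative sketch, not part of the original header):
 * pinning a producer's rate for the duration of a rate-sensitive operation.
 * The 48 MHz rate is hypothetical and error handling is omitted.
 *
 *      clk_rate_exclusive_get(clk);
 *      clk_set_rate(clk, 48000000);
 *      ... perform the rate-sensitive work ...
 *      clk_rate_exclusive_put(clk);
 *
 * clk_set_rate_exclusive(), declared further down, combines the first two
 * steps into one call.
 */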

#else

static inline int clk_notifier_register(struct clk *clk,
        struct notifier_block *nb)
{
        return -ENOTSUPP;
}

static inline int clk_notifier_unregister(struct clk *clk,
        struct notifier_block *nb)
{
        return -ENOTSUPP;
}

static inline int devm_clk_notifier_register(struct device *dev,
        struct clk *clk, struct notifier_block *nb)
{
        return -ENOTSUPP;
}

static inline long clk_get_accuracy(struct clk *clk)
{
        return -ENOTSUPP;
}

static inline long clk_set_phase(struct clk *clk, int phase)
{
        return -ENOTSUPP;
}

static inline long clk_get_phase(struct clk *clk)
{
        return -ENOTSUPP;
}

static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
        unsigned int den)
{
        return -ENOTSUPP;
}

static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
        unsigned int scale)
{
        return 0;
}

static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
        return p == q;
}

static inline int clk_rate_exclusive_get(struct clk *clk)
{
        return 0;
}

static inline void clk_rate_exclusive_put(struct clk *clk) {}

#endif

#ifdef CONFIG_HAVE_CLK_PREPARE
/**
 * clk_prepare - prepare a clock source
 * @clk: clock source
 *
 * This prepares the clock source for use.
 *
 * Must not be called from within atomic context.
 */
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
        const struct clk_bulk_data *clks);

/**
 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
 * @clk: clock source
 *
 * Returns true if clk_prepare() implicitly enables the clock, effectively
 * making clk_enable()/clk_disable() no-ops, false otherwise.
 *
 * This is of interest mainly to the power management code where actually
 * disabling the clock also requires unpreparing it to have any material
 * effect.
 *
 * Regardless of the value returned here, the caller must always invoke
 * clk_enable() or clk_prepare_enable() and counterparts for usage counts
 * to be right.
 */
bool clk_is_enabled_when_prepared(struct clk *clk);
#else
static inline int clk_prepare(struct clk *clk)
{
        might_sleep();
        return 0;
}

static inline int __must_check
clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
        might_sleep();
        return 0;
}

static inline bool clk_is_enabled_when_prepared(struct clk *clk)
{
        return false;
}
#endif

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: clock source
 *
 * This undoes a previously prepared clock. The caller must balance
 * the number of prepare and unprepare calls.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
void clk_unprepare(struct clk *clk);
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
#else
static inline void clk_unprepare(struct clk *clk)
{
        might_sleep();
}
static inline void clk_bulk_unprepare(int num_clks,
        const struct clk_bulk_data *clks)
{
        might_sleep();
}
#endif

#ifdef CONFIG_HAVE_CLK
/**
 * clk_get - lookup and obtain a reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer. (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get should not be called from within interrupt context.
 */
struct clk *clk_get(struct device *dev, const char *id);
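
/*
 * Editor's example (illustrative sketch, not part of the original header):
 * the canonical consumer sequence built from the calls declared in this
 * file. The "ahb" consumer ID is hypothetical.
 *
 *      struct clk *clk;
 *      int ret;
 *
 *      clk = clk_get(dev, "ahb");
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 *
 *      ret = clk_prepare_enable(clk);
 *      if (ret) {
 *              clk_put(clk);
 *              return ret;
 *      }
 *
 *      ... use the device ...
 *
 *      clk_disable_unprepare(clk);
 *      clk_put(clk);
 */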

/**
 * clk_bulk_get - lookup and obtain a number of references to clock producer.
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation. If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
 * successfully, or valid IS_ERR() condition containing errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get should not be called from within interrupt context.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
        struct clk_bulk_data *clks);

/**
 * clk_bulk_get_all - lookup and obtain all available references to clock
 *                    producer.
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get all clk consumers in one
 * operation. If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get_all should not be called from within interrupt context.
 */
int __must_check clk_bulk_get_all(struct device *dev,
        struct clk_bulk_data **clks);

/**
 * clk_bulk_get_optional - lookup and obtain a number of references to clock producer
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Behaves the same as clk_bulk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns 0 and
 * NULL for a clk for which a clock producer could not be determined.
 */
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
        struct clk_bulk_data *clks);

/**
 * devm_clk_bulk_get - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Returns 0 on success, an errno on failure.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management; the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
        struct clk_bulk_data *clks);
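
/*
 * Editor's example (illustrative sketch, not part of the original header):
 * a managed bulk get in a probe path; the hypothetical foo_clks[] table from
 * the earlier example is reused and the references need no explicit release
 * on driver unbind.
 *
 *      ret = devm_clk_bulk_get(dev, ARRAY_SIZE(foo_clks), foo_clks);
 *      if (ret)
 *              return ret;
 *
 *      ret = clk_bulk_prepare_enable(ARRAY_SIZE(foo_clks), foo_clks);
 *      if (ret)
 *              return ret;
 *
 * The matching clk_bulk_disable_unprepare() still has to be called on
 * remove/suspend; only the references themselves are devres-managed.
 */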

/**
 * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Behaves the same as devm_clk_bulk_get() except where there is no clock
 * producer. In this case, instead of returning -ENOENT, the function returns
 * NULL for a given clk. It is assumed all clocks in clk_bulk_data are optional.
 *
 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
 * successfully or for any clk there was no clk provider available, otherwise
 * returns valid IS_ERR() condition containing errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_bulk_get_optional should not be called from within interrupt
 * context.
 */
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
        struct clk_bulk_data *clks);

/**
 * devm_clk_bulk_get_all - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management; the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get_all(struct device *dev,
        struct clk_bulk_data **clks);

/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer. (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is neither prepared nor
 * enabled.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get(struct device *dev, const char *id);

/**
 * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer. (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * The returned clk (if valid) is prepared. Drivers must however assume
 * that the clock is not enabled.
 *
 * The clock will automatically be unprepared and freed when the device
 * is unbound from the bus.
 */
struct clk *devm_clk_get_prepared(struct device *dev, const char *id);

/**
 * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer. (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * The returned clk (if valid) is prepared and enabled.
 *
 * The clock will automatically be disabled, unprepared and freed
 * when the device is unbound from the bus.
 */
struct clk *devm_clk_get_enabled(struct device *dev, const char *id);
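
/*
 * Editor's example (illustrative sketch, not part of the original header):
 * the shortest probe-time pattern; no explicit cleanup is needed because
 * disable, unprepare and put are all devres-managed. The "core" consumer ID
 * is hypothetical.
 *
 *      clk = devm_clk_get_enabled(dev, "core");
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 */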

/**
 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
 *                         clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer. If no such clk is found, it returns NULL
 * which serves as a dummy clk. That's the only difference compared
 * to devm_clk_get().
 *
 * Drivers must assume that the clock source is neither prepared nor
 * enabled.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get_optional(struct device *dev, const char *id);

/**
 * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer. If no such clk is found, it returns NULL
 * which serves as a dummy clk. That's the only difference compared
 * to devm_clk_get_prepared().
 *
 * The returned clk (if valid) is prepared. Drivers must however
 * assume that the clock is not enabled.
 *
 * The clock will automatically be unprepared and freed when the
 * device is unbound from the bus.
 */
struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);

/**
 * devm_clk_get_optional_enabled - devm_clk_get_optional() +
 *                                 clk_prepare_enable()
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Context: May sleep.
 *
 * Return: a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer. If no such clk is found, it returns NULL
 * which serves as a dummy clk. That's the only difference compared
 * to devm_clk_get_enabled().
 *
 * The returned clk (if valid) is prepared and enabled.
 *
 * The clock will automatically be disabled, unprepared and freed
 * when the device is unbound from the bus.
 */
struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);

/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 *                           clock producer from child node.
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer node
 * @con_id: clock consumer ID
 *
 * This function parses the clocks, and uses them to look up the
 * struct clk from the registered list of clock providers by using
 * @np and @con_id.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_get_clk_from_child(struct device *dev,
        struct device_node *np, const char *con_id);

/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source
 *
 * If the clock cannot be enabled/disabled, this should return success.
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int clk_enable(struct clk *clk);

/**
 * clk_bulk_enable - inform the system when the set of clks should be running.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int __must_check clk_bulk_enable(int num_clks,
        const struct clk_bulk_data *clks);

/**
 * clk_disable - inform the system when the clock source is no longer required.
 * @clk: clock source
 *
 * Inform the system that a clock source is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the clock source is shared between
 * multiple drivers, clk_enable() calls must be balanced by the
 * same number of clk_disable() calls for the clock source to be
 * disabled.
 */
void clk_disable(struct clk *clk);

/**
 * clk_bulk_disable - inform the system when the set of clks is no
 *                    longer required.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Inform the system that a set of clks is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the set of clks is shared between
 * multiple drivers, clk_bulk_enable() calls must be balanced by the
 * same number of clk_bulk_disable() calls for the clock source to be
 * disabled.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);

/**
 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
 *                This is only valid once the clock source has been enabled.
 * @clk: clock source
 */
unsigned long clk_get_rate(struct clk *clk);

/**
 * clk_put - "free" the clock source
 * @clk: clock source
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void clk_put(struct clk *clk);

/**
 * clk_bulk_put - "free" the clock source
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put should not be called from within interrupt context.
 */
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);

/**
 * clk_bulk_put_all - "free" all the clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put_all should not be called from within interrupt context.
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);

/**
 * devm_clk_put - "free" a managed clock source
 * @dev: device used to acquire the clock
 * @clk: clock source acquired with devm_clk_get()
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * devm_clk_put should not be called from within interrupt context.
 */
void devm_clk_put(struct device *dev, struct clk *clk);

/*
 * The remaining APIs are optional for machine class support.
 */

/**
 * clk_round_rate - adjust a rate to the exact rate a clock can provide
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This answers the question "if I were to pass @rate to clk_set_rate(),
 * what clock rate would I end up with?" without changing the hardware
 * in any way. In other words:
 *
 *   rate = clk_round_rate(clk, r);
 *
 * and:
 *
 *   clk_set_rate(clk, r);
 *   rate = clk_get_rate(clk);
 *
 * are equivalent except the former does not modify the clock hardware
 * in any way.
 *
 * Returns rounded clock rate in Hz, or negative errno.
 */
long clk_round_rate(struct clk *clk, unsigned long rate);
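
/*
 * Editor's example (illustrative sketch, not part of the original header):
 * probing what a request for 75 MHz would actually give before committing
 * to it. The target rate is hypothetical, and is_acceptable() stands in for
 * whatever tolerance check the driver needs.
 *
 *      long rounded = clk_round_rate(clk, 75000000);
 *
 *      if (rounded > 0 && is_acceptable(rounded))
 *              clk_set_rate(clk, rounded);
 */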

/**
 * clk_set_rate - set the clock rate for a clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * Updating the rate starts at the top-most affected clock and then
 * walks the tree down to the bottom-most clock that needs updating.
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
 *                          clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This helper function allows drivers to atomically set the rate of a producer
 * and claim exclusivity over the rate control of the producer.
 *
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get(). Caller must balance this call with a call to
 * clk_rate_exclusive_put().
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(const struct clk *clk, const struct clk *parent);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_parent - set the parent clock source for this clock
 * @clk: clock source
 * @parent: parent clock source
 *
 * Returns success (0) or negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent);
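
/*
 * Editor's example (illustrative sketch, not part of the original header):
 * verifying a candidate parent before reparenting. The pll_clk pointer is a
 * hypothetical candidate parent obtained with clk_get() or a devm variant.
 *
 *      if (clk_has_parent(clk, pll_clk)) {
 *              ret = clk_set_parent(clk, pll_clk);
 *              if (ret)
 *                      return ret;
 *      }
 */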

/**
 * clk_get_parent - get the parent clock source for this clock
 * @clk: clock source
 *
 * Returns struct clk corresponding to parent clock source, or
 * valid IS_ERR() condition containing errno.
 */
struct clk *clk_get_parent(struct clk *clk);

/**
 * clk_get_sys - get a clock based upon the device name
 * @dev_id: device name
 * @con_id: connection ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno. The implementation
 * uses @dev_id and @con_id to determine the clock consumer, and
 * thereby the clock producer. In contrast to clk_get() this function
 * takes the device name instead of the device itself for identification.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get_sys should not be called from within interrupt context.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id);

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code so locking is not necessary.
 */
int clk_save_context(void);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * This occurs with all clocks enabled. Occurs deep within the resume code
 * so locking is not necessary.
 */
void clk_restore_context(void);

#else /* !CONFIG_HAVE_CLK */

static inline struct clk *clk_get(struct device *dev, const char *id)
{
        return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
        struct clk_bulk_data *clks)
{
        return 0;
}

static inline int __must_check clk_bulk_get_optional(struct device *dev,
        int num_clks, struct clk_bulk_data *clks)
{
        return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
        struct clk_bulk_data **clks)
{
        return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
        return NULL;
}

static inline struct clk *devm_clk_get_prepared(struct device *dev,
        const char *id)
{
        return NULL;
}

static inline struct clk *devm_clk_get_enabled(struct device *dev,
        const char *id)
{
        return NULL;
}

static inline struct clk *devm_clk_get_optional(struct device *dev,
        const char *id)
{
        return NULL;
}

static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
        const char *id)
{
        return NULL;
}

static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
        const char *id)
{
        return NULL;
}

static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
        struct clk_bulk_data *clks)
{
        return 0;
}

static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
        int num_clks, struct clk_bulk_data *clks)
{
        return 0;
}

static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
        struct clk_bulk_data **clks)
{
        return 0;
}

static inline struct clk *devm_get_clk_from_child(struct device *dev,
        struct device_node *np, const char *con_id)
{
        return NULL;
}

static inline void clk_put(struct clk *clk) {}

static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}

static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}

static inline int clk_enable(struct clk *clk)
{
        return 0;
}

static inline int __must_check clk_bulk_enable(int num_clks,
        const struct clk_bulk_data *clks)
{
        return 0;
}

static inline void clk_disable(struct clk *clk) {}

static inline void clk_bulk_disable(int num_clks,
        const struct clk_bulk_data *clks) {}

static inline unsigned long clk_get_rate(struct clk *clk)
{
        return 0;
}

static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
        return 0;
}

static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
        return 0;
}

static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
        return 0;
}

static inline bool clk_has_parent(const struct clk *clk, const struct clk *parent)
{
        return true;
}

static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
        unsigned long max)
{
        return 0;
}

static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
        return 0;
}

static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
        return 0;
}

static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
        return 0;
}

static inline struct clk *clk_get_parent(struct clk *clk)
{
        return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
        return NULL;
}

static inline int clk_save_context(void)
{
        return 0;
}

static inline void clk_restore_context(void) {}

#endif

/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
static inline int clk_prepare_enable(struct clk *clk)
{
        int ret;

        ret = clk_prepare(clk);
        if (ret)
                return ret;
        ret = clk_enable(clk);
        if (ret)
                clk_unprepare(clk);

        return ret;
}

/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
static inline void clk_disable_unprepare(struct clk *clk)
{
        clk_disable(clk);
        clk_unprepare(clk);
}

static inline int __must_check
clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks)
{
        int ret;

        ret = clk_bulk_prepare(num_clks, clks);
        if (ret)
                return ret;
        ret = clk_bulk_enable(num_clks, clks);
        if (ret)
                clk_bulk_unprepare(num_clks, clks);

        return ret;
}

static inline void clk_bulk_disable_unprepare(int num_clks,
        const struct clk_bulk_data *clks)
{
        clk_bulk_disable(num_clks, clks);
        clk_bulk_unprepare(num_clks, clks);
}

/**
 * clk_drop_range - Reset any range set on that clock
 * @clk: clock source
 *
 * Returns success (0) or negative errno.
 */
static inline int clk_drop_range(struct clk *clk)
{
        return clk_set_rate_range(clk, 0, ULONG_MAX);
}
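
/*
 * Editor's example (illustrative sketch, not part of the original header):
 * constraining a clock while a device is active and lifting the constraint
 * again afterwards. The 100-200 MHz window is hypothetical.
 *
 *      ret = clk_set_rate_range(clk, 100000000, 200000000);
 *      ...
 *      clk_drop_range(clk);
 */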

/**
 * clk_get_optional - lookup and obtain a reference to an optional clock
 *                    producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as clk_get() except where there is no clock producer. In
 * this case, instead of returning -ENOENT, the function returns NULL.
 */
static inline struct clk *clk_get_optional(struct device *dev, const char *id)
{
        struct clk *clk = clk_get(dev, id);

        if (clk == ERR_PTR(-ENOENT))
                return NULL;

        return clk;
}
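
/*
 * Editor's example (illustrative sketch, not part of the original header):
 * an optional clock can be handled like a mandatory one because the consumer
 * API treats a NULL clk as a no-op dummy. The "debounce" ID is hypothetical.
 *
 *      clk = clk_get_optional(dev, "debounce");
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 *
 * A NULL return here is not an error: the driver can go on to call
 * clk_prepare_enable() and friends unconditionally.
 */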

#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
#else
static inline struct clk *of_clk_get(struct device_node *np, int index)
{
        return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_by_name(struct device_node *np,
        const char *name)
{
        return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
        return ERR_PTR(-ENOENT);
}
#endif
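
/*
 * Editor's example (illustrative sketch, not part of the original header):
 * looking a clock up directly from a device_node, e.g. in code that runs
 * before a struct device exists. The node pointer and the "ref" clock-names
 * entry are hypothetical.
 *
 *      clk = of_clk_get_by_name(np, "ref");
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 */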

#endif