intel_wakeref.h

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct delayed_work work;
};

struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);

#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)
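
/*
 * Illustrative sketch of how a user wires up the ops and initialises a
 * wakeref. The example_unpark()/example_park() callbacks and the
 * example_wakeref_setup() helper are hypothetical names; only
 * intel_wakeref_ops and intel_wakeref_init() come from the API above.
 */
#if 0
static int example_unpark(struct intel_wakeref *wf)
{
	/* power up; called under wf->mutex on the first acquisition */
	return 0;
}

static int example_park(struct intel_wakeref *wf)
{
	/* power down; called under wf->mutex once the last user is gone */
	return 0;
}

static const struct intel_wakeref_ops example_ops = {
	.get = example_unpark,
	.put = example_park,
};

static void example_wakeref_setup(struct intel_wakeref *wf,
				  struct intel_runtime_pm *rpm)
{
	intel_wakeref_init(wf, rpm, &example_ops);
}
#endif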

int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime-pm wakeref and then call the @wf->ops->get() callback
 * underneath the wakeref mutex.
 *
 * Note that @wf->ops->get() is allowed to fail, in which case the
 * runtime-pm wakeref will be released, the acquisition unwound, and an
 * error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}
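
/*
 * Illustrative sketch of the expected calling pattern, assuming a
 * hypothetical example_access_hw() driver helper; intel_wakeref_put() is
 * defined further down in this header.
 */
#if 0
static int example_access_hw(struct intel_wakeref *wf)
{
	int err;

	err = intel_wakeref_get(wf);	/* may sleep on the first acquire */
	if (err)			/* ops->get failed, nothing is held */
		return err;

	/* ... the device is guaranteed awake here ... */

	intel_wakeref_put(wf);		/* the last release invokes ops->put */
	return 0;
}
#endif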

/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter, only valid if it is already held by
 * the caller.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}
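
/*
 * Illustrative sketch: opportunistically piggy-back on an already-active
 * wakeref without risking a sleep. example_flush_if_awake() is a
 * hypothetical helper; intel_wakeref_put_async() is defined further down
 * in this header.
 */
#if 0
static void example_flush_if_awake(struct intel_wakeref *wf)
{
	if (!intel_wakeref_get_if_active(wf))
		return;	/* idle: nothing acquired, nothing to flush */

	/* ... safe to touch the hardware while the wakeref is held ... */

	intel_wakeref_put_async(wf);
}
#endif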

enum {
	INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
	__INTEL_WAKEREF_PUT_LAST_BIT__
};

static inline void
intel_wakeref_might_get(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime-pm wakeref will be released after the @wf->ops->put()
 * callback is called underneath the wakeref mutex.
 *
 * Note that @wf->ops->put() is allowed to fail, in which case the
 * runtime-pm wakeref is retained and an error reported.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
#define INTEL_WAKEREF_PUT_DELAY \
	GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}

static inline void
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
{
	__intel_wakeref_put(wf,
			    INTEL_WAKEREF_PUT_ASYNC |
			    FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}
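
/*
 * Illustrative sketch of choosing a release variant; example_release() is
 * a hypothetical helper. intel_wakeref_put_delay() additionally packs a
 * jiffy count into the flags via FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, ...)
 * so the final put is deferred by that delay.
 */
#if 0
static void example_release(struct intel_wakeref *wf, bool can_sleep)
{
	if (can_sleep)
		intel_wakeref_put(wf);		/* may take wf->mutex directly */
	else
		intel_wakeref_put_async(wf);	/* final put runs from wf->work */
}
#endif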

static inline void
intel_wakeref_might_put(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it from being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until the active callback (running under @wf->mutex, possibly on
 * another CPU) has completed.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_delayed_work(&wf->work);
}

/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}
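
/*
 * Illustrative sketch: sample the wakeref state while holding the mutex so
 * the underlying runtime-pm wakeref cannot be acquired or released
 * underneath us; example_show_status() is a hypothetical debugfs-style
 * helper.
 */
#if 0
static bool example_show_status(struct intel_wakeref *wf)
{
	bool active;

	intel_wakeref_lock(wf);
	active = intel_wakeref_is_active(wf);
	intel_wakeref_unlock(wf);

	return active;
}
#endif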

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Return: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);

struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
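
/*
 * Illustrative sketch of the auto-wakeref helpers. The example_device
 * structure, its userfault_wakeref member, the ~250ms figure and the
 * example_* helpers are all hypothetical; each user access pushes out the
 * autosuspend deadline, and a timeout of 0 cancels the delay.
 */
#if 0
struct example_device {
	struct intel_runtime_pm *rpm;
	struct intel_wakeref_auto userfault_wakeref;
};

static void example_device_init(struct example_device *ed)
{
	intel_wakeref_auto_init(&ed->userfault_wakeref, ed->rpm);
}

static void example_user_access(struct example_device *ed)
{
	/* hold off runtime suspend for roughly 250ms after this access */
	intel_wakeref_auto(&ed->userfault_wakeref, msecs_to_jiffies(250));
}

static void example_device_fini(struct example_device *ed)
{
	intel_wakeref_auto(&ed->userfault_wakeref, 0);	/* cancel any pending delay */
	intel_wakeref_auto_fini(&ed->userfault_wakeref);
}
#endif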

#endif /* INTEL_WAKEREF_H */