/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <[email protected]>
 * Maarten Lankhorst <[email protected]>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H

#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

extern struct ww_class reservation_ww_class;

struct dma_resv_list;

/**
 * enum dma_resv_usage - how the fences from a dma_resv obj are used
 *
 * This enum describes the different use cases for a dma_resv object and
 * controls which fences are returned when queried.
 *
 * Importantly, the usages are ordered KERNEL < WRITE < READ < BOOKKEEP, and
 * when the dma_resv object is asked for fences of one use case the fences
 * for the lower use cases are returned as well.
 *
 * For example, when asking for WRITE fences the KERNEL fences are returned
 * as well. Similarly, when asked for READ fences both the WRITE and KERNEL
 * fences are returned.
 *
 * Already used fences can be promoted in the sense that a fence with
 * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again
 * with this usage. But fences can never be degraded in the sense that a fence
 * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ.
 */
enum dma_resv_usage {
	/**
	 * @DMA_RESV_USAGE_KERNEL: For in-kernel memory management only.
	 *
	 * This should only be used for things like copying or clearing memory
	 * with a DMA hardware engine for the purpose of kernel memory
	 * management.
	 *
	 * Drivers must *always* wait for those fences before accessing the
	 * resource protected by the dma_resv object. The only exception is
	 * when the resource is known to be locked down in place by having
	 * pinned it previously.
	 */
	DMA_RESV_USAGE_KERNEL,

	/**
	 * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
	 *
	 * This should only be used for userspace command submissions which add
	 * an implicit write dependency.
	 */
	DMA_RESV_USAGE_WRITE,

	/**
	 * @DMA_RESV_USAGE_READ: Implicit read synchronization.
	 *
	 * This should only be used for userspace command submissions which add
	 * an implicit read dependency.
	 */
	DMA_RESV_USAGE_READ,

	/**
	 * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
	 *
	 * This should be used by submissions which don't want to participate
	 * in any implicit synchronization.
	 *
	 * The most common cases are preemption fences, page table updates, TLB
	 * flushes as well as explicitly synced user submissions.
	 *
	 * Explicitly synced user submissions can be promoted to
	 * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using
	 * dma_buf_import_sync_file() when implicit synchronization should
	 * become necessary after initially adding the fence.
	 */
	DMA_RESV_USAGE_BOOKKEEP
};

/**
 * dma_resv_usage_rw - helper for implicit sync
 * @write: true if we create a new implicit sync write
 *
 * This returns the implicit synchronization usage for write or read accesses,
 * see enum dma_resv_usage and &dma_buf.resv.
 */
static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
{
	/* This looks confusing at first sight, but is indeed correct.
	 *
	 * The rationale is that new write operations need to wait for the
	 * existing read and write operations to finish.
	 * But a new read operation only needs to wait for the existing write
	 * operations to finish.
	 */
	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
}

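/*
 * Example (illustrative sketch, not part of the original header): a new job
 * that writes the buffer must wait for all existing readers and writers,
 * while a read-only job only waits for writers. The names job_is_write and
 * ret are placeholders.
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout(obj, dma_resv_usage_rw(job_is_write),
 *				    true, MAX_SCHEDULE_TIMEOUT);
 *	if (ret < 0)
 *		return ret;
 */
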
/**
 * struct dma_resv - a reservation object manages fences for a buffer
 *
 * This is a container for dma_fence objects which needs to handle multiple use
 * cases.
 *
 * One use is to synchronize cross-driver access to a struct dma_buf, either for
 * dynamic buffer management or just to handle implicit synchronization between
 * different users of the buffer in userspace. See &dma_buf.resv for a more
 * in-depth discussion.
 *
 * The other major use is to manage access and locking within a driver in a
 * buffer based memory manager. struct ttm_buffer_object is the canonical
 * example here, since this is where reservation objects originated from. But
 * use in drivers is spreading and some drivers also manage struct
 * drm_gem_object with the same scheme.
 */
struct dma_resv {
	/**
	 * @lock:
	 *
	 * Update side lock. Don't use directly, instead use the wrapper
	 * functions like dma_resv_lock() and dma_resv_unlock().
	 *
	 * Drivers which use the reservation object to manage memory
	 * dynamically also use this lock to protect buffer object state like
	 * placement, allocation policies or throughout command submission.
	 */
	struct ww_mutex lock;

	/**
	 * @fences:
	 *
	 * Array of fences which were added to the dma_resv object.
	 *
	 * A new fence is added by calling dma_resv_add_fence(). Since this
	 * often needs to be done past the point of no return in command
	 * submission it cannot fail, and therefore sufficient slots need to be
	 * reserved by calling dma_resv_reserve_fences().
	 */
	struct dma_resv_list __rcu *fences;
};

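/*
 * Example (illustrative sketch, not part of the original header): the
 * reserve-then-add pattern under the reservation lock. Reserving the slot
 * first is what allows dma_resv_add_fence() to be infallible later, past the
 * point of no return. The names fence and ret are placeholders.
 *
 *	dma_resv_lock(obj, NULL);
 *	ret = dma_resv_reserve_fences(obj, 1);
 *	if (!ret)
 *		dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
 *	dma_resv_unlock(obj);
 */
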
/**
 * struct dma_resv_iter - current position into the dma_resv fences
 *
 * Don't touch this directly in the driver, use the accessor function instead.
 *
 * IMPORTANT
 *
 * When using the lockless iterators like dma_resv_iter_next_unlocked() or
 * dma_resv_for_each_fence_unlocked() beware that the iterator can be
 * restarted. Code which accumulates statistics or similar needs to check for
 * this with dma_resv_iter_is_restarted().
 */
struct dma_resv_iter {
	/** @obj: The dma_resv object we iterate over */
	struct dma_resv *obj;

	/** @usage: Return fences with this usage or lower. */
	enum dma_resv_usage usage;

	/** @fence: the currently handled fence */
	struct dma_fence *fence;

	/** @fence_usage: the usage of the current fence */
	enum dma_resv_usage fence_usage;

	/** @index: index into the shared fences */
	unsigned int index;

	/** @fences: the shared fences; private, *MUST* not dereference */
	struct dma_resv_list *fences;

	/** @num_fences: number of fences */
	unsigned int num_fences;

	/** @is_restarted: true if this is the first returned fence */
	bool is_restarted;
};

struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);

/**
 * dma_resv_iter_begin - initialize a dma_resv_iter object
 * @cursor: The dma_resv_iter object to initialize
 * @obj: The dma_resv object which we want to iterate over
 * @usage: controls which fences to include, see enum dma_resv_usage.
 */
static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
				       struct dma_resv *obj,
				       enum dma_resv_usage usage)
{
	cursor->obj = obj;
	cursor->usage = usage;
	cursor->fence = NULL;
}

/**
 * dma_resv_iter_end - cleanup a dma_resv_iter object
 * @cursor: the dma_resv_iter object which should be cleaned up
 *
 * Make sure that the reference to the fence in the cursor is properly
 * dropped.
 */
static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
{
	dma_fence_put(cursor->fence);
}

/**
 * dma_resv_iter_usage - Return the usage of the current fence
 * @cursor: the cursor of the current position
 *
 * Returns the usage of the currently processed fence.
 */
static inline enum dma_resv_usage
dma_resv_iter_usage(struct dma_resv_iter *cursor)
{
	return cursor->fence_usage;
}

/**
 * dma_resv_iter_is_restarted - test if this is the first fence after a restart
 * @cursor: the cursor with the current position
 *
 * Return true if this is the first fence in an iteration after a restart.
 */
static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
{
	return cursor->is_restarted;
}

/**
 * dma_resv_for_each_fence_unlocked - unlocked fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object without holding the
 * &dma_resv.lock and using RCU instead. The cursor needs to be initialized
 * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
 * the iterator a reference to the dma_fence is held and the RCU lock dropped.
 *
 * Beware that the iterator can be restarted when the struct dma_resv for
 * @cursor is modified. Code which accumulates statistics or similar needs to
 * check for this with dma_resv_iter_is_restarted(). For this reason prefer
 * the locked iterator dma_resv_for_each_fence() whenever possible.
 */
#define dma_resv_for_each_fence_unlocked(cursor, fence)		\
	for (fence = dma_resv_iter_first_unlocked(cursor);		\
	     fence; fence = dma_resv_iter_next_unlocked(cursor))

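/*
 * Example (illustrative sketch, not part of the original header): counting
 * signaled fences without taking the reservation lock. Because a concurrent
 * writer can restart the walk, accumulated state must be reset whenever
 * dma_resv_iter_is_restarted() reports a restart. num_signaled is a
 * placeholder.
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *	unsigned int num_signaled = 0;
 *
 *	dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_READ);
 *	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *		if (dma_resv_iter_is_restarted(&cursor))
 *			num_signaled = 0;
 *		if (dma_fence_is_signaled(fence))
 *			num_signaled++;
 *	}
 *	dma_resv_iter_end(&cursor);
 */
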
/**
 * dma_resv_for_each_fence - fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @obj: a dma_resv object pointer
 * @usage: controls which fences to return
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object while holding the
 * &dma_resv.lock. @usage controls which fences are returned, see enum
 * dma_resv_usage. The cursor initialisation is part of the iterator and the
 * fence stays valid as long as the lock is held and so no extra reference to
 * the fence is taken.
 */
#define dma_resv_for_each_fence(cursor, obj, usage, fence)	\
	for (dma_resv_iter_begin(cursor, obj, usage),		\
	     fence = dma_resv_iter_first(cursor); fence;	\
	     fence = dma_resv_iter_next(cursor))

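/*
 * Example (illustrative sketch, not part of the original header): collecting
 * dependencies for a new job while the reservation object is locked, so no
 * restart handling and no dma_resv_iter_end() are needed.
 * add_job_dependency() is a hypothetical driver helper.
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *
 *	dma_resv_assert_held(obj);
 *	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_BOOKKEEP, fence)
 *		add_job_dependency(job, fence);
 */
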
#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)

#ifdef CONFIG_DEBUG_MUTEXES
void dma_resv_reset_max_fences(struct dma_resv *obj);
#else
static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
#endif

/**
 * dma_resv_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only against other writers; readers will run concurrently
 * with a writer under RCU. The seqlock is used to notify readers if they
 * overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK, all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 *
 * See also dma_resv_lock_interruptible() for the interruptible variant.
 */
static inline int dma_resv_lock(struct dma_resv *obj,
				struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock(&obj->lock, ctx);
}

/**
 * dma_resv_lock_interruptible - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object interruptibly for exclusive access and
 * modification. Note that the lock is only against other writers; readers
 * will run concurrently with a writer under RCU. The seqlock is used to
 * notify readers if they overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK, all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
 * @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 */
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
					      struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow - slowpath lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object after a die case. This function
 * will sleep until the lock becomes available. See dma_resv_lock() as
 * well.
 *
 * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
 */
static inline void dma_resv_lock_slow(struct dma_resv *obj,
				      struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_slow(&obj->lock, ctx);
}

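/*
 * Example (illustrative sketch, not part of the original header): the
 * classic wait/die backoff when locking two reservation objects a and b. On
 * -EDEADLK every other lock held under the context is dropped before
 * dma_resv_lock_slow() sleeps on the contended object. A real driver loops
 * until no further -EDEADLK occurs.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *	ret = dma_resv_lock(a, &ctx);
 *	if (!ret) {
 *		ret = dma_resv_lock(b, &ctx);
 *		if (ret == -EDEADLK) {
 *			dma_resv_unlock(a);
 *			dma_resv_lock_slow(b, &ctx);
 *			ret = dma_resv_lock(a, &ctx);
 *		}
 *	}
 *	if (!ret)
 *		ww_acquire_done(&ctx);
 */
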
/**
 * dma_resv_lock_slow_interruptible - slowpath lock the reservation
 * object, interruptible
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object interruptibly after a die case. This
 * function will sleep until the lock becomes available. See
 * dma_resv_lock_interruptible() as well.
 */
static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
						   struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_trylock - trylock the reservation object
 * @obj: the reservation object
 *
 * Tries to lock the reservation object for exclusive access and modification.
 * Note that the lock is only against other writers; readers will run
 * concurrently with a writer under RCU. The seqlock is used to notify readers
 * if they overlap with a writer.
 *
 * Also note that since no context is provided, no deadlock protection is
 * possible, which is also not needed for a trylock.
 *
 * Returns true if the lock was acquired, false otherwise.
 */
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
	return ww_mutex_trylock(&obj->lock, NULL);
}

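/*
 * Example (illustrative sketch, not part of the original header): trylock in
 * a context that must not block, such as an eviction or shrinker scan, where
 * a contended object is simply skipped. evict_buffer() is a hypothetical
 * driver helper.
 *
 *	if (dma_resv_trylock(obj)) {
 *		evict_buffer(obj);
 *		dma_resv_unlock(obj);
 *	}
 */
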
/**
 * dma_resv_is_locked - is the reservation object locked
 * @obj: the reservation object
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool dma_resv_is_locked(struct dma_resv *obj)
{
	return ww_mutex_is_locked(&obj->lock);
}

/**
 * dma_resv_locking_ctx - returns the context used to lock the object
 * @obj: the reservation object
 *
 * Returns the context used to lock a reservation object or NULL if no context
 * was used or the object is not locked at all.
 *
 * WARNING: This interface is pretty horrible, but TTM needs it because it
 * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
 * Everyone else just uses it to check whether they're holding a reservation or
 * not.
 */
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
	return READ_ONCE(obj->lock.ctx);
}

/**
 * dma_resv_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void dma_resv_unlock(struct dma_resv *obj)
{
	dma_resv_reset_max_fences(obj);
	ww_mutex_unlock(&obj->lock);
}

void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
			enum dma_resv_usage usage);
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
			     struct dma_fence *fence,
			     enum dma_resv_usage usage);
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
			unsigned int *num_fences, struct dma_fence ***fences);
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
			   struct dma_fence **fence);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
			   bool intr, unsigned long timeout);
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);

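/*
 * Example (illustrative sketch, not part of the original header): a
 * non-blocking poll followed by an interruptible bounded wait for the kernel
 * fences before touching the backing storage. timeout_jiffies is a
 * placeholder; a positive return from dma_resv_wait_timeout() is the
 * remaining timeout, 0 means it timed out.
 *
 *	long ret;
 *
 *	if (!dma_resv_test_signaled(obj, DMA_RESV_USAGE_KERNEL)) {
 *		ret = dma_resv_wait_timeout(obj, DMA_RESV_USAGE_KERNEL,
 *					    true, timeout_jiffies);
 *		if (ret <= 0)
 *			return ret ? ret : -ETIME;
 *	}
 */
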
#endif /* _LINUX_RESERVATION_H */