dma-fence.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fence mechanism for dma-buf to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <[email protected]>
 * Maarten Lankhorst <[email protected]>
 */

#ifndef __LINUX_DMA_FENCE_H
#define __LINUX_DMA_FENCE_H

#include <linux/err.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

struct dma_fence;
struct dma_fence_ops;
struct dma_fence_cb;

/**
 * struct dma_fence - software synchronization primitive
 * @refcount: refcount for this fence
 * @ops: dma_fence_ops associated with this fence
 * @rcu: used for releasing fence with kfree_rcu
 * @cb_list: list of all callbacks to call
 * @lock: spin_lock_irqsave used for locking
 * @context: execution context this fence belongs to, returned by
 *           dma_fence_context_alloc()
 * @seqno: the sequence number of this fence inside the execution context,
 *         can be compared to decide which fence would be signaled later.
 * @flags: A mask of DMA_FENCE_FLAG_* defined below
 * @timestamp: Timestamp when the fence was signaled.
 * @error: Optional, only valid if < 0, must be set before calling
 *         dma_fence_signal, indicates that the fence has completed with
 *         an error.
 *
 * The flags member must be manipulated and read using the appropriate
 * atomic ops (bit_*), so taking the spinlock will not be needed most
 * of the time.
 *
 * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
 * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
 * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
 * implementer of the fence for its own purposes. Can be used in different
 * ways by different fence implementers, so do not rely on this.
 *
 * Since atomic bitops are used, the flag updates are not ordered by the
 * lock. In particular, dma_fence_signal() might race with the setting of
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT and manage to set
 * DMA_FENCE_FLAG_SIGNALED_BIT before enable_signaling was called.
 * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
 * after dma_fence_signal was called, any enable_signaling call will have
 * either been completed, or never called at all.
 */
struct dma_fence {
	spinlock_t *lock;
	const struct dma_fence_ops *ops;
	/*
	 * We clear the callback list on kref_put so that by the time we
	 * release the fence it is unused. No one should be adding to the
	 * cb_list that they don't themselves hold a reference for.
	 *
	 * The lifetime of the timestamp is similarly tied to both the
	 * rcu freelist and the cb_list. The timestamp is only set upon
	 * signaling while simultaneously notifying the cb_list. Ergo, we
	 * only use either the cb_list or the timestamp. Upon destruction,
	 * neither is accessible, and so we can use the rcu. This means
	 * that the cb_list is *only* valid until the signal bit is set,
	 * and to read either you *must* hold a reference to the fence,
	 * and not just the rcu_read_lock.
	 *
	 * Listed in chronological order.
	 */
	union {
		struct list_head cb_list;
		/* @cb_list replaced by @timestamp on dma_fence_signal() */
		ktime_t timestamp;
		/* @timestamp replaced by @rcu on dma_fence_release() */
		struct rcu_head rcu;
	};
	u64 context;
	u64 seqno;
	unsigned long flags;
	struct kref refcount;
	int error;
};

enum dma_fence_flag_bits {
	DMA_FENCE_FLAG_SIGNALED_BIT,
	DMA_FENCE_FLAG_TIMESTAMP_BIT,
	DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
};
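
/*
 * Example: closing the enable_signaling vs. dma_fence_signal() race
 * described above. This is a sketch of the core pattern, which
 * dma_fence_add_callback() and dma_fence_enable_sw_signaling() already
 * implement internally; it is not extra work drivers normally do.
 *
 *	bool was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
 *					&fence->flags);
 *
 *	re-check SIGNALED after setting ENABLE_SIGNAL: if the fence got
 *	signaled in between, enable_signaling must not be relied upon
 *	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 *		return -ENOENT;
 *
 *	if (!was_set && fence->ops->enable_signaling &&
 *	    !fence->ops->enable_signaling(fence))
 *		dma_fence_signal_locked(fence);
 */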

typedef void (*dma_fence_func_t)(struct dma_fence *fence,
				 struct dma_fence_cb *cb);

/**
 * struct dma_fence_cb - callback for dma_fence_add_callback()
 * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list
 * @func: dma_fence_func_t to call
 *
 * This struct will be initialized by dma_fence_add_callback(), additional
 * data can be passed along by embedding dma_fence_cb in another struct.
 */
struct dma_fence_cb {
	struct list_head node;
	dma_fence_func_t func;
};
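
/*
 * Example: passing extra data to a callback by embedding dma_fence_cb in
 * a driver structure. A minimal sketch; "struct foo_job" and its work
 * item are hypothetical. Note the callback runs with fence->lock held,
 * possibly in irq context, so real work is deferred here.
 *
 *	struct foo_job {
 *		struct dma_fence_cb cb;
 *		struct work_struct work;
 *	};
 *
 *	static void foo_fence_cb(struct dma_fence *fence,
 *				 struct dma_fence_cb *cb)
 *	{
 *		struct foo_job *job = container_of(cb, struct foo_job, cb);
 *
 *		queue_work(system_wq, &job->work);
 *	}
 *
 *	ret = dma_fence_add_callback(fence, &job->cb, foo_fence_cb);
 *	if (ret == -ENOENT)
 *		the fence was already signaled, run the work directly
 */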

/**
 * struct dma_fence_ops - operations implemented for fence
 */
struct dma_fence_ops {
	/**
	 * @use_64bit_seqno:
	 *
	 * True if this dma_fence implementation uses 64bit seqno, false
	 * otherwise.
	 */
	bool use_64bit_seqno;

	/**
	 * @get_driver_name:
	 *
	 * Returns the driver name. This is a callback to allow drivers to
	 * compute the name at runtime, without having to store it
	 * permanently for each fence, or build a cache of some sort.
	 *
	 * This callback is mandatory.
	 */
	const char * (*get_driver_name)(struct dma_fence *fence);

	/**
	 * @get_timeline_name:
	 *
	 * Return the name of the context this fence belongs to. This is a
	 * callback to allow drivers to compute the name at runtime, without
	 * having to store it permanently for each fence, or build a cache
	 * of some sort.
	 *
	 * This callback is mandatory.
	 */
	const char * (*get_timeline_name)(struct dma_fence *fence);

	/**
	 * @enable_signaling:
	 *
	 * Enable software signaling of fence.
	 *
	 * For fence implementations that have the capability for hw->hw
	 * signaling, they can implement this op to enable the necessary
	 * interrupts, or insert commands into cmdstream, etc, to avoid these
	 * costly operations for the common case where only hw->hw
	 * synchronization is required. This is called in the first
	 * dma_fence_wait() or dma_fence_add_callback() path to let the fence
	 * implementation know that there is another driver waiting on the
	 * signal (ie. hw->sw case).
	 *
	 * This function can be called from atomic context, but not
	 * from irq context, so normal spinlocks can be used.
	 *
	 * A return value of false indicates the fence already passed,
	 * or some failure occurred that made it impossible to enable
	 * signaling. True indicates successful enabling.
	 *
	 * &dma_fence.error may be set in enable_signaling, but only when false
	 * is returned.
	 *
	 * Since many implementations can call dma_fence_signal() even before
	 * @enable_signaling has been called, there's a race window where
	 * dma_fence_signal() might result in the final fence reference being
	 * released and its memory freed. To avoid this, implementations of this
	 * callback should grab their own reference using dma_fence_get(), to be
	 * released when the fence is signalled (through e.g. the interrupt
	 * handler).
	 *
	 * This callback is optional. If this callback is not present, then the
	 * driver must always have signaling enabled.
	 */
	bool (*enable_signaling)(struct dma_fence *fence);
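
	/*
	 * Example: a sketch of an @enable_signaling implementation that
	 * takes its own reference as described above. The "foo" irq
	 * plumbing is hypothetical; dma_fence_get()/dma_fence_put() and
	 * container_of() are the real pattern.
	 *
	 *	static bool foo_enable_signaling(struct dma_fence *fence)
	 *	{
	 *		struct foo_fence *f =
	 *			container_of(fence, struct foo_fence, base);
	 *
	 *		dma_fence_get(fence);
	 *		foo_hw_arm_irq(f);	the irq handler later calls
	 *					dma_fence_signal() and then
	 *					dma_fence_put()
	 *		return true;
	 *	}
	 */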

	/**
	 * @signaled:
	 *
	 * Peek whether the fence is signaled, as a fastpath optimization for
	 * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this
	 * callback does not need to make any guarantees beyond that a fence
	 * once indicated as signaled must always return true from this
	 * callback. This callback may return false even if the fence has
	 * completed already, in which case the information hasn't propagated
	 * through the system yet. See also dma_fence_is_signaled().
	 *
	 * May set &dma_fence.error if returning true.
	 *
	 * This callback is optional.
	 */
	bool (*signaled)(struct dma_fence *fence);

	/**
	 * @wait:
	 *
	 * Custom wait implementation, defaults to dma_fence_default_wait() if
	 * not set.
	 *
	 * Deprecated and should not be used by new implementations. Only used
	 * by existing implementations which need special handling for their
	 * hardware reset procedure.
	 *
	 * Must return -ERESTARTSYS if the wait is intr = true and the wait was
	 * interrupted, the remaining jiffies if the fence has signaled, or 0 if
	 * the wait timed out. Can also return other error values on custom
	 * implementations, which should be treated as if the fence is signaled.
	 * For example a hardware lockup could be reported like that.
	 */
	signed long (*wait)(struct dma_fence *fence,
			    bool intr, signed long timeout);

	/**
	 * @release:
	 *
	 * Called on destruction of fence to release additional resources.
	 * Can be called from irq context. This callback is optional. If it is
	 * NULL, then dma_fence_free() is instead called as the default
	 * implementation.
	 */
	void (*release)(struct dma_fence *fence);

	/**
	 * @fence_value_str:
	 *
	 * Callback to fill in free-form debug info specific to this fence, like
	 * the sequence number.
	 *
	 * This callback is optional.
	 */
	void (*fence_value_str)(struct dma_fence *fence, char *str, int size);

	/**
	 * @timeline_value_str:
	 *
	 * Fills in the current value of the timeline as a string, like the
	 * sequence number. Note that the specific fence passed to this function
	 * should not matter, drivers should only use it to look up the
	 * corresponding timeline structures.
	 */
	void (*timeline_value_str)(struct dma_fence *fence,
				   char *str, int size);
};

void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
		    spinlock_t *lock, u64 context, u64 seqno);

void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
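
/*
 * Example: minimal fence creation with the two mandatory ops. A sketch;
 * the "foo" names, lock and allocation are hypothetical, while
 * dma_fence_context_alloc() and dma_fence_init() are the real entry
 * points.
 *
 *	static const char *foo_get_driver_name(struct dma_fence *fence)
 *	{
 *		return "foo";
 *	}
 *
 *	static const char *foo_get_timeline_name(struct dma_fence *fence)
 *	{
 *		return "foo-ring0";
 *	}
 *
 *	static const struct dma_fence_ops foo_fence_ops = {
 *		.get_driver_name = foo_get_driver_name,
 *		.get_timeline_name = foo_get_timeline_name,
 *	};
 *
 *	context = dma_fence_context_alloc(1);	once per timeline
 *	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 *	if (fence)
 *		dma_fence_init(fence, &foo_fence_ops, &foo_lock,
 *			       context, ++foo_seqno);
 */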

/**
 * dma_fence_put - decreases refcount of the fence
 * @fence: fence to reduce refcount of
 */
static inline void dma_fence_put(struct dma_fence *fence)
{
	if (fence)
		kref_put(&fence->refcount, dma_fence_release);
}

/**
 * dma_fence_get - increases refcount of the fence
 * @fence: fence to increase refcount of
 *
 * Returns the same fence, with refcount increased by 1.
 */
static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

/**
 * dma_fence_get_rcu - get a fence from a dma_resv_list with
 * rcu read lock
 * @fence: fence to increase refcount of
 *
 * Function returns NULL if no refcount could be obtained, or the fence.
 */
static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
{
	if (kref_get_unless_zero(&fence->refcount))
		return fence;
	else
		return NULL;
}

/**
 * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
 * @fencep: pointer to fence to increase refcount of
 *
 * Function returns NULL if no refcount could be obtained, or the fence.
 * This function handles acquiring a reference to a fence that may be
 * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU),
 * so long as the caller is using RCU on the pointer to the fence.
 *
 * An alternative mechanism is to employ a seqlock to protect a bunch of
 * fences, such as used by struct dma_resv. When using a seqlock,
 * the seqlock must be taken before and checked after a reference to the
 * fence is acquired (as shown here).
 *
 * The caller is required to hold the RCU read lock.
 */
static inline struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
{
	do {
		struct dma_fence *fence;

		fence = rcu_dereference(*fencep);
		if (!fence)
			return NULL;

		if (!dma_fence_get_rcu(fence))
			continue;

		/* The atomic_inc_not_zero() inside dma_fence_get_rcu()
		 * provides a full memory barrier upon success (such as now).
		 * This is paired with the write barrier from assigning
		 * to the __rcu protected fence pointer so that if that
		 * pointer still matches the current fence, we know we
		 * have successfully acquired a reference to it. If it no
		 * longer matches, we are holding a reference to some other
		 * reallocated pointer. This is possible if the allocator
		 * is using a freelist like SLAB_TYPESAFE_BY_RCU where the
		 * fence remains valid for the RCU grace period, but it
		 * may be reallocated. When using such allocators, we are
		 * responsible for ensuring the reference we get is to
		 * the right fence, as below.
		 */
		if (fence == rcu_access_pointer(*fencep))
			return rcu_pointer_handoff(fence);

		dma_fence_put(fence);
	} while (1);
}
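
/*
 * Example: typical dma_fence_get_rcu_safe() usage. A sketch; "obj" and
 * its __rcu fence pointer are hypothetical.
 *
 *	struct dma_fence *fence;
 *
 *	rcu_read_lock();
 *	fence = dma_fence_get_rcu_safe(&obj->fence);
 *	rcu_read_unlock();
 *
 *	if (fence) {
 *		the reference is now stable and usable outside RCU
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */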

#ifdef CONFIG_LOCKDEP
bool dma_fence_begin_signalling(void);
void dma_fence_end_signalling(bool cookie);
void __dma_fence_might_wait(void);
#else
static inline bool dma_fence_begin_signalling(void)
{
	return true;
}
static inline void dma_fence_end_signalling(bool cookie) {}
static inline void __dma_fence_might_wait(void) {}
#endif
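
/*
 * Example: annotating a fence signaling critical section so that lockdep
 * can flag dependencies which could deadlock against dma_fence_wait().
 * A sketch; the job structure is hypothetical.
 *
 *	bool cookie = dma_fence_begin_signalling();
 *
 *	nothing in here may block on memory reclaim or on locks that
 *	are held while waiting for a fence
 *	dma_fence_signal(job->done_fence);
 *
 *	dma_fence_end_signalling(cookie);
 */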

int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
				      ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence,
				   bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
			   struct dma_fence_cb *cb,
			   dma_fence_func_t func);
bool dma_fence_remove_callback(struct dma_fence *fence,
			       struct dma_fence_cb *cb);
void dma_fence_enable_sw_signaling(struct dma_fence *fence);

/**
 * dma_fence_is_signaled_locked - Return an indication if the fence
 *                                is signaled yet.
 * @fence: the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if dma_fence_add_callback(), dma_fence_wait() or
 * dma_fence_enable_sw_signaling() haven't been called before.
 *
 * This function requires &dma_fence.lock to be held.
 *
 * See also dma_fence_is_signaled().
 */
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal_locked(fence);
		return true;
	}

	return false;
}

/**
 * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
 * @fence: the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if dma_fence_add_callback(), dma_fence_wait() or
 * dma_fence_enable_sw_signaling() haven't been called before.
 *
 * It's recommended for seqno fences to call dma_fence_signal when the
 * operation is complete, it makes it possible to prevent issues from
 * wraparound between time of issue and time of use by checking the return
 * value of this function before calling hardware-specific wait instructions.
 *
 * See also dma_fence_is_signaled_locked().
 */
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal(fence);
		return true;
	}

	return false;
}

/**
 * __dma_fence_is_later - return if f1 is chronologically later than f2
 * @f1: the first fence's seqno
 * @f2: the second fence's seqno from the same context
 * @ops: dma_fence_ops associated with the seqno
 *
 * Returns true if f1 is chronologically later than f2. Both fences must be
 * from the same context, since a seqno is not common across contexts.
 */
static inline bool __dma_fence_is_later(u64 f1, u64 f2,
					const struct dma_fence_ops *ops)
{
	/* This is for backward compatibility with drivers which can only handle
	 * 32bit sequence numbers. Use a 64bit compare when the driver says to
	 * do so.
	 */
	if (ops->use_64bit_seqno)
		return f1 > f2;

	return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
}
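
/*
 * Example: how the 32bit comparison survives wraparound. With
 * f1 = 0x00000002 and f2 = 0xfffffffe the u32 subtraction yields 4,
 * and (int)4 > 0, so f1 is correctly treated as later even though it is
 * numerically smaller. The comparison stays correct as long as the two
 * seqnos are less than 2^31 steps apart.
 */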

/**
 * dma_fence_is_later - return if f1 is chronologically later than f2
 * @f1: the first fence from the same context
 * @f2: the second fence from the same context
 *
 * Returns true if f1 is chronologically later than f2. Both fences must be
 * from the same context, since a seqno is not re-used across contexts.
 */
static inline bool dma_fence_is_later(struct dma_fence *f1,
				      struct dma_fence *f2)
{
	if (WARN_ON(f1->context != f2->context))
		return false;

	return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
}

/**
 * dma_fence_is_later_or_same - return true if f1 is later or same as f2
 * @f1: the first fence from the same context
 * @f2: the second fence from the same context
 *
 * Returns true if f1 is chronologically later than f2 or the same fence. Both
 * fences must be from the same context, since a seqno is not re-used across
 * contexts.
 */
static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
					      struct dma_fence *f2)
{
	return f1 == f2 || dma_fence_is_later(f1, f2);
}

/**
 * dma_fence_later - return the chronologically later fence
 * @f1: the first fence from the same context
 * @f2: the second fence from the same context
 *
 * Returns NULL if both fences are signaled, otherwise the fence that would be
 * signaled last. Both fences must be from the same context, since a seqno is
 * not re-used across contexts.
 */
static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
						struct dma_fence *f2)
{
	if (WARN_ON(f1->context != f2->context))
		return NULL;

	/*
	 * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never
	 * have been set if enable_signaling wasn't called, and enabling that
	 * here is overkill.
	 */
	if (dma_fence_is_later(f1, f2))
		return dma_fence_is_signaled(f1) ? NULL : f1;
	else
		return dma_fence_is_signaled(f2) ? NULL : f2;
}

/**
 * dma_fence_get_status_locked - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * Drivers can supply an optional error status condition before they signal
 * the fence (to indicate whether the fence was completed due to an error
 * rather than success). The value of the status condition is only valid
 * if the fence has been signaled, so dma_fence_get_status_locked() first
 * checks the signal state before reporting the error status.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence has been completed with an error.
 */
static inline int dma_fence_get_status_locked(struct dma_fence *fence)
{
	if (dma_fence_is_signaled_locked(fence))
		return fence->error ?: 1;
	else
		return 0;
}

int dma_fence_get_status(struct dma_fence *fence);

/**
 * dma_fence_set_error - flag an error condition on the fence
 * @fence: the dma_fence
 * @error: the error to store
 *
 * Drivers can supply an optional error status condition before they signal
 * the fence, to indicate that the fence was completed due to an error
 * rather than success. This must be set before signaling (so that the value
 * is visible before any waiters on the signal callback are woken). This
 * helper exists to help catching erroneous setting of #dma_fence.error.
 */
static inline void dma_fence_set_error(struct dma_fence *fence,
				       int error)
{
	WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
	WARN_ON(error >= 0 || error < -MAX_ERRNO);

	fence->error = error;
}
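
/*
 * Example: reporting a failed operation to waiters. A sketch; the
 * foo_hw_hung() check and the "f" wrapper are hypothetical. The error
 * must be set before signaling, and waiters can then retrieve it with
 * dma_fence_get_status().
 *
 *	if (foo_hw_hung(f))
 *		dma_fence_set_error(&f->base, -ETIMEDOUT);
 *	dma_fence_signal(&f->base);
 */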

/**
 * dma_fence_timestamp - helper to get the completion timestamp of a fence
 * @fence: fence to get the timestamp from.
 *
 * After a fence is signaled the timestamp is updated with the signaling time,
 * but setting the timestamp can race with tasks waiting for the signaling. This
 * helper busy waits for the correct timestamp to appear.
 */
static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
{
	if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
		return ktime_get();

	while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
		cpu_relax();

	return fence->timestamp;
}

signed long dma_fence_wait_timeout(struct dma_fence *fence,
				   bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
				       uint32_t count,
				       bool intr, signed long timeout,
				       uint32_t *idx);
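
/*
 * Example: waiting for the first of several fences with a timeout.
 * A sketch; the fences[] array and count are hypothetical.
 *
 *	uint32_t idx;
 *	signed long ret;
 *
 *	ret = dma_fence_wait_any_timeout(fences, count, true,
 *					 msecs_to_jiffies(100), &idx);
 *	if (ret > 0)
 *		fences[idx] signaled first, ret jiffies remained
 *	else if (ret == 0)
 *		the wait timed out
 *	else
 *		error, e.g. -ERESTARTSYS
 */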

/**
 * dma_fence_wait - sleep until the fence gets signaled
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 *
 * This function will return -ERESTARTSYS if interrupted by a signal,
 * or 0 if the fence was signaled. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly holds a reference to the fence, otherwise the
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout().
 */
static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
{
	signed long ret;

	/* Since dma_fence_wait_timeout cannot timeout with
	 * MAX_SCHEDULE_TIMEOUT, only valid return values are
	 * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
	 */
	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);

	return ret < 0 ? ret : 0;
}
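
/*
 * Example: a typical interruptible wait. A sketch of the call site only.
 *
 *	ret = dma_fence_wait(fence, true);
 *	if (ret == -ERESTARTSYS)
 *		return ret;	interrupted by a signal, restart syscall
 *	if (ret < 0)
 *		return ret;	a custom @wait reported a fatal error
 *	the fence is signaled
 */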

struct dma_fence *dma_fence_get_stub(void);
struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp);
u64 dma_fence_context_alloc(unsigned num);

extern const struct dma_fence_ops dma_fence_array_ops;
extern const struct dma_fence_ops dma_fence_chain_ops;

/**
 * dma_fence_is_array - check if a fence is from the array subclass
 * @fence: the fence to test
 *
 * Return true if it is a dma_fence_array and false otherwise.
 */
static inline bool dma_fence_is_array(struct dma_fence *fence)
{
	return fence->ops == &dma_fence_array_ops;
}

/**
 * dma_fence_is_chain - check if a fence is from the chain subclass
 * @fence: the fence to test
 *
 * Return true if it is a dma_fence_chain and false otherwise.
 */
static inline bool dma_fence_is_chain(struct dma_fence *fence)
{
	return fence->ops == &dma_fence_chain_ops;
}

/**
 * dma_fence_is_container - check if a fence is a container for other fences
 * @fence: the fence to test
 *
 * Return true if this fence is a container for other fences, false otherwise.
 * This is important since we can't build up large fence structures or
 * otherwise we run into recursion during operation on those fences.
 */
static inline bool dma_fence_is_container(struct dma_fence *fence)
{
	return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
}

#endif /* __LINUX_DMA_FENCE_H */