dma-fence-array.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-array: aggregate fences to be waited together
 *
 * Copyright (C) 2016 Collabora Ltd
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 * Authors:
 *	Gustavo Padovan <[email protected]>
 *	Christian König <[email protected]>
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence-array.h>

#define PENDING_ERROR 1
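
/*
 * Editor's note: fence errors are negative errno values, so the positive
 * PENDING_ERROR sentinel above can never collide with a real error code
 * stored in base.error.
 */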

static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}

static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
					      int error)
{
	/*
	 * Propagate the first error reported by any of our fences, but only
	 * before we ourselves are signaled.
	 */
	if (error)
		cmpxchg(&array->base.error, PENDING_ERROR, error);
}

static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
{
	/* Clear the error flag if not actually set. */
	cmpxchg(&array->base.error, PENDING_ERROR, 0);
}
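
/*
 * Editor's note: the last member fence may complete us from within its own
 * callback, potentially in IRQ context with that fence's lock held, so the
 * array defers its own signalling to irq_work context instead of signalling
 * directly from the callback.
 */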
static void irq_dma_fence_array_work(struct irq_work *wrk)
{
	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);

	dma_fence_array_clear_pending_error(array);

	dma_fence_signal(&array->base);
	dma_fence_put(&array->base);
}
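
/*
 * Editor's note: called once per member fence as it signals. It records the
 * fence's error, then either queues the irq_work above (when the last pending
 * fence completes) or drops the array reference taken in enable_signaling;
 * in the former case the irq_work handler drops that reference instead.
 */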
static void dma_fence_array_cb_func(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
		container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *array = array_cb->array;

	dma_fence_array_set_pending_error(array, f->error);

	if (atomic_dec_and_test(&array->num_pending))
		irq_work_queue(&array->work);
	else
		dma_fence_put(&array->base);
}

static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	struct dma_fence_array_cb *cb = (void *)(&array[1]);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i) {
		cb[i].array = array;

		/*
		 * As we may report that the fence is signaled before all
		 * callbacks are complete, we need to take an additional
		 * reference count on the array so that we do not free it too
		 * early. The core fence handling will only hold the reference
		 * until we signal the array as complete (but that is now
		 * insufficient).
		 */
		dma_fence_get(&array->base);
		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
					   dma_fence_array_cb_func)) {
			int error = array->fences[i]->error;

			dma_fence_array_set_pending_error(array, error);
			dma_fence_put(&array->base);
			if (atomic_dec_and_test(&array->num_pending)) {
				dma_fence_array_clear_pending_error(array);
				return false;
			}
		}
	}

	return true;
}

static bool dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);

	if (atomic_read(&array->num_pending) > 0)
		return false;

	dma_fence_array_clear_pending_error(array);
	return true;
}
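
/*
 * Editor's note: drops the reference held on each member fence and frees the
 * fences array, whose ownership was taken over in dma_fence_array_create().
 */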
static void dma_fence_array_release(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i)
		dma_fence_put(array->fences[i]);

	kfree(array->fences);
	dma_fence_free(fence);
}

const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
};
EXPORT_SYMBOL(dma_fence_array_ops);

/**
 * dma_fence_array_create - Create a custom fence array
 * @num_fences: [in] number of fences to add in the array
 * @fences: [in] array containing the fences
 * @context: [in] fence context to use
 * @seqno: [in] sequence number to use
 * @signal_on_any: [in] signal on any fence in the array
 *
 * Allocate a dma_fence_array object and initialize the base fence with
 * dma_fence_init(). Returns NULL on error.
 *
 * The caller should allocate an array of @num_fences fences and fill it with
 * the fences it wants to add to the object. Ownership of the array is taken
 * over, and dma_fence_put() is called on each fence on release.
 *
 * If @signal_on_any is true the fence array signals if any fence in the array
 * signals, otherwise it signals when all fences in the array signal.
 */
struct dma_fence_array *dma_fence_array_create(int num_fences,
					       struct dma_fence **fences,
					       u64 context, unsigned seqno,
					       bool signal_on_any)
{
	struct dma_fence_array *array;
	size_t size = sizeof(*array);

	WARN_ON(!num_fences || !fences);

	/* Allocate the callback structures behind the array. */
	size += num_fences * sizeof(struct dma_fence_array_cb);
	array = kzalloc(size, GFP_KERNEL);
	if (!array)
		return NULL;

	spin_lock_init(&array->lock);
	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
		       context, seqno);
	init_irq_work(&array->work, irq_dma_fence_array_work);

	array->num_fences = num_fences;
	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
	array->fences = fences;

	array->base.error = PENDING_ERROR;

	/*
	 * dma_fence_array objects should never contain any other fence
	 * containers or otherwise we run into recursion and potential kernel
	 * stack overflow on operations on the dma_fence_array.
	 *
	 * The correct way of handling this is to flatten out the array by the
	 * caller instead.
	 *
	 * Enforce this here by checking that we don't create a dma_fence_array
	 * with any container inside.
	 */
	while (num_fences--)
		WARN_ON(dma_fence_is_container(fences[num_fences]));

	return array;
}
EXPORT_SYMBOL(dma_fence_array_create);
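
/*
 * Editor's note: an illustrative usage sketch, not part of this file.
 * fence_a and fence_b are placeholders for fences the caller already holds
 * references to:
 *
 *	struct dma_fence **fences;
 *	struct dma_fence_array *array;
 *
 *	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
 *	if (!fences)
 *		return -ENOMEM;
 *	fences[0] = fence_a;
 *	fences[1] = fence_b;
 *
 *	array = dma_fence_array_create(2, fences,
 *				       dma_fence_context_alloc(1), 1, false);
 *	if (!array) {
 *		kfree(fences);
 *		return -ENOMEM;
 *	}
 *
 * On success the array owns both the fences[] storage and the fence
 * references; dropping the last reference to &array->base releases them via
 * dma_fence_array_release().
 */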

/**
 * dma_fence_match_context - Check if all fences are from the given context
 * @fence: [in] fence or fence array
 * @context: [in] fence context to check all fences against
 *
 * Checks the provided fence or, for a fence array, all fences in the array
 * against the given context. Returns false if any fence is from a different
 * context.
 */
bool dma_fence_match_context(struct dma_fence *fence, u64 context)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned i;

	if (!dma_fence_is_array(fence))
		return fence->context == context;

	for (i = 0; i < array->num_fences; i++) {
		if (array->fences[i]->context != context)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(dma_fence_match_context);
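
/**
 * dma_fence_array_first - return the first fence in an array
 * @head: the fence array to iterate over
 *
 * Returns NULL if @head is NULL or the array is empty, @head itself if it is
 * not a dma_fence_array, and the first fence in the array otherwise.
 */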
struct dma_fence *dma_fence_array_first(struct dma_fence *head)
{
	struct dma_fence_array *array;

	if (!head)
		return NULL;

	array = to_dma_fence_array(head);
	if (!array)
		return head;

	if (!array->num_fences)
		return NULL;

	return array->fences[0];
}
EXPORT_SYMBOL(dma_fence_array_first);
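
/**
 * dma_fence_array_next - return the fence at the given index
 * @head: the fence array to iterate over
 * @index: index into the array
 *
 * Returns NULL if @head is not a dma_fence_array or @index is past the end
 * of the array, and the fence at @index otherwise.
 */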
struct dma_fence *dma_fence_array_next(struct dma_fence *head,
				       unsigned int index)
{
	struct dma_fence_array *array = to_dma_fence_array(head);

	if (!array || index >= array->num_fences)
		return NULL;

	return array->fences[index];
}
EXPORT_SYMBOL(dma_fence_array_next);
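
/*
 * Editor's note: together these two helpers back the fence-array iteration
 * pattern (the dma_fence_array_for_each() macro in <linux/dma-fence-array.h>
 * is built on them): start with dma_fence_array_first() and advance with
 * dma_fence_array_next() until NULL is returned.
 */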