fence.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Syncpoint dma_fence implementation
 *
 * Copyright (c) 2020, NVIDIA Corporation.
 */
#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include "fence.h"
#include "intr.h"
#include "syncpt.h"
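
/*
 * All syncpoint fences share this one lock; dma_fence_init() below
 * hands it to the dma_fence core, which uses it to protect its
 * internal signalling state.
 */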
static DEFINE_SPINLOCK(lock);

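/*
 * A fence that signals once syncpoint 'sp' reaches 'threshold'. The
 * 'signaling' flag arbitrates between the interrupt path and the
 * timeout worker; 'waiter'/'waiter_ref' track the action registered
 * with the intr core.
 */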
struct host1x_syncpt_fence {
        struct dma_fence base;

        atomic_t signaling;

        struct host1x_syncpt *sp;
        u32 threshold;

        struct host1x_waitlist *waiter;
        void *waiter_ref;

        struct delayed_work timeout_work;
};

static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
{
        return "host1x";
}

static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
{
        return "syncpoint";
}

static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
{
        return container_of(f, struct host1x_syncpt_fence, base);
}

static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
{
        struct host1x_syncpt_fence *sf = to_host1x_fence(f);
        int err;

        if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
                return false;

        dma_fence_get(f);

        /*
         * The dma_fence framework requires the fence driver to keep a
         * reference to any fences for which 'enable_signaling' has been
         * called (and that have not been signalled).
         *
         * We provide a userspace API to create arbitrary syncpoint fences,
         * so we cannot normally guarantee that all fences get signalled.
         * As such, set up a timeout, so that long-lasting fences will get
         * reaped eventually.
         */
        schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));

        err = host1x_intr_add_action(sf->sp->host, sf->sp, sf->threshold,
                                     HOST1X_INTR_ACTION_SIGNAL_FENCE, f,
                                     sf->waiter, &sf->waiter_ref);
        if (err) {
                cancel_delayed_work_sync(&sf->timeout_work);
                dma_fence_put(f);
                return false;
        }

        /* intr framework takes ownership of waiter */
        sf->waiter = NULL;

        /*
         * The fence may get signalled at any time after the above call,
         * so we should not touch the object further.
         */

        return true;
}

static void host1x_syncpt_fence_release(struct dma_fence *f)
{
        struct host1x_syncpt_fence *sf = to_host1x_fence(f);

        kfree(sf->waiter);

        dma_fence_free(f);
}

const struct dma_fence_ops host1x_syncpt_fence_ops = {
        .get_driver_name = host1x_syncpt_fence_get_driver_name,
        .get_timeline_name = host1x_syncpt_fence_get_timeline_name,
        .enable_signaling = host1x_syncpt_fence_enable_signaling,
        .release = host1x_syncpt_fence_release,
};

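/*
 * Called from the interrupt path (via HOST1X_INTR_ACTION_SIGNAL_FENCE)
 * once the syncpoint has reached the fence's threshold. The 'signaling'
 * flag resolves the race with do_fence_timeout(): whichever side sets
 * it first gets to complete the fence.
 */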
void host1x_fence_signal(struct host1x_syncpt_fence *f)
{
        if (atomic_xchg(&f->signaling, 1))
                return;

        /*
         * Cancel pending timeout work: if the worker has already
         * started running, it will lose the race on 'f->signaling'
         * above and return without touching the fence.
         */
        cancel_delayed_work_sync(&f->timeout_work);

        host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, false);

        dma_fence_signal(&f->base);
        dma_fence_put(&f->base);
}

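/*
 * Timeout worker: completes fences that were never signalled within 30
 * seconds of signalling being enabled, so that the reference taken in
 * host1x_syncpt_fence_enable_signaling() is eventually dropped.
 */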
static void do_fence_timeout(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct host1x_syncpt_fence *f =
                container_of(dwork, struct host1x_syncpt_fence, timeout_work);

        if (atomic_xchg(&f->signaling, 1))
                return;

        /*
         * We are the timeout work ourselves, so there is nothing to
         * cancel here. Unlike host1x_fence_signal(), we are not on the
         * interrupt path, so we can ask host1x_intr_put_ref() to flush
         * (the 'true' argument) before completing the fence.
         */
        host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, true);

        dma_fence_set_error(&f->base, -ETIMEDOUT);
        dma_fence_signal(&f->base);
        dma_fence_put(&f->base);
}

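/*
 * host1x_fence_create() - create a dma_fence that signals when @sp
 * reaches @threshold.
 *
 * Each fence allocates a fresh dma_fence context, so fences on the
 * same syncpoint are not ordered against one another by the dma_fence
 * core itself.
 */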
struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
{
        struct host1x_syncpt_fence *fence;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return ERR_PTR(-ENOMEM);

        fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
        if (!fence->waiter) {
                kfree(fence);
                return ERR_PTR(-ENOMEM);
        }

        fence->sp = sp;
        fence->threshold = threshold;

        dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &lock,
                       dma_fence_context_alloc(1), 0);

        INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);

        return &fence->base;
}
EXPORT_SYMBOL(host1x_fence_create);
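
/*
 * A minimal usage sketch, not part of the driver itself: one plausible
 * way a caller could expose a syncpoint fence to userspace as a
 * sync_file fd, which is what the <linux/file.h> and <linux/sync_file.h>
 * includes above suggest. The function name is hypothetical; the fd
 * handling follows the usual get_unused_fd_flags()/fd_install() pattern.
 */
#if 0
static int host1x_fence_example_to_fd(struct host1x_syncpt *sp, u32 threshold)
{
        struct dma_fence *fence;
        struct sync_file *file;
        int fd;

        fence = host1x_fence_create(sp, threshold);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0) {
                dma_fence_put(fence);
                return fd;
        }

        /* sync_file_create() takes its own reference to the fence. */
        file = sync_file_create(fence);
        dma_fence_put(fence);
        if (!file) {
                put_unused_fd(fd);
                return -ENOMEM;
        }

        fd_install(fd, file->file);

        return fd;
}
#endif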