// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <trace/events/host1x.h>
#include "channel.h"
#include "dev.h"
#include "fence.h"
#include "intr.h"

/* Wait list management */

enum waitlist_state {
	WLS_PENDING,
	WLS_REMOVED,
	WLS_CANCELLED,
	WLS_HANDLED
};
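
/*
 * Note: the states are deliberately declared in this order. Completion
 * advances a waiter's state with a single atomic_inc_return(), so
 * WLS_PENDING becomes WLS_REMOVED and WLS_CANCELLED becomes WLS_HANDLED
 * (see remove_completed_waiters() below).
 */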

static void waiter_release(struct kref *kref)
{
	kfree(container_of(kref, struct host1x_waitlist, refcount));
}

/*
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 *
 * thresholds are compared in wrapping (modulo 2^32) arithmetic, so the
 * queue stays correctly ordered across sync point value wraparound
 */
static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
				struct list_head *queue)
{
	struct host1x_waitlist *pos;
	u32 thresh = waiter->thresh;

	list_for_each_entry_reverse(pos, queue, list)
		if ((s32)(pos->thresh - thresh) <= 0) {
			list_add(&waiter->list, &pos->list);
			return false;
		}

	list_add(&waiter->list, queue);
	return true;
}

/*
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
			struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *dest;
	struct host1x_waitlist *waiter, *next, *prev;

	list_for_each_entry_safe(waiter, next, head, list) {
		if ((s32)(waiter->thresh - sync) > 0)
			break;

		dest = completed + waiter->action;

		/* consolidate submit cleanups */
		if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
		    !list_empty(dest)) {
			prev = list_entry(dest->prev,
					  struct host1x_waitlist, list);
			if (prev->data == waiter->data) {
				prev->count++;
				dest = NULL;
			}
		}

		/* PENDING->REMOVED or CANCELLED->HANDLED */
		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
			list_del(&waiter->list);
			kref_put(&waiter->refcount, waiter_release);
		} else
			list_move_tail(&waiter->list, dest);
	}
}
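
/*
 * Reprogram the hardware threshold to the lowest remaining waiter and
 * re-enable the sync point interrupt. Called with the wait list lock
 * held and the list known to be non-empty.
 */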
static void reset_threshold_interrupt(struct host1x *host,
				      struct list_head *head,
				      unsigned int id)
{
	u32 thresh =
		list_first_entry(head, struct host1x_waitlist, list)->thresh;

	host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
	host1x_hw_intr_enable_syncpt_intr(host, id);
}
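
/*
 * Completion actions. These run from run_handlers(), outside the wait
 * list lock, once a waiter has been moved off its queue.
 */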
static void action_submit_complete(struct host1x_waitlist *waiter)
{
	struct host1x_channel *channel = waiter->data;

	host1x_cdma_update(&channel->cdma);

	/* Add nr_completed to trace */
	trace_host1x_channel_submit_complete(dev_name(channel->dev),
					     waiter->count, waiter->thresh);
}

static void action_wakeup(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up(wq);
}

static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up_interruptible(wq);
}

static void action_signal_fence(struct host1x_waitlist *waiter)
{
	struct host1x_syncpt_fence *f = waiter->data;

	host1x_fence_signal(f);
}

typedef void (*action_handler)(struct host1x_waitlist *waiter);
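
/*
 * Keep this table in the same order as enum host1x_intr_action:
 * completed waiters are binned by waiter->action, and run_handlers()
 * pairs each bin with the handler at the same index.
 */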
static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
	action_submit_complete,
	action_wakeup,
	action_wakeup_interruptible,
	action_signal_fence,
};

static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *head = completed;
	unsigned int i;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
		action_handler handler = action_handlers[i];
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next, head, list) {
			list_del(&waiter->list);
			handler(waiter);
			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
				WLS_REMOVED);
			kref_put(&waiter->refcount, waiter_release);
		}
	}
}

/*
 * Remove & handle all waiters that have completed for the given syncpt.
 * Waiters are gathered under the wait list lock, but their handlers run
 * only after the lock has been dropped. Returns nonzero if the wait
 * list is now empty.
 */
static int process_wait_list(struct host1x *host,
			     struct host1x_syncpt *syncpt,
			     u32 threshold)
{
	struct list_head completed[HOST1X_INTR_ACTION_COUNT];
	unsigned int i;
	int empty;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
		INIT_LIST_HEAD(completed + i);

	spin_lock(&syncpt->intr.lock);

	remove_completed_waiters(&syncpt->intr.wait_head, threshold,
				 completed);

	empty = list_empty(&syncpt->intr.wait_head);
	if (empty)
		host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
	else
		reset_threshold_interrupt(host, &syncpt->intr.wait_head,
					  syncpt->id);

	spin_unlock(&syncpt->intr.lock);

	run_handlers(completed);

	return empty;
}

/*
 * Sync point threshold interrupt service thread function
 * Handles sync point threshold triggers, in thread context
 */
static void syncpt_thresh_work(struct work_struct *work)
{
	struct host1x_syncpt_intr *syncpt_intr =
		container_of(work, struct host1x_syncpt_intr, work);
	struct host1x_syncpt *syncpt =
		container_of(syncpt_intr, struct host1x_syncpt, intr);
	unsigned int id = syncpt->id;
	struct host1x *host = syncpt->host;

	(void)process_wait_list(host, syncpt,
				host1x_syncpt_load(host->syncpt + id));
}
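
/*
 * Queue 'waiter' so that 'action' fires once the sync point reaches
 * 'thresh'. If 'ref' is non-NULL, an extra reference is taken and must
 * be dropped later with host1x_intr_put_ref().
 *
 * A minimal usage sketch for a blocking wait (hypothetical caller;
 * allocation failure handling is omitted, and the expiry check is the
 * caller's responsibility):
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *	struct host1x_waitlist *waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
 *	void *ref;
 *	int err;
 *
 *	err = host1x_intr_add_action(host, syncpt, thresh,
 *				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 *				     &wq, waiter, &ref);
 *	if (!err) {
 *		wait_event_interruptible(wq,
 *				host1x_syncpt_is_expired(syncpt, thresh));
 *		host1x_intr_put_ref(host, syncpt->id, ref, true);
 *	}
 */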
int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
			   u32 thresh, enum host1x_intr_action action,
			   void *data, struct host1x_waitlist *waiter,
			   void **ref)
{
	int queue_was_empty;

	if (waiter == NULL) {
		pr_warn("%s: NULL waiter\n", __func__);
		return -EINVAL;
	}

	/* initialize a new waiter */
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	if (ref)
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	spin_lock(&syncpt->intr.lock);

	queue_was_empty = list_empty(&syncpt->intr.wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
		/* added at head of list - new threshold value */
		host1x_hw_intr_set_syncpt_threshold(host, syncpt->id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
	}

	if (ref)
		*ref = waiter;

	spin_unlock(&syncpt->intr.lock);

	return 0;
}
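
/*
 * Drop the reference taken by host1x_intr_add_action() and cancel the
 * waiter if it has not completed yet. With 'flush' set, also wait until
 * any handler that is concurrently running for this waiter has
 * finished.
 */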
void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
			 bool flush)
{
	struct host1x_waitlist *waiter = ref;
	struct host1x_syncpt *syncpt;

	atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED);

	syncpt = host->syncpt + id;

	spin_lock(&syncpt->intr.lock);
	if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED) ==
	    WLS_CANCELLED) {
		list_del(&waiter->list);
		kref_put(&waiter->refcount, waiter_release);
	}
	spin_unlock(&syncpt->intr.lock);

	if (flush) {
		/* Wait until any concurrently executing handler has finished. */
		while (atomic_read(&waiter->state) != WLS_HANDLED)
			schedule();
	}

	kref_put(&waiter->refcount, waiter_release);
}
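
/*
 * Set up the per-syncpt wait list bookkeeping. The interrupt itself is
 * hooked up later, from host1x_intr_start().
 */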
int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
{
	unsigned int id;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_init(&host->intr_mutex);
	host->intr_syncpt_irq = irq_sync;

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_syncpt *syncpt = host->syncpt + id;

		spin_lock_init(&syncpt->intr.lock);
		INIT_LIST_HEAD(&syncpt->intr.wait_head);
		snprintf(syncpt->intr.thresh_irq_name,
			 sizeof(syncpt->intr.thresh_irq_name),
			 "host1x_sp_%02u", id);
	}

	return 0;
}

void host1x_intr_deinit(struct host1x *host)
{
}
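
/*
 * DIV_ROUND_UP(hz, 1000000) converts the host1x clock rate into clock
 * cycles per microsecond before handing it to the hardware layer.
 */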
void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);
	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
					    syncpt_thresh_work);
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}
	mutex_unlock(&host->intr_mutex);
}
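
/*
 * Disable all sync point interrupts and drop any waiters that were
 * already cancelled. If an active waiter is still queued, warn and bail
 * out without freeing the IRQ, since its handler may yet run.
 */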
void host1x_intr_stop(struct host1x *host)
{
	unsigned int id;
	struct host1x_syncpt *syncpt = host->syncpt;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_lock(&host->intr_mutex);

	host1x_hw_intr_disable_all_syncpt_intrs(host);

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next,
			&syncpt[id].intr.wait_head, list) {
			if (atomic_cmpxchg(&waiter->state,
			    WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
				list_del(&waiter->list);
				kref_put(&waiter->refcount, waiter_release);
			}
		}

		if (!list_empty(&syncpt[id].intr.wait_head)) {
			/* output diagnostics */
			mutex_unlock(&host->intr_mutex);
			pr_warn("%s cannot stop syncpt intr id=%u\n",
				__func__, id);
			return;
		}
	}

	host1x_hw_intr_free_syncpt_irq(host);

	mutex_unlock(&host->intr_mutex);
}