kgsl_events.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/rwlock.h>

#include "kgsl_debugfs.h"
#include "kgsl_device.h"
#include "kgsl_eventlog.h"
#include "kgsl_trace.h"

/*
 * Define a kmem cache for the event structures since we allocate and free them
 * so frequently
 */
static struct kmem_cache *events_cache;
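
/*
 * Remove the event from its group's list and queue its callback on the
 * device event worker. Called with the group spinlock held.
 */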
static inline void signal_event(struct kgsl_device *device,
		struct kgsl_event *event, int result)
{
	list_del(&event->node);
	event->result = result;
	kthread_queue_work(device->events_worker, &event->work);
}

/**
 * _kgsl_event_worker() - Work handler for processing GPU event callbacks
 * @work: Pointer to the kthread_work for the event
 *
 * Each event callback has its own kthread_work struct and is run on an
 * event-specific worker thread. This is the worker that invokes the event
 * callback function.
 */
static void _kgsl_event_worker(struct kthread_work *work)
{
	struct kgsl_event *event = container_of(work, struct kgsl_event, work);
	int id = KGSL_CONTEXT_ID(event->context);

	trace_kgsl_fire_event(id, event->timestamp, event->result,
		jiffies - event->created, event->func);

	log_kgsl_fire_event(id, event->timestamp, event->result,
		jiffies - event->created);

	event->func(event->device, event->group, event->priv, event->result);

	kgsl_context_put(event->context);
	kmem_cache_free(events_cache, event);
}

/* Return true if the group needs to be processed */
static bool _do_process_group(unsigned int processed, unsigned int cur)
{
	if (processed == cur)
		return false;

	/*
	 * This ensures that the timestamp didn't slip back accidentally, maybe
	 * due to a memory barrier issue. This is highly unlikely but we've
	 * been burned here in the past.
	 */
	if ((cur < processed) && ((processed - cur) < KGSL_TIMESTAMP_WINDOW))
		return false;

	return true;
}
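
/*
 * Signal every event in the group that has retired against the current
 * retired timestamp; if @flush is set, cancel the ones that have not.
 */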
static void _process_event_group(struct kgsl_device *device,
		struct kgsl_event_group *group, bool flush)
{
	struct kgsl_event *event, *tmp;
	unsigned int timestamp;
	struct kgsl_context *context;

	if (group == NULL)
		return;

	context = group->context;

	/*
	 * Sanity check to be sure that we aren't racing with the context
	 * getting destroyed
	 */
	if (WARN_ON(context != NULL && !_kgsl_context_get(context)))
		return;

	spin_lock(&group->lock);

	group->readtimestamp(device, group->priv, KGSL_TIMESTAMP_RETIRED,
		&timestamp);

	if (!flush && !_do_process_group(group->processed, timestamp))
		goto out;

	list_for_each_entry_safe(event, tmp, &group->events, node) {
		if (timestamp_cmp(event->timestamp, timestamp) <= 0)
			signal_event(device, event, KGSL_EVENT_RETIRED);
		else if (flush)
			signal_event(device, event, KGSL_EVENT_CANCELLED);
	}

	group->processed = timestamp;

out:
	spin_unlock(&group->lock);
	kgsl_context_put(context);
}

/**
 * kgsl_process_event_group() - Handle all the retired events in a group
 * @device: Pointer to a KGSL device
 * @group: Pointer to a GPU events group to process
 */
void kgsl_process_event_group(struct kgsl_device *device,
		struct kgsl_event_group *group)
{
	_process_event_group(device, group, false);
}

/**
 * kgsl_flush_event_group() - Flush all the events in a group by retiring the
 * ones that can be retired and cancelling the ones that are pending
 * @device: Pointer to a KGSL device
 * @group: Pointer to a GPU events group to process
 */
void kgsl_flush_event_group(struct kgsl_device *device,
		struct kgsl_event_group *group)
{
	_process_event_group(device, group, true);
}

/**
 * kgsl_cancel_events_timestamp() - Cancel pending events for a given timestamp
 * @device: Pointer to a KGSL device
 * @group: Pointer to the GPU event group that owns the event
 * @timestamp: Registered expiry timestamp for the event
 */
void kgsl_cancel_events_timestamp(struct kgsl_device *device,
		struct kgsl_event_group *group, unsigned int timestamp)
{
	struct kgsl_event *event, *tmp;

	spin_lock(&group->lock);

	list_for_each_entry_safe(event, tmp, &group->events, node) {
		if (timestamp_cmp(timestamp, event->timestamp) == 0)
			signal_event(device, event, KGSL_EVENT_CANCELLED);
	}

	spin_unlock(&group->lock);
}

/**
 * kgsl_cancel_events() - Cancel all pending events in the group
 * @device: Pointer to a KGSL device
 * @group: Pointer to a kgsl_events_group
 */
void kgsl_cancel_events(struct kgsl_device *device,
		struct kgsl_event_group *group)
{
	struct kgsl_event *event, *tmp;

	spin_lock(&group->lock);

	list_for_each_entry_safe(event, tmp, &group->events, node)
		signal_event(device, event, KGSL_EVENT_CANCELLED);

	spin_unlock(&group->lock);
}

/**
 * kgsl_cancel_event() - Cancel a specific event from a group
 * @device: Pointer to a KGSL device
 * @group: Pointer to the group that contains the events
 * @timestamp: Registered expiry timestamp for the event
 * @func: Registered callback for the event
 * @priv: Registered priv data for the callback
 */
void kgsl_cancel_event(struct kgsl_device *device,
		struct kgsl_event_group *group, unsigned int timestamp,
		kgsl_event_func func, void *priv)
{
	struct kgsl_event *event, *tmp;

	spin_lock(&group->lock);

	list_for_each_entry_safe(event, tmp, &group->events, node) {
		if (timestamp == event->timestamp && func == event->func &&
			event->priv == priv) {
			signal_event(device, event, KGSL_EVENT_CANCELLED);
			break;
		}
	}

	spin_unlock(&group->lock);
}

/**
 * kgsl_event_pending() - Searches for an event in an event group
 * @device: Pointer to a KGSL device
 * @group: Pointer to the group that contains the events
 * @timestamp: Registered expiry timestamp for the event
 * @func: Registered callback for the event
 * @priv: Registered priv data for the callback
 *
 * Return: true if a matching event is pending in the group
 */
bool kgsl_event_pending(struct kgsl_device *device,
		struct kgsl_event_group *group,
		unsigned int timestamp, kgsl_event_func func, void *priv)
{
	struct kgsl_event *event;
	bool result = false;

	spin_lock(&group->lock);

	list_for_each_entry(event, &group->events, node) {
		if (timestamp == event->timestamp && func == event->func &&
			event->priv == priv) {
			result = true;
			break;
		}
	}

	spin_unlock(&group->lock);
	return result;
}

/**
 * kgsl_add_event() - Add a new GPU event to a group
 * @device: Pointer to a KGSL device
 * @group: Pointer to the group to add the event to
 * @timestamp: Timestamp that the event will expire on
 * @func: Callback function for the event
 * @priv: Private data to send to the callback function
 *
 * Return: 0 on success or a negative error code on failure
 */
int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group,
		unsigned int timestamp, kgsl_event_func func, void *priv)
{
	unsigned int queued;
	struct kgsl_context *context = group->context;
	struct kgsl_event *event;
	unsigned int retired;

	if (!func)
		return -EINVAL;

	/*
	 * If the caller is creating their own timestamps, let them schedule
	 * events in the future. Otherwise only allow timestamps that have been
	 * queued.
	 */
	if (!context || !(context->flags & KGSL_CONTEXT_USER_GENERATED_TS)) {
		group->readtimestamp(device, group->priv, KGSL_TIMESTAMP_QUEUED,
			&queued);

		if (timestamp_cmp(timestamp, queued) > 0)
			return -EINVAL;
	}

	event = kmem_cache_alloc(events_cache, GFP_KERNEL);
	if (event == NULL)
		return -ENOMEM;

	/* Get a reference to the context while the event is active */
	if (context != NULL && !_kgsl_context_get(context)) {
		kmem_cache_free(events_cache, event);
		return -ENOENT;
	}

	event->device = device;
	event->context = context;
	event->timestamp = timestamp;
	event->priv = priv;
	event->func = func;
	event->created = jiffies;
	event->group = group;

	kthread_init_work(&event->work, _kgsl_event_worker);

	trace_kgsl_register_event(KGSL_CONTEXT_ID(context), timestamp, func);

	spin_lock(&group->lock);

	/*
	 * Check to see if the requested timestamp has already retired. If so,
	 * schedule the callback right away
	 */
	group->readtimestamp(device, group->priv, KGSL_TIMESTAMP_RETIRED,
		&retired);

	if (timestamp_cmp(retired, timestamp) >= 0) {
		event->result = KGSL_EVENT_RETIRED;
		kthread_queue_work(device->events_worker, &event->work);
		spin_unlock(&group->lock);
		return 0;
	}

	/* Add the event to the group list */
	list_add_tail(&event->node, &group->events);

	spin_unlock(&group->lock);

	return 0;
}
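
/*
 * Example usage (a minimal sketch): the callback must match kgsl_event_func,
 * which is invoked as func(device, group, priv, result). The handler name
 * "mydrv_ts_retired" and the priv pointer are hypothetical.
 *
 *	static void mydrv_ts_retired(struct kgsl_device *device,
 *			struct kgsl_event_group *group, void *priv, int result)
 *	{
 *		// result is KGSL_EVENT_RETIRED or KGSL_EVENT_CANCELLED
 *	}
 *
 *	ret = kgsl_add_event(device, group, timestamp, mydrv_ts_retired, priv);
 */

/* Process all event groups registered on the device */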
void kgsl_process_event_groups(struct kgsl_device *device)
{
	struct kgsl_event_group *group;

	read_lock(&device->event_groups_lock);
	list_for_each_entry(group, &device->event_groups, group)
		_process_event_group(device, group, false);
	read_unlock(&device->event_groups_lock);
}
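
/*
 * Remove the group from the device-wide list. The group's event list is
 * expected to be empty by the time this is called.
 */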
void kgsl_del_event_group(struct kgsl_device *device,
		struct kgsl_event_group *group)
{
	/* Check if the group is uninitialized */
	if (!group->context)
		return;

	/* Make sure that all the events have been deleted from the list */
	WARN_ON(!list_empty(&group->events));

	write_lock(&device->event_groups_lock);
	list_del(&group->group);
	write_unlock(&device->event_groups_lock);
}
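
/*
 * Initialize an event group for the given context and add it to the
 * device-wide list of groups. The optional format string names the group
 * for debugfs output.
 */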
void kgsl_add_event_group(struct kgsl_device *device,
		struct kgsl_event_group *group, struct kgsl_context *context,
		readtimestamp_func readtimestamp,
		void *priv, const char *fmt, ...)
{
	va_list args;

	WARN_ON(readtimestamp == NULL);

	spin_lock_init(&group->lock);
	INIT_LIST_HEAD(&group->events);

	group->context = context;
	group->readtimestamp = readtimestamp;
	group->priv = priv;

	if (fmt) {
		va_start(args, fmt);
		vsnprintf(group->name, sizeof(group->name), fmt, args);
		va_end(args);
	}

	write_lock(&device->event_groups_lock);
	list_add_tail(&group->group, &device->event_groups);
	write_unlock(&device->event_groups_lock);
}
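
/*
 * Example registration (a minimal sketch): "mydrv_readtimestamp" is a
 * hypothetical readtimestamp_func, and the group is assumed to be embedded
 * in the caller's context structure.
 *
 *	kgsl_add_event_group(device, &context->events, context,
 *		mydrv_readtimestamp, context, "context-%u", context->id);
 */

/* Print one event group and its pending events to the debugfs file */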
static void events_debugfs_print_group(struct seq_file *s,
		struct kgsl_event_group *group)
{
	struct kgsl_event *event;
	unsigned int retired;

	spin_lock(&group->lock);

	seq_printf(s, "%s: last=%d\n", group->name, group->processed);

	list_for_each_entry(event, &group->events, node) {
		group->readtimestamp(event->device, group->priv,
			KGSL_TIMESTAMP_RETIRED, &retired);

		seq_printf(s, "\t%u:%u age=%lu func=%ps [retired=%u]\n",
			group->context ? group->context->id :
			KGSL_MEMSTORE_GLOBAL,
			event->timestamp, jiffies - event->created,
			event->func, retired);
	}

	spin_unlock(&group->lock);
}

static int events_show(struct seq_file *s, void *unused)
{
	struct kgsl_device *device = s->private;
	struct kgsl_event_group *group;

	seq_puts(s, "event groups:\n");
	seq_puts(s, "--------------\n");

	read_lock(&device->event_groups_lock);
	list_for_each_entry(group, &device->event_groups, group) {
		events_debugfs_print_group(s, group);
		seq_puts(s, "\n");
	}
	read_unlock(&device->event_groups_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(events);
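
/* Tear down any remaining event groups when the device is removed */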
void kgsl_device_events_remove(struct kgsl_device *device)
{
	struct kgsl_event_group *group, *tmp;

	write_lock(&device->event_groups_lock);
	list_for_each_entry_safe(group, tmp, &device->event_groups, group) {
		WARN_ON(!list_empty(&group->events));
		list_del(&group->group);
	}
	write_unlock(&device->event_groups_lock);
}
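
/* Set up the per-device event group list and the debugfs node */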
void kgsl_device_events_probe(struct kgsl_device *device)
{
	INIT_LIST_HEAD(&device->event_groups);
	rwlock_init(&device->event_groups_lock);

	debugfs_create_file("events", 0444, device->d_debugfs, device,
		&events_fops);
}

/**
 * kgsl_events_exit() - Destroy the event kmem cache on module exit
 */
void kgsl_events_exit(void)
{
	kmem_cache_destroy(events_cache);
}

/**
 * kgsl_events_init() - Create the event kmem cache on module start
 */
void __init kgsl_events_init(void)
{
	events_cache = KMEM_CACHE(kgsl_event, 0);
}