// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/sync_file.h>

#include "kgsl_device.h"
#include "kgsl_eventlog.h"
#include "kgsl_sharedmem.h"
#include "kgsl_timeline.h"
#include "kgsl_trace.h"
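
/*
 * A software fence on a kgsl timeline. The embedded dma_fence is what
 * external waiters see; @node links the fence into the timeline's
 * sorted list of pending fences.
 */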
struct kgsl_timeline_fence {
        struct dma_fence base;
        struct kgsl_timeline *timeline;
        struct list_head node;
};
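
/*
 * Build one waitable dma_fence from a user array of struct
 * kgsl_timeline_val entries. A single entry returns its fence directly;
 * multiple entries are wrapped in a dma_fence_array that signals on any
 * or all of them, depending on @any.
 */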
struct dma_fence *kgsl_timelines_to_fence_array(struct kgsl_device *device,
                u64 timelines, u32 count, u64 usize, bool any)
{
        void __user *uptr = u64_to_user_ptr(timelines);
        struct dma_fence_array *array;
        struct dma_fence **fences;
        int i, ret = 0;

        if (!count || count > INT_MAX)
                return ERR_PTR(-EINVAL);

        fences = kcalloc(count, sizeof(*fences),
                GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
        if (!fences)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < count; i++) {
                struct kgsl_timeline_val val;
                struct kgsl_timeline *timeline;

                if (copy_struct_from_user(&val, sizeof(val), uptr, usize)) {
                        ret = -EFAULT;
                        goto err;
                }

                if (val.padding) {
                        ret = -EINVAL;
                        goto err;
                }

                timeline = kgsl_timeline_by_id(device, val.timeline);
                if (!timeline) {
                        ret = -ENOENT;
                        goto err;
                }

                fences[i] = kgsl_timeline_fence_alloc(timeline, val.seqno);
                kgsl_timeline_put(timeline);

                if (IS_ERR(fences[i])) {
                        ret = PTR_ERR(fences[i]);
                        goto err;
                }

                uptr += usize;
        }

        /* No need for a fence array for only one fence */
        if (count == 1) {
                struct dma_fence *fence = fences[0];

                kfree(fences);
                return fence;
        }

        array = dma_fence_array_create(count, fences,
                dma_fence_context_alloc(1), 0, any);

        if (array)
                return &array->base;

        ret = -ENOMEM;
err:
        for (i = 0; i < count; i++) {
                if (!IS_ERR_OR_NULL(fences[i]))
                        dma_fence_put(fences[i]);
        }

        kfree(fences);
        return ERR_PTR(ret);
}
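
/* kref release callback: a timeline must have no pending fences or events */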
void kgsl_timeline_destroy(struct kref *kref)
{
        struct kgsl_timeline *timeline = container_of(kref,
                struct kgsl_timeline, ref);

        WARN_ON(!list_empty(&timeline->fences));
        WARN_ON(!list_empty(&timeline->events));

        trace_kgsl_timeline_destroy(timeline->id);

        kfree(timeline);
}
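
/*
 * Take a reference on @timeline. Returns NULL if the refcount has
 * already dropped to zero (i.e. the timeline is being destroyed).
 */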
struct kgsl_timeline *kgsl_timeline_get(struct kgsl_timeline *timeline)
{
        if (timeline) {
                if (!kref_get_unless_zero(&timeline->ref))
                        return NULL;
        }

        return timeline;
}
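
/*
 * Allocate a new timeline starting at @initial. The IDR slot is
 * reserved with a NULL pointer here and only committed by
 * kgsl_ioctl_timeline_create(), so a concurrent lookup cannot see a
 * half-initialized timeline.
 */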
static struct kgsl_timeline *kgsl_timeline_alloc(struct kgsl_device_private *dev_priv,
                u64 initial)
{
        struct kgsl_device *device = dev_priv->device;
        struct kgsl_timeline *timeline;
        int id;

        timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
        if (!timeline)
                return ERR_PTR(-ENOMEM);

        idr_preload(GFP_KERNEL);
        spin_lock(&device->timelines_lock);
        /* Allocate the ID but don't attach the pointer just yet */
        id = idr_alloc(&device->timelines, NULL, 1, 0, GFP_NOWAIT);
        spin_unlock(&device->timelines_lock);
        idr_preload_end();

        if (id < 0) {
                kfree(timeline);
                return ERR_PTR(id);
        }

        timeline->context = dma_fence_context_alloc(1);
        timeline->id = id;
        INIT_LIST_HEAD(&timeline->fences);
        INIT_LIST_HEAD(&timeline->events);
        timeline->value = initial;
        timeline->dev_priv = dev_priv;

        snprintf((char *) timeline->name, sizeof(timeline->name),
                "kgsl-sw-timeline-%d", id);

        trace_kgsl_timeline_alloc(id, initial);

        spin_lock_init(&timeline->lock);
        spin_lock_init(&timeline->fence_lock);

        kref_init(&timeline->ref);

        return timeline;
}

static struct kgsl_timeline_fence *to_timeline_fence(struct dma_fence *fence)
{
        return container_of(fence, struct kgsl_timeline_fence, base);
}
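
/* Last-reference callback for a timeline fence */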
static void timeline_fence_release(struct dma_fence *fence)
{
        struct kgsl_timeline_fence *f = to_timeline_fence(fence);
        struct kgsl_timeline *timeline = f->timeline;
        struct kgsl_timeline_fence *cur, *temp;
        unsigned long flags;

        spin_lock_irqsave(&timeline->fence_lock, flags);

        /* If the fence is still on the active list, remove it */
        list_for_each_entry_safe(cur, temp, &timeline->fences, node) {
                if (f != cur)
                        continue;

                list_del_init(&f->node);
                break;
        }

        spin_unlock_irqrestore(&timeline->fence_lock, flags);

        trace_kgsl_timeline_fence_release(f->timeline->id, fence->seqno);
        log_kgsl_timeline_fence_release_event(f->timeline->id, fence->seqno);

        kgsl_timeline_put(f->timeline);

        dma_fence_free(fence);
}
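
/*
 * A fence is signaled once its seqno is no longer ahead of the
 * timeline's current value, per __dma_fence_is_later().
 */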
static bool timeline_fence_signaled(struct dma_fence *fence)
{
        struct kgsl_timeline_fence *f = to_timeline_fence(fence);

        return !__dma_fence_is_later(fence->seqno, f->timeline->value,
                fence->ops);
}

static bool timeline_fence_enable_signaling(struct dma_fence *fence)
{
        /*
         * A false return value means the fence has already passed.
         * Otherwise return true to indicate that signaling was
         * enabled successfully.
         */
        return !timeline_fence_signaled(fence);
}

static const char *timeline_get_driver_name(struct dma_fence *fence)
{
        return "kgsl-sw-timeline";
}

static const char *timeline_get_timeline_name(struct dma_fence *fence)
{
        struct kgsl_timeline_fence *f = to_timeline_fence(fence);

        return f->timeline->name;
}

static void timeline_get_value_str(struct dma_fence *fence,
                char *str, int size)
{
        struct kgsl_timeline_fence *f = to_timeline_fence(fence);

        snprintf(str, size, "%lld", f->timeline->value);
}

static const struct dma_fence_ops timeline_fence_ops = {
        .get_driver_name = timeline_get_driver_name,
        .get_timeline_name = timeline_get_timeline_name,
        .signaled = timeline_fence_signaled,
        .release = timeline_fence_release,
        .enable_signaling = timeline_fence_enable_signaling,
        .timeline_value_str = timeline_get_value_str,
        .use_64bit_seqno = true,
};
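
/*
 * Insert @fence into the timeline's fence list, which is kept sorted by
 * seqno: list_add_tail() on the first entry with a larger seqno places
 * the new fence immediately before it.
 */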
static void kgsl_timeline_add_fence(struct kgsl_timeline *timeline,
                struct kgsl_timeline_fence *fence)
{
        struct kgsl_timeline_fence *entry;
        unsigned long flags;

        spin_lock_irqsave(&timeline->fence_lock, flags);
        list_for_each_entry(entry, &timeline->fences, node) {
                if (fence->base.seqno < entry->base.seqno) {
                        list_add_tail(&fence->node, &entry->node);
                        spin_unlock_irqrestore(&timeline->fence_lock, flags);
                        return;
                }
        }

        list_add_tail(&fence->node, &timeline->fences);
        spin_unlock_irqrestore(&timeline->fence_lock, flags);
}
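
/*
 * Queue a pending signal event on the timeline, walking the sorted list
 * in reverse to find the insertion point. Events whose seqno has
 * already been signaled are dropped.
 */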
void kgsl_timeline_add_signal(struct kgsl_timeline_event *signal)
{
        struct kgsl_timeline *timeline = signal->timeline;
        struct kgsl_timeline_event *event;
        unsigned long flags;

        spin_lock_irqsave(&timeline->lock, flags);

        /* If we already signaled this seqno don't add it to the list */
        if (timeline->value >= signal->seqno)
                goto done;

        /* Keep the list sorted by seqno */
        list_for_each_entry_reverse(event, &timeline->events, node) {
                if (event->seqno <= signal->seqno)
                        break;
        }

        list_add(&signal->node, &event->node);

done:
        spin_unlock_irqrestore(&timeline->lock, flags);
}
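
/*
 * Advance the timeline to @seqno: retire queued signal events up to the
 * new value, then move each now-signaled fence to a private list and
 * signal it. The extra kref guards against a fence being released
 * between dropping fence_lock and calling dma_fence_signal_locked().
 */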
void kgsl_timeline_signal(struct kgsl_timeline *timeline, u64 seqno)
{
        struct kgsl_timeline_fence *fence, *tmp;
        struct kgsl_timeline_event *event, *tmp_event;
        struct list_head temp;

        INIT_LIST_HEAD(&temp);

        spin_lock_irq(&timeline->lock);

        if (seqno < timeline->value)
                goto unlock;

        trace_kgsl_timeline_signal(timeline->id, seqno);

        timeline->value = seqno;

        list_for_each_entry_safe(event, tmp_event, &timeline->events, node) {
                /* List is sorted by seqno */
                if (event->seqno > seqno)
                        break;

                /* Remove retired nodes */
                list_del(&event->node);
        }

        spin_lock(&timeline->fence_lock);
        list_for_each_entry_safe(fence, tmp, &timeline->fences, node)
                if (timeline_fence_signaled(&fence->base) &&
                        kref_get_unless_zero(&fence->base.refcount))
                        list_move(&fence->node, &temp);
        spin_unlock(&timeline->fence_lock);

        list_for_each_entry_safe(fence, tmp, &temp, node) {
                dma_fence_signal_locked(&fence->base);
                dma_fence_put(&fence->base);
        }

unlock:
        spin_unlock_irq(&timeline->lock);
}
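
/* Create a dma_fence that will signal when @timeline reaches @seqno */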
struct dma_fence *kgsl_timeline_fence_alloc(struct kgsl_timeline *timeline,
                u64 seqno)
{
        struct kgsl_timeline_fence *fence;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return ERR_PTR(-ENOMEM);

        fence->timeline = kgsl_timeline_get(timeline);
        if (!fence->timeline) {
                kfree(fence);
                return ERR_PTR(-ENOENT);
        }

        dma_fence_init(&fence->base, &timeline_fence_ops,
                &timeline->lock, timeline->context, seqno);

        INIT_LIST_HEAD(&fence->node);

        /*
         * Hold the timeline lock across the signaled check and the list
         * insertion so that another thread (e.g. kgsl_timeline_signal())
         * cannot advance the timeline between the two steps.
         */
        spin_lock_irq(&timeline->lock);

        if (!dma_fence_is_signaled_locked(&fence->base))
                kgsl_timeline_add_fence(timeline, fence);

        trace_kgsl_timeline_fence_alloc(timeline->id, seqno);
        spin_unlock_irq(&timeline->lock);

        log_kgsl_timeline_fence_alloc_event(timeline->id, seqno);

        return &fence->base;
}
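
/* Ioctl handler: create a timeline and publish its id to userspace */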
long kgsl_ioctl_timeline_create(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data)
{
        struct kgsl_device *device = dev_priv->device;
        struct kgsl_timeline_create *param = data;
        struct kgsl_timeline *timeline;

        timeline = kgsl_timeline_alloc(dev_priv, param->seqno);
        if (IS_ERR(timeline))
                return PTR_ERR(timeline);

        /* Commit the pointer to the timeline in timeline idr */
        spin_lock(&device->timelines_lock);
        idr_replace(&device->timelines, timeline, timeline->id);
        param->id = timeline->id;
        spin_unlock(&device->timelines_lock);

        return 0;
}
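
/*
 * Look up a timeline by id and take a reference on it. Returns NULL if
 * the id is unknown or the timeline is already on its way out.
 */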
struct kgsl_timeline *kgsl_timeline_by_id(struct kgsl_device *device,
                u32 id)
{
        struct kgsl_timeline *timeline;
        int ret = 0;

        spin_lock(&device->timelines_lock);
        timeline = idr_find(&device->timelines, id);
        if (timeline)
                ret = kref_get_unless_zero(&timeline->ref);
        spin_unlock(&device->timelines_lock);

        return ret ? timeline : NULL;
}
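
/*
 * Ioctl handler: wait for one or more timeline seqnos. A zero timeout
 * polls the fence state instead of sleeping.
 */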
long kgsl_ioctl_timeline_wait(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data)
{
        struct kgsl_device *device = dev_priv->device;
        struct kgsl_timeline_wait *param = data;
        struct dma_fence *fence;
        unsigned long timeout;
        signed long ret;

        if (param->flags != KGSL_TIMELINE_WAIT_ANY &&
                param->flags != KGSL_TIMELINE_WAIT_ALL)
                return -EINVAL;

        if (param->padding)
                return -EINVAL;

        fence = kgsl_timelines_to_fence_array(device, param->timelines,
                param->count, param->timelines_size,
                (param->flags == KGSL_TIMELINE_WAIT_ANY));

        if (IS_ERR(fence))
                return PTR_ERR(fence);

        /* Convert the secs.nsecs timeout to jiffies */
        if (param->tv_sec >= KTIME_SEC_MAX)
                timeout = MAX_SCHEDULE_TIMEOUT;
        else {
                ktime_t time = ktime_set(param->tv_sec, param->tv_nsec);

                timeout = msecs_to_jiffies(ktime_to_ms(time));
        }

        trace_kgsl_timeline_wait(param->flags, param->tv_sec, param->tv_nsec);

        if (!timeout)
                ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
        else {
                ret = dma_fence_wait_timeout(fence, true, timeout);

                if (!ret)
                        ret = -ETIMEDOUT;
                else if (ret > 0)
                        ret = 0;
        }

        dma_fence_put(fence);

        return ret;
}
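
/* Ioctl handler: report the highest seqno known to have retired */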
long kgsl_ioctl_timeline_query(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data)
{
        struct kgsl_timeline_val *param = data;
        struct kgsl_timeline *timeline;
        struct kgsl_timeline_event *event;
        u64 seqno;

        if (param->padding)
                return -EINVAL;

        timeline = kgsl_timeline_by_id(dev_priv->device, param->timeline);
        if (!timeline)
                return -ENODEV;

        /*
         * Start from the end of the list to find the last retired signal
         * event that was not yet processed. Leave the entry in the list
         * until the timeline signal actually advances the timeline's
         * value. This ensures subsequent query ioctls return a
         * monotonically increasing seqno.
         */
        spin_lock_irq(&timeline->lock);
        seqno = timeline->value;
        list_for_each_entry_reverse(event, &timeline->events, node) {
                if (kgsl_check_timestamp(event->context->device,
                        event->context, event->timestamp)) {
                        seqno = event->seqno;
                        break;
                }
        }
        spin_unlock_irq(&timeline->lock);

        param->seqno = seqno;

        kgsl_timeline_put(timeline);

        return 0;
}
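
/*
 * Ioctl handler: wrap a timeline fence in a sync_file and return the fd
 * to userspace. sync_file_create() takes its own fence reference, so
 * the local one is dropped on exit either way.
 */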
long kgsl_ioctl_timeline_fence_get(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data)
{
        struct kgsl_device *device = dev_priv->device;
        struct kgsl_timeline_fence_get *param = data;
        struct kgsl_timeline *timeline;
        struct sync_file *sync_file;
        struct dma_fence *fence;
        int ret = 0, fd;

        timeline = kgsl_timeline_by_id(device, param->timeline);
        if (!timeline)
                return -ENODEV;

        fence = kgsl_timeline_fence_alloc(timeline, param->seqno);
        if (IS_ERR(fence)) {
                kgsl_timeline_put(timeline);
                return PTR_ERR(fence);
        }

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0) {
                ret = fd;
                goto out;
        }

        sync_file = sync_file_create(fence);

        if (sync_file) {
                fd_install(fd, sync_file->file);
                param->handle = fd;
        } else {
                put_unused_fd(fd);
                ret = -ENOMEM;
        }

out:
        dma_fence_put(fence);
        kgsl_timeline_put(timeline);

        return ret;
}
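
/*
 * Ioctl handler: signal a list of timelines from userspace. A zero
 * timelines_size is answered with the expected struct size and -EAGAIN
 * so the caller can retry.
 */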
long kgsl_ioctl_timeline_signal(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data)
{
        struct kgsl_device *device = dev_priv->device;
        struct kgsl_timeline_signal *param = data;
        u64 timelines;
        int i;

        if (!param->timelines_size) {
                param->timelines_size = sizeof(struct kgsl_timeline_val);
                return -EAGAIN;
        }

        if (!param->count)
                return -EINVAL;

        timelines = param->timelines;

        for (i = 0; i < param->count; i++) {
                struct kgsl_timeline *timeline;
                struct kgsl_timeline_val val;

                if (copy_struct_from_user(&val, sizeof(val),
                        u64_to_user_ptr(timelines), param->timelines_size))
                        return -EFAULT;

                if (val.padding)
                        return -EINVAL;

                timeline = kgsl_timeline_by_id(device, val.timeline);
                if (!timeline)
                        return -ENODEV;

                kgsl_timeline_signal(timeline, val.seqno);

                kgsl_timeline_put(timeline);

                timelines += param->timelines_size;
        }

        return 0;
}
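
/*
 * Ioctl handler: remove a timeline from the idr and fail any fences
 * still waiting on it with -ENOENT. Fences whose refcount already hit
 * zero are unlinked instead of being signaled.
 */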
long kgsl_ioctl_timeline_destroy(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data)
{
        struct kgsl_device *device = dev_priv->device;
        struct kgsl_timeline_fence *fence, *tmp;
        struct kgsl_timeline *timeline;
        struct list_head temp;
        u32 *param = data;

        if (*param == 0)
                return -ENODEV;

        spin_lock(&device->timelines_lock);
        timeline = idr_find(&device->timelines, *param);
        if (timeline == NULL) {
                spin_unlock(&device->timelines_lock);
                return -ENODEV;
        }

        /*
         * Validate that the id given is owned by the dev_priv
         * instance that is passed in. If not, abort.
         */
        if (timeline->dev_priv != dev_priv) {
                spin_unlock(&device->timelines_lock);
                return -EINVAL;
        }

        idr_remove(&device->timelines, timeline->id);
        spin_unlock(&device->timelines_lock);

        INIT_LIST_HEAD(&temp);

        spin_lock(&timeline->fence_lock);
        list_for_each_entry_safe(fence, tmp, &timeline->fences, node)
                if (!kref_get_unless_zero(&fence->base.refcount))
                        list_del_init(&fence->node);
        list_replace_init(&timeline->fences, &temp);
        spin_unlock(&timeline->fence_lock);

        spin_lock_irq(&timeline->lock);
        list_for_each_entry_safe(fence, tmp, &temp, node) {
                dma_fence_set_error(&fence->base, -ENOENT);
                dma_fence_signal_locked(&fence->base);
                dma_fence_put(&fence->base);
        }
        spin_unlock_irq(&timeline->lock);

        kgsl_timeline_put(timeline);

        return 0;
}