hgsl_sync.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/types.h>
  7. #include <linux/dma-buf.h>
  8. #include <linux/dma-fence.h>
  9. #include <linux/slab.h>
  10. #include <linux/spinlock.h>
  11. #include <linux/regmap.h>
  12. #include <linux/uaccess.h>
  13. #include <linux/jiffies.h>
  14. #include "hgsl.h"
  15. #define HGSL_HSYNC_FINI_RETRY_COUNT 50
  16. #define HGSL_HSYNC_FINI_RETRY_TIME_SLICE 10
  17. #define HGSL_TIMELINE_INFINITE_TIMEOUT (~(0ULL))
  18. static const struct dma_fence_ops hgsl_hsync_fence_ops;
  19. static const struct dma_fence_ops hgsl_isync_fence_ops;
  20. int hgsl_hsync_fence_create_fd(struct hgsl_context *context,
  21. uint32_t ts)
  22. {
  23. int fence_fd;
  24. struct hgsl_hsync_fence *fence;
  25. fence_fd = get_unused_fd_flags(0);
  26. if (fence_fd < 0)
  27. return fence_fd;
  28. fence = hgsl_hsync_fence_create(context, ts);
  29. if (fence == NULL) {
  30. put_unused_fd(fence_fd);
  31. return -ENOMEM;
  32. }
  33. fd_install(fence_fd, fence->sync_file->file);
  34. return fence_fd;
  35. }
  36. struct hgsl_hsync_fence *hgsl_hsync_fence_create(
  37. struct hgsl_context *context,
  38. uint32_t ts)
  39. {
  40. unsigned long flags;
  41. struct hgsl_hsync_timeline *timeline = context->timeline;
  42. struct hgsl_hsync_fence *fence;
  43. if (timeline == NULL)
  44. return NULL;
  45. if (!kref_get_unless_zero(&timeline->kref))
  46. return NULL;
  47. fence = kzalloc(sizeof(*fence), GFP_KERNEL);
  48. if (fence == NULL) {
  49. hgsl_hsync_timeline_put(timeline);
  50. return NULL;
  51. }
  52. fence->ts = ts;
  53. dma_fence_init(&fence->fence, &hgsl_hsync_fence_ops,
  54. &timeline->lock, timeline->fence_context, ts);
  55. fence->sync_file = sync_file_create(&fence->fence);
  56. dma_fence_put(&fence->fence);
  57. if (fence->sync_file == NULL) {
  58. hgsl_hsync_timeline_put(timeline);
  59. return NULL;
  60. }
  61. fence->timeline = timeline;
  62. spin_lock_irqsave(&timeline->lock, flags);
  63. list_add_tail(&fence->child_list, &timeline->fence_list);
  64. spin_unlock_irqrestore(&timeline->lock, flags);
  65. return fence;
  66. }
  67. void hgsl_hsync_timeline_signal(struct hgsl_hsync_timeline *timeline,
  68. unsigned int ts)
  69. {
  70. struct hgsl_hsync_fence *cur, *next;
  71. unsigned long flags;
  72. if (!kref_get_unless_zero(&timeline->kref))
  73. return;
  74. if (hgsl_ts32_ge(timeline->last_ts, ts)) {
  75. hgsl_hsync_timeline_put(timeline);
  76. return;
  77. }
  78. spin_lock_irqsave(&timeline->lock, flags);
  79. timeline->last_ts = ts;
  80. list_for_each_entry_safe(cur, next, &timeline->fence_list,
  81. child_list) {
  82. if (dma_fence_is_signaled_locked(&cur->fence))
  83. list_del_init(&cur->child_list);
  84. }
  85. spin_unlock_irqrestore(&timeline->lock, flags);
  86. hgsl_hsync_timeline_put(timeline);
  87. }
  88. int hgsl_hsync_timeline_create(struct hgsl_context *context)
  89. {
  90. struct hgsl_hsync_timeline *timeline;
  91. timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
  92. if (!timeline)
  93. return -ENOMEM;
  94. snprintf(timeline->name, HGSL_TIMELINE_NAME_LEN,
  95. "timeline_%s_%d",
  96. current->comm, current->pid);
  97. kref_init(&timeline->kref);
  98. timeline->fence_context = dma_fence_context_alloc(1);
  99. INIT_LIST_HEAD(&timeline->fence_list);
  100. spin_lock_init(&timeline->lock);
  101. timeline->context = context;
  102. context->timeline = timeline;
  103. return 0;
  104. }
/* kref release callback: frees the timeline once the last reference drops. */
static void hgsl_hsync_timeline_destroy(struct kref *kref)
{
	struct hgsl_hsync_timeline *timeline =
		container_of(kref, struct hgsl_hsync_timeline, kref);

	kfree(timeline);
}
/* Drop one reference on @timeline; a NULL @timeline is tolerated. */
void hgsl_hsync_timeline_put(struct hgsl_hsync_timeline *timeline)
{
	if (timeline)
		kref_put(&timeline->kref, hgsl_hsync_timeline_destroy);
}
  116. void hgsl_hsync_timeline_fini(struct hgsl_context *context)
  117. {
  118. struct hgsl_hsync_timeline *timeline = context->timeline;
  119. struct hgsl_hsync_fence *fence;
  120. int retry_count = HGSL_HSYNC_FINI_RETRY_COUNT;
  121. unsigned int max_ts = 0;
  122. unsigned long flags;
  123. if (!kref_get_unless_zero(&timeline->kref))
  124. return;
  125. spin_lock_irqsave(&timeline->lock, flags);
  126. while ((retry_count >= 0) && (!list_empty(&timeline->fence_list))) {
  127. spin_unlock_irqrestore(&timeline->lock, flags);
  128. msleep(HGSL_HSYNC_FINI_RETRY_TIME_SLICE);
  129. retry_count--;
  130. spin_lock_irqsave(&timeline->lock, flags);
  131. }
  132. list_for_each_entry(fence, &timeline->fence_list, child_list)
  133. if (max_ts < fence->ts)
  134. max_ts = fence->ts;
  135. spin_unlock_irqrestore(&timeline->lock, flags);
  136. hgsl_hsync_timeline_signal(timeline, max_ts);
  137. context->last_ts = max_ts;
  138. hgsl_hsync_timeline_put(timeline);
  139. }
/* dma_fence_ops.get_driver_name: fixed driver identifier string. */
static const char *hgsl_hsync_get_driver_name(struct dma_fence *base)
{
	return "hgsl-timeline";
}
  144. static const char *hgsl_hsync_get_timeline_name(struct dma_fence *base)
  145. {
  146. struct hgsl_hsync_fence *fence =
  147. container_of(base, struct hgsl_hsync_fence, fence);
  148. struct hgsl_hsync_timeline *timeline = fence->timeline;
  149. return (timeline == NULL) ? "null" : timeline->name;
  150. }
/*
 * dma_fence_ops.enable_signaling: signaling is driven by
 * hgsl_hsync_timeline_signal(), so nothing to arm — always report success.
 */
static bool hgsl_hsync_enable_signaling(struct dma_fence *base)
{
	return true;
}
  155. static bool hgsl_hsync_has_signaled(struct dma_fence *base)
  156. {
  157. struct hgsl_hsync_fence *fence =
  158. container_of(base, struct hgsl_hsync_fence, fence);
  159. struct hgsl_hsync_timeline *timeline = fence->timeline;
  160. return hgsl_ts32_ge(timeline->last_ts, fence->ts);
  161. }
  162. static void hgsl_hsync_fence_release(struct dma_fence *base)
  163. {
  164. struct hgsl_hsync_fence *fence =
  165. container_of(base, struct hgsl_hsync_fence, fence);
  166. struct hgsl_hsync_timeline *timeline = fence->timeline;
  167. if (timeline) {
  168. spin_lock(&timeline->lock);
  169. list_del_init(&fence->child_list);
  170. spin_unlock(&timeline->lock);
  171. hgsl_hsync_timeline_put(timeline);
  172. }
  173. kfree(fence);
  174. }
/* dma_fence_ops.fence_value_str: print the fence's timestamp for debugfs. */
static void hgsl_hsync_fence_value_str(struct dma_fence *base,
				      char *str, int size)
{
	struct hgsl_hsync_fence *fence =
		container_of(base, struct hgsl_hsync_fence, fence);

	snprintf(str, size, "%u", fence->ts);
}
  182. static void hgsl_hsync_timeline_value_str(struct dma_fence *base,
  183. char *str, int size)
  184. {
  185. struct hgsl_hsync_fence *fence =
  186. container_of(base, struct hgsl_hsync_fence, fence);
  187. struct hgsl_hsync_timeline *timeline = fence->timeline;
  188. if (!kref_get_unless_zero(&timeline->kref))
  189. return;
  190. snprintf(str, size, "Last retired TS:%u", timeline->last_ts);
  191. hgsl_hsync_timeline_put(timeline);
  192. }
/* dma_fence ops for hsync (host-side, GPU-retired timestamp) fences. */
static const struct dma_fence_ops hgsl_hsync_fence_ops = {
	.get_driver_name = hgsl_hsync_get_driver_name,
	.get_timeline_name = hgsl_hsync_get_timeline_name,
	.enable_signaling = hgsl_hsync_enable_signaling,
	.signaled = hgsl_hsync_has_signaled,
	.wait = dma_fence_default_wait,
	.release = hgsl_hsync_fence_release,
	.fence_value_str = hgsl_hsync_fence_value_str,
	.timeline_value_str = hgsl_hsync_timeline_value_str,
};
  203. static void hgsl_isync_timeline_release(struct kref *kref)
  204. {
  205. struct hgsl_isync_timeline *timeline = container_of(kref,
  206. struct hgsl_isync_timeline,
  207. kref);
  208. kfree(timeline);
  209. }
  210. static struct hgsl_isync_timeline *
  211. hgsl_isync_timeline_get(struct hgsl_priv *priv, int id, bool check_owner)
  212. {
  213. int ret = 0;
  214. struct qcom_hgsl *hgsl = priv->dev;
  215. struct hgsl_isync_timeline *timeline = NULL;
  216. spin_lock(&hgsl->isync_timeline_lock);
  217. timeline = idr_find(&hgsl->isync_timeline_idr, id);
  218. if (timeline) {
  219. if (check_owner && (timeline->priv != priv)) {
  220. timeline = NULL;
  221. } else {
  222. ret = kref_get_unless_zero(&timeline->kref);
  223. if (!ret)
  224. timeline = NULL;
  225. }
  226. }
  227. spin_unlock(&hgsl->isync_timeline_lock);
  228. return timeline;
  229. }
/* Drop one reference on @timeline; a NULL @timeline is tolerated. */
static void hgsl_isync_timeline_put(struct hgsl_isync_timeline *timeline)
{
	if (timeline)
		kref_put(&timeline->kref, hgsl_isync_timeline_release);
}
  235. int hgsl_isync_timeline_create(struct hgsl_priv *priv,
  236. uint32_t *timeline_id,
  237. uint32_t flags,
  238. uint64_t initial_ts)
  239. {
  240. struct qcom_hgsl *hgsl = priv->dev;
  241. struct hgsl_isync_timeline *timeline;
  242. int ret = -EINVAL;
  243. uint32_t idr;
  244. if (timeline_id == NULL)
  245. return -EINVAL;
  246. timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
  247. if (timeline == NULL)
  248. return -ENOMEM;
  249. kref_init(&timeline->kref);
  250. timeline->context = dma_fence_context_alloc(1);
  251. INIT_LIST_HEAD(&timeline->fence_list);
  252. spin_lock_init(&timeline->lock);
  253. timeline->priv = priv;
  254. snprintf((char *) timeline->name, sizeof(timeline->name),
  255. "isync-timeline-%d", *timeline_id);
  256. timeline->flags = flags;
  257. timeline->last_ts = initial_ts;
  258. timeline->is64bits = ((flags & HGSL_ISYNC_64BITS_TIMELINE) != 0);
  259. idr_preload(GFP_KERNEL);
  260. spin_lock(&hgsl->isync_timeline_lock);
  261. idr = idr_alloc(&hgsl->isync_timeline_idr, timeline, 1, 0, GFP_NOWAIT);
  262. if (idr > 0) {
  263. timeline->id = idr;
  264. *timeline_id = idr;
  265. ret = 0;
  266. }
  267. spin_unlock(&hgsl->isync_timeline_lock);
  268. idr_preload_end();
  269. /* allocate IDR failed */
  270. if (ret != 0)
  271. kfree(timeline);
  272. return ret;
  273. }
  274. int hgsl_isync_fence_create(struct hgsl_priv *priv, uint32_t timeline_id,
  275. uint32_t ts, bool ts_is_valid, int *fence_fd)
  276. {
  277. unsigned long flags;
  278. struct hgsl_isync_timeline *timeline = NULL;
  279. struct hgsl_isync_fence *fence = NULL;
  280. struct sync_file *sync_file = NULL;
  281. int ret = 0;
  282. if (fence_fd == NULL)
  283. return -EINVAL;
  284. timeline = hgsl_isync_timeline_get(priv, timeline_id, true);
  285. if (timeline == NULL) {
  286. ret = -EINVAL;
  287. goto out;
  288. }
  289. fence = kzalloc(sizeof(*fence), GFP_KERNEL);
  290. if (fence == NULL) {
  291. ret = -ENOMEM;
  292. goto out;
  293. }
  294. /* set a minimal ts if user don't set it */
  295. if (!ts_is_valid)
  296. ts = 1;
  297. fence->ts = ts;
  298. dma_fence_init(&fence->fence, &hgsl_isync_fence_ops,
  299. &timeline->lock,
  300. timeline->context,
  301. ts);
  302. sync_file = sync_file_create(&fence->fence);
  303. if (sync_file == NULL) {
  304. ret = -ENOMEM;
  305. goto out_fence;
  306. }
  307. *fence_fd = get_unused_fd_flags(0);
  308. if (*fence_fd < 0) {
  309. ret = -EBADF;
  310. goto out_fence;
  311. }
  312. fd_install(*fence_fd, sync_file->file);
  313. fence->timeline = timeline;
  314. spin_lock_irqsave(&timeline->lock, flags);
  315. list_add_tail(&fence->child_list, &timeline->fence_list);
  316. spin_unlock_irqrestore(&timeline->lock, flags);
  317. out_fence:
  318. dma_fence_put(&fence->fence);
  319. out:
  320. if (ret) {
  321. if (sync_file)
  322. fput(sync_file->file);
  323. if (timeline)
  324. hgsl_isync_timeline_put(timeline);
  325. }
  326. return ret;
  327. }
/*
 * Signal and release every fence still pending on @timeline, then drop
 * the caller's timeline reference.
 *
 * Pending fences are first moved onto a private list under the lock,
 * taking a fence reference via dma_fence_get_rcu() so a concurrent
 * release cannot free them mid-walk; they are then signaled and put
 * outside the lock.
 *
 * Returns 0, or -EINVAL if @timeline is NULL.
 */
static int hgsl_isync_timeline_destruct(struct hgsl_priv *priv,
				struct hgsl_isync_timeline *timeline)
{
	unsigned long flags;
	struct hgsl_isync_fence *cur, *next;
	LIST_HEAD(flist);

	if (timeline == NULL)
		return -EINVAL;

	spin_lock_irqsave(&timeline->lock, flags);
	list_for_each_entry_safe(cur, next, &timeline->fence_list,
						child_list) {
		/* Fences whose refcount already hit zero are skipped. */
		if (dma_fence_get_rcu(&cur->fence)) {
			list_del_init(&cur->child_list);
			list_add(&cur->free_list, &flist);
		}
	}
	spin_unlock_irqrestore(&timeline->lock, flags);

	list_for_each_entry_safe(cur, next, &flist, free_list) {
		list_del(&cur->free_list);
		dma_fence_signal(&cur->fence);
		dma_fence_put(&cur->fence);
	}

	hgsl_isync_timeline_put(timeline);

	return 0;
}
  353. int hgsl_isync_timeline_destroy(struct hgsl_priv *priv, uint32_t id)
  354. {
  355. struct qcom_hgsl *hgsl = priv->dev;
  356. struct hgsl_isync_timeline *timeline;
  357. spin_lock(&hgsl->isync_timeline_lock);
  358. timeline = idr_find(&hgsl->isync_timeline_idr, id);
  359. if (timeline) {
  360. if (timeline->priv == priv) {
  361. idr_remove(&hgsl->isync_timeline_idr, timeline->id);
  362. timeline->id = 0;
  363. } else {
  364. timeline = NULL;
  365. }
  366. }
  367. spin_unlock(&hgsl->isync_timeline_lock);
  368. if (timeline == NULL)
  369. return -EINVAL;
  370. return hgsl_isync_timeline_destruct(priv, timeline);
  371. }
  372. void hgsl_isync_fini(struct hgsl_priv *priv)
  373. {
  374. LIST_HEAD(flist);
  375. struct qcom_hgsl *hgsl = priv->dev;
  376. struct hgsl_isync_timeline *cur, *t;
  377. uint32_t idr;
  378. spin_lock(&hgsl->isync_timeline_lock);
  379. idr_for_each_entry(&hgsl->isync_timeline_idr,
  380. cur, idr) {
  381. if (cur->priv == priv) {
  382. idr_remove(&hgsl->isync_timeline_idr, idr);
  383. list_add(&cur->free_list, &flist);
  384. }
  385. }
  386. spin_unlock(&hgsl->isync_timeline_lock);
  387. list_for_each_entry_safe(cur, t, &flist, free_list) {
  388. list_del(&cur->free_list);
  389. hgsl_isync_timeline_destruct(priv, cur);
  390. }
  391. }
  392. static int _isync_timeline_signal(
  393. struct hgsl_isync_timeline *timeline,
  394. struct dma_fence *fence)
  395. {
  396. unsigned long flags;
  397. int ret = -EINVAL;
  398. struct hgsl_isync_fence *cur, *next;
  399. bool found = false;
  400. spin_lock_irqsave(&timeline->lock, flags);
  401. list_for_each_entry_safe(cur, next, &timeline->fence_list,
  402. child_list) {
  403. if (fence == &cur->fence) {
  404. list_del_init(&cur->child_list);
  405. found = true;
  406. break;
  407. }
  408. }
  409. spin_unlock_irqrestore(&timeline->lock, flags);
  410. if (found) {
  411. dma_fence_signal(fence);
  412. ret = 0;
  413. }
  414. return ret;
  415. }
  416. int hgsl_isync_fence_signal(struct hgsl_priv *priv, uint32_t timeline_id,
  417. int fence_fd)
  418. {
  419. struct hgsl_isync_timeline *timeline;
  420. struct dma_fence *fence = NULL;
  421. int ret = -EINVAL;
  422. timeline = hgsl_isync_timeline_get(priv, timeline_id, true);
  423. if (timeline == NULL) {
  424. ret = -EINVAL;
  425. goto out;
  426. }
  427. fence = sync_file_get_fence(fence_fd);
  428. if (fence == NULL) {
  429. ret = -EBADF;
  430. goto out;
  431. }
  432. ret = _isync_timeline_signal(timeline, fence);
  433. out:
  434. if (fence)
  435. dma_fence_put(fence);
  436. if (timeline)
  437. hgsl_isync_timeline_put(timeline);
  438. return ret;
  439. }
  440. int hgsl_isync_forward(struct hgsl_priv *priv, uint32_t timeline_id,
  441. uint64_t ts, bool check_owner)
  442. {
  443. unsigned long flags;
  444. struct hgsl_isync_timeline *timeline;
  445. struct hgsl_isync_fence *cur, *next;
  446. struct dma_fence *base;
  447. LIST_HEAD(flist);
  448. timeline = hgsl_isync_timeline_get(priv, timeline_id, check_owner);
  449. if (timeline == NULL)
  450. return -EINVAL;
  451. if (hgsl_ts_ge(timeline->last_ts, ts, timeline->is64bits))
  452. goto out;
  453. spin_lock_irqsave(&timeline->lock, flags);
  454. timeline->last_ts = ts;
  455. list_for_each_entry_safe(cur, next, &timeline->fence_list,
  456. child_list) {
  457. if (hgsl_ts_ge(ts, cur->ts, timeline->is64bits)) {
  458. base = dma_fence_get_rcu(&cur->fence);
  459. list_del_init(&cur->child_list);
  460. /* It *shouldn't* happen. If it does, it's
  461. * the last thing you'll see
  462. */
  463. if (base == NULL)
  464. pr_warn(" Invalid fence:%p.\n", cur);
  465. else
  466. list_add(&cur->free_list, &flist);
  467. }
  468. }
  469. spin_unlock_irqrestore(&timeline->lock, flags);
  470. list_for_each_entry_safe(cur, next, &flist, free_list) {
  471. list_del(&cur->free_list);
  472. dma_fence_signal(&cur->fence);
  473. dma_fence_put(&cur->fence);
  474. }
  475. out:
  476. if (timeline)
  477. hgsl_isync_timeline_put(timeline);
  478. return 0;
  479. }
  480. int hgsl_isync_query(struct hgsl_priv *priv, uint32_t timeline_id,
  481. uint64_t *ts)
  482. {
  483. struct hgsl_isync_timeline *timeline;
  484. timeline = hgsl_isync_timeline_get(priv, timeline_id, false);
  485. if (timeline == NULL)
  486. return -EINVAL;
  487. *ts = timeline->last_ts;
  488. hgsl_isync_timeline_put(timeline);
  489. return 0;
  490. }
  491. static struct dma_fence *hgsl_timelines_to_fence_array(struct hgsl_priv *priv,
  492. u64 timelines, u32 count, u64 usize, bool any)
  493. {
  494. void __user *uptr = u64_to_user_ptr(timelines);
  495. struct dma_fence_array *array;
  496. struct dma_fence **fences;
  497. struct hgsl_isync_fence *fence = NULL;
  498. int i, ret = 0;
  499. if (!count || count > INT_MAX)
  500. return ERR_PTR(-EINVAL);
  501. fences = kcalloc(count, sizeof(*fences),
  502. GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
  503. if (!fences)
  504. return ERR_PTR(-ENOMEM);
  505. for (i = 0; i < count; i++) {
  506. struct hgsl_timeline_val val;
  507. struct hgsl_isync_timeline *timeline;
  508. if (copy_struct_from_user(&val, sizeof(val), uptr, usize)) {
  509. ret = -EFAULT;
  510. goto err;
  511. }
  512. if (val.padding) {
  513. ret = -EINVAL;
  514. goto err;
  515. }
  516. timeline = hgsl_isync_timeline_get(priv, val.timeline_id, false);
  517. if (!timeline) {
  518. ret = -ENOENT;
  519. goto err;
  520. }
  521. fence = kzalloc(sizeof(*fence), GFP_KERNEL);
  522. if (fence == NULL) {
  523. hgsl_isync_timeline_put(timeline);
  524. ret = -ENOMEM;
  525. goto err;
  526. }
  527. fence->timeline = timeline;
  528. fence->ts = val.timepoint;
  529. dma_fence_init(&fence->fence, &hgsl_isync_fence_ops,
  530. &timeline->lock,
  531. timeline->context,
  532. fence->ts);
  533. spin_lock(&timeline->lock);
  534. list_add_tail(&fence->child_list, &timeline->fence_list);
  535. spin_unlock(&timeline->lock);
  536. fences[i] = &fence->fence;
  537. uptr += usize;
  538. }
  539. /* No need for a fence array for only one fence */
  540. if (count == 1) {
  541. struct dma_fence *fence = fences[0];
  542. kfree(fences);
  543. return fence;
  544. }
  545. array = dma_fence_array_create(count, fences,
  546. dma_fence_context_alloc(1), 0, any);
  547. if (array)
  548. return &array->base;
  549. ret = -ENOMEM;
  550. err:
  551. for (i = 0; i < count; i++) {
  552. if (!IS_ERR_OR_NULL(fences[i]))
  553. dma_fence_put(fences[i]);
  554. }
  555. kfree(fences);
  556. return ERR_PTR(ret);
  557. }
  558. int hgsl_isync_wait_multiple(struct hgsl_priv *priv, struct hgsl_timeline_wait *param)
  559. {
  560. struct dma_fence *fence;
  561. unsigned long timeout;
  562. signed long ret;
  563. if (param->flags != HGSL_TIMELINE_WAIT_ANY &&
  564. param->flags != HGSL_TIMELINE_WAIT_ALL)
  565. return -EINVAL;
  566. if (param->padding)
  567. return -EINVAL;
  568. fence = hgsl_timelines_to_fence_array(priv, param->timelines,
  569. param->count, param->timelines_size,
  570. (param->flags == HGSL_TIMELINE_WAIT_ANY));
  571. if (IS_ERR(fence))
  572. return PTR_ERR(fence);
  573. if (param->timeout_nanosec == HGSL_TIMELINE_INFINITE_TIMEOUT)
  574. timeout = MAX_SCHEDULE_TIMEOUT;
  575. else {
  576. struct timespec64 timespec;
  577. timespec.tv_sec = param->timeout_nanosec / NSEC_PER_SEC;
  578. timespec.tv_nsec = param->timeout_nanosec % NSEC_PER_SEC;
  579. timeout = timespec64_to_jiffies(&timespec);
  580. }
  581. if (!timeout)
  582. ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
  583. else {
  584. ret = dma_fence_wait_timeout(fence, true, timeout);
  585. if (!ret)
  586. ret = -ETIMEDOUT;
  587. else if (ret > 0)
  588. ret = 0;
  589. else if (ret == -ERESTARTSYS)
  590. ret = -EINTR;
  591. }
  592. dma_fence_put(fence);
  593. return ret;
  594. }
/* dma_fence_ops.get_driver_name: fixed driver identifier string. */
static const char *hgsl_isync_get_driver_name(struct dma_fence *base)
{
	return "hgsl";
}
  599. static const char *hgsl_isync_get_timeline_name(struct dma_fence *base)
  600. {
  601. struct hgsl_isync_fence *fence =
  602. container_of(base,
  603. struct hgsl_isync_fence,
  604. fence);
  605. struct hgsl_isync_timeline *timeline = fence->timeline;
  606. return (timeline == NULL) ? "null":timeline->name;
  607. }
  608. static bool hgsl_isync_has_signaled(struct dma_fence *base)
  609. {
  610. struct hgsl_isync_fence *fence = NULL;
  611. struct hgsl_isync_timeline *timeline = NULL;
  612. if (base) {
  613. fence = container_of(base, struct hgsl_isync_fence, fence);
  614. timeline = fence->timeline;
  615. if (timeline)
  616. return hgsl_ts_ge(timeline->last_ts, fence->ts, timeline->is64bits);
  617. }
  618. return false;
  619. }
/*
 * dma_fence_ops.enable_signaling: returning false tells dma-fence the
 * fence is already signaled; otherwise signaling is armed (driven by
 * hgsl_isync_forward()/_isync_timeline_signal()).
 */
static bool hgsl_isync_enable_signaling(struct dma_fence *base)
{
	return !hgsl_isync_has_signaled(base);
}
  624. static void hgsl_isync_fence_release(struct dma_fence *base)
  625. {
  626. unsigned long flags;
  627. struct hgsl_isync_fence *fence = container_of(base,
  628. struct hgsl_isync_fence,
  629. fence);
  630. struct hgsl_isync_timeline *timeline = fence->timeline;
  631. if (timeline) {
  632. spin_lock_irqsave(&timeline->lock, flags);
  633. list_del_init(&fence->child_list);
  634. spin_unlock_irqrestore(&timeline->lock, flags);
  635. dma_fence_signal(base);
  636. hgsl_isync_timeline_put(fence->timeline);
  637. }
  638. kfree(fence);
  639. }
/* dma_fence_ops.fence_value_str: prints the fence context id for debugfs. */
static void hgsl_isync_fence_value_str(struct dma_fence *base,
				      char *str, int size)
{
	snprintf(str, size, "%llu", base->context);
}
/* dma_fence ops for isync (software, client-forwarded timestamp) fences. */
static const struct dma_fence_ops hgsl_isync_fence_ops = {
	.get_driver_name = hgsl_isync_get_driver_name,
	.get_timeline_name = hgsl_isync_get_timeline_name,
	.enable_signaling = hgsl_isync_enable_signaling,
	.signaled = hgsl_isync_has_signaled,
	.wait = dma_fence_default_wait,
	.release = hgsl_isync_fence_release,
	.fence_value_str = hgsl_isync_fence_value_str,
};