kgsl_vbo.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/file.h>
#include <linux/interval_tree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include "kgsl_device.h"
#include "kgsl_mmu.h"
#include "kgsl_reclaim.h"
#include "kgsl_sharedmem.h"
#include "kgsl_trace.h"
struct kgsl_memdesc_bind_range {
	struct kgsl_mem_entry *entry;
	struct interval_tree_node range;
};

static struct kgsl_memdesc_bind_range *bind_to_range(struct interval_tree_node *node)
{
	return container_of(node, struct kgsl_memdesc_bind_range, range);
}

static struct kgsl_memdesc_bind_range *bind_range_create(u64 start, u64 last,
		struct kgsl_mem_entry *entry)
{
	struct kgsl_memdesc_bind_range *range =
		kzalloc(sizeof(*range), GFP_KERNEL);

	if (!range)
		return ERR_PTR(-ENOMEM);

	range->range.start = start;
	range->range.last = last;
	range->entry = kgsl_mem_entry_get(entry);

	if (!range->entry) {
		kfree(range);
		return ERR_PTR(-EINVAL);
	}

	atomic_inc(&entry->vbo_count);
	return range;
}

static void bind_range_destroy(struct kgsl_memdesc_bind_range *range)
{
	struct kgsl_mem_entry *entry = range->entry;

	atomic_dec(&entry->vbo_count);
	kgsl_mem_entry_put(entry);
	kfree(range);
}

static u64 bind_range_len(struct kgsl_memdesc_bind_range *range)
{
	return (range->range.last - range->range.start) + 1;
}

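/* Print each bound child range of a VBO entry to the given seq_file */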
void kgsl_memdesc_print_vbo_ranges(struct kgsl_mem_entry *entry,
		struct seq_file *s)
{
	struct interval_tree_node *next;
	struct kgsl_memdesc *memdesc = &entry->memdesc;

	if (!(memdesc->flags & KGSL_MEMFLAGS_VBO))
		return;

	/*
	 * We are called in an atomic context, so try to get the mutex, and if
	 * we can't, skip this entry.
	 */
	if (!mutex_trylock(&memdesc->ranges_lock))
		return;

	next = interval_tree_iter_first(&memdesc->ranges, 0, ~0UL);
	while (next) {
		struct kgsl_memdesc_bind_range *range = bind_to_range(next);

		seq_printf(s, "%5d %5d 0x%16.16lx-0x%16.16lx\n",
			entry->id, range->entry->id, range->range.start,
			range->range.last);

		next = interval_tree_iter_next(next, 0, ~0UL);
	}

	mutex_unlock(&memdesc->ranges_lock);
}

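/*
 * Unbind child ranges overlapping [start, last] in the target VBO. If @entry
 * is NULL every overlapping range is removed, otherwise only the ranges that
 * belong to @entry. Unless the VBO was created with
 * KGSL_MEMFLAGS_VBO_NO_MAP_ZERO, the freed region is remapped to the zero
 * page.
 */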
static void kgsl_memdesc_remove_range(struct kgsl_mem_entry *target,
		u64 start, u64 last, struct kgsl_mem_entry *entry)
{
	struct interval_tree_node *node, *next;
	struct kgsl_memdesc_bind_range *range;
	struct kgsl_memdesc *memdesc = &target->memdesc;

	mutex_lock(&memdesc->ranges_lock);

	next = interval_tree_iter_first(&memdesc->ranges, start, last);
	while (next) {
		node = next;
		range = bind_to_range(node);
		next = interval_tree_iter_next(node, start, last);

		/*
		 * If entry is NULL, consider it a special request and unbind
		 * the entire range between start and last.
		 */
		if (!entry || range->entry->id == entry->id) {
			if (kgsl_mmu_unmap_range(memdesc->pagetable,
				memdesc, range->range.start, bind_range_len(range)))
				continue;

			interval_tree_remove(node, &memdesc->ranges);
			trace_kgsl_mem_remove_bind_range(target,
				range->range.start, range->entry,
				bind_range_len(range));

			if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO))
				kgsl_mmu_map_zero_page_to_range(memdesc->pagetable,
					memdesc, range->range.start, bind_range_len(range));

			bind_range_destroy(range);
		}
	}

	mutex_unlock(&memdesc->ranges_lock);
}

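/*
 * Bind @entry into the target VBO at [start, last], mapping the child's
 * memory starting at @offset. Existing bindings that overlap the new interval
 * are trimmed, split or destroyed first so the tree never holds overlapping
 * ranges.
 */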
static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
		u64 start, u64 last, struct kgsl_mem_entry *entry, u64 offset)
{
	struct interval_tree_node *node, *next;
	struct kgsl_memdesc *memdesc = &target->memdesc;
	struct kgsl_memdesc_bind_range *range =
		bind_range_create(start, last, entry);
	int ret = 0;

	if (IS_ERR(range))
		return PTR_ERR(range);

	mutex_lock(&memdesc->ranges_lock);

	/*
	 * If the VBO maps the zero page, then we can unmap the requested range
	 * in one call. Otherwise we have to figure out what ranges to unmap
	 * while walking the interval tree.
	 */
	if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)) {
		ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc, start,
			last - start + 1);
		if (ret)
			goto error;
	}

	next = interval_tree_iter_first(&memdesc->ranges, start, last);
	while (next) {
		struct kgsl_memdesc_bind_range *cur;

		node = next;
		cur = bind_to_range(node);
		next = interval_tree_iter_next(node, start, last);

		trace_kgsl_mem_remove_bind_range(target, cur->range.start,
			cur->entry, bind_range_len(cur));

		interval_tree_remove(node, &memdesc->ranges);

		if (start <= cur->range.start) {
			if (last >= cur->range.last) {
				/* Unmap the entire cur range */
				if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO) {
					ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
						cur->range.start,
						cur->range.last - cur->range.start + 1);
					if (ret) {
						interval_tree_insert(node, &memdesc->ranges);
						goto error;
					}
				}

				bind_range_destroy(cur);
				continue;
			}

			/* Unmap the range overlapping cur */
			if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO) {
				ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
					cur->range.start,
					last - cur->range.start + 1);
				if (ret) {
					interval_tree_insert(node, &memdesc->ranges);
					goto error;
				}
			}

			/* Adjust the start of the mapping */
			cur->range.start = last + 1;

			/* And put it back into the tree */
			interval_tree_insert(node, &memdesc->ranges);

			trace_kgsl_mem_add_bind_range(target,
				cur->range.start, cur->entry, bind_range_len(cur));
		} else {
			if (last < cur->range.last) {
				struct kgsl_memdesc_bind_range *temp;

				/*
				 * The range is split into two so make a new
				 * entry for the far side
				 */
				temp = bind_range_create(last + 1, cur->range.last,
					cur->entry);
				/* FIXME: Uhoh, this would be bad */
				BUG_ON(IS_ERR(temp));

				interval_tree_insert(&temp->range,
					&memdesc->ranges);

				trace_kgsl_mem_add_bind_range(target,
					temp->range.start,
					temp->entry, bind_range_len(temp));
			}

			/* Unmap the range overlapping cur */
			if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO) {
				ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
					start,
					min_t(u64, cur->range.last, last) - start + 1);
				if (ret) {
					interval_tree_insert(node, &memdesc->ranges);
					goto error;
				}
			}

			cur->range.last = start - 1;
			interval_tree_insert(node, &memdesc->ranges);

			trace_kgsl_mem_add_bind_range(target, cur->range.start,
				cur->entry, bind_range_len(cur));
		}
	}

	ret = kgsl_mmu_map_child(memdesc->pagetable, memdesc, start,
		&entry->memdesc, offset, last - start + 1);
	if (ret)
		goto error;

	/* Add the new range */
	interval_tree_insert(&range->range, &memdesc->ranges);

	trace_kgsl_mem_add_bind_range(target, range->range.start,
		range->entry, bind_range_len(range));

	mutex_unlock(&memdesc->ranges_lock);

	return ret;

error:
	bind_range_destroy(range);
	mutex_unlock(&memdesc->ranges_lock);

	return ret;
}

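/*
 * put_gpuaddr() hook for VBOs: unmap the VBO region, tear down any remaining
 * child bind ranges and release the GPU address. If an unmap fails, the GPU
 * address is not released.
 */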
static void kgsl_sharedmem_vbo_put_gpuaddr(struct kgsl_memdesc *memdesc)
{
	struct interval_tree_node *node, *next;
	struct kgsl_memdesc_bind_range *range;
	int ret = 0;
	bool unmap_fail;

	/*
	 * If the VBO maps the zero page then we can unmap the entire
	 * pagetable region in one call.
	 */
	if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO))
		ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
			0, memdesc->size);

	unmap_fail = ret;

	/*
	 * FIXME: do we have a use after free potential here? We might need to
	 * lock this and set a "do not update" bit
	 */

	/* Now delete each range and release the mem entries */
	next = interval_tree_iter_first(&memdesc->ranges, 0, ~0UL);
	while (next) {
		node = next;
		range = bind_to_range(node);
		next = interval_tree_iter_next(node, 0, ~0UL);

		interval_tree_remove(node, &memdesc->ranges);

		/* Unmap this range */
		if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)
			ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
				range->range.start,
				range->range.last - range->range.start + 1);

		/* Put the child's refcount if the unmap succeeded */
		if (!ret)
			bind_range_destroy(range);
		else
			kfree(range);

		unmap_fail = unmap_fail || ret;
	}

	if (unmap_fail)
		return;

	/* Put back the GPU address */
	kgsl_mmu_put_gpuaddr(memdesc->pagetable, memdesc);
	memdesc->gpuaddr = 0;
	memdesc->pagetable = NULL;
}

static struct kgsl_memdesc_ops kgsl_vbo_ops = {
	.put_gpuaddr = kgsl_sharedmem_vbo_put_gpuaddr,
};

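/*
 * Initialize @memdesc as a VBO of @size bytes (rounded up to a page
 * boundary). A VBO has no backing pages of its own; child buffers are mapped
 * into it later through the bind ranges ioctl.
 */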
int kgsl_sharedmem_allocate_vbo(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags)
{
	size = PAGE_ALIGN(size);

	/* Make sure that VBOs are supported by the MMU */
	if (WARN_ON_ONCE(!kgsl_mmu_has_feature(device,
		KGSL_MMU_SUPPORT_VBO)))
		return -EOPNOTSUPP;

	kgsl_memdesc_init(device, memdesc, flags);
	memdesc->priv = 0;

	memdesc->ops = &kgsl_vbo_ops;
	memdesc->size = size;

	/* Set up the interval tree and lock */
	memdesc->ranges = RB_ROOT_CACHED;
	mutex_init(&memdesc->ranges_lock);

	return 0;
}

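/*
 * Return true if [offset, offset + length) fits inside the memdesc. The
 * "offset + length > offset" check rejects both a zero length and a 64-bit
 * overflow of the sum.
 */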
static bool kgsl_memdesc_check_range(struct kgsl_memdesc *memdesc,
		u64 offset, u64 length)
{
	return ((offset < memdesc->size) &&
		(offset + length > offset) &&
		(offset + length) <= memdesc->size);
}

static void kgsl_sharedmem_free_bind_op(struct kgsl_sharedmem_bind_op *op)
{
	int i;

	if (IS_ERR_OR_NULL(op))
		return;

	for (i = 0; i < op->nr_ops; i++) {
		/* Decrement the vbo_count we added when creating the bind_op */
		if (op->ops[i].entry)
			atomic_dec(&op->ops[i].entry->vbo_count);

		/* Release the reference on the child entry */
		kgsl_mem_entry_put_deferred(op->ops[i].entry);
	}

	/* Release the reference on the target entry */
	kgsl_mem_entry_put_deferred(op->target);

	kvfree(op->ops);
	kfree(op);
}

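/*
 * Build a kgsl_sharedmem_bind_op from a user supplied array of
 * kgsl_gpumem_bind_range structures. Every range is validated (page aligned,
 * non-zero length, inside both the target and the child, matching secure
 * flags) and a reference is taken on each child entry so it stays pinned
 * until the op is freed. Returns the new op or an ERR_PTR() on failure.
 */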
struct kgsl_sharedmem_bind_op *
kgsl_sharedmem_create_bind_op(struct kgsl_process_private *private,
		u32 target_id, void __user *ranges, u32 ranges_nents,
		u64 ranges_size)
{
	struct kgsl_sharedmem_bind_op *op;
	struct kgsl_mem_entry *target;
	int ret, i;

	/* There must be at least one defined operation */
	if (!ranges_nents)
		return ERR_PTR(-EINVAL);

	/* Find the target memory entry */
	target = kgsl_sharedmem_find_id(private, target_id);
	if (!target)
		return ERR_PTR(-ENOENT);

	if (!(target->memdesc.flags & KGSL_MEMFLAGS_VBO)) {
		kgsl_mem_entry_put(target);
		return ERR_PTR(-EINVAL);
	}

	/* Make a container for the bind operations */
	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		kgsl_mem_entry_put(target);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Make an array for the individual operations. Use __GFP_NOWARN and
	 * __GFP_NORETRY to make sure a very large request quietly fails
	 */
	op->ops = kvcalloc(ranges_nents, sizeof(*op->ops),
		GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!op->ops) {
		kfree(op);
		kgsl_mem_entry_put(target);
		return ERR_PTR(-ENOMEM);
	}

	op->nr_ops = ranges_nents;
	op->target = target;

	/* Make sure process is pinned in memory before proceeding */
	atomic_inc(&private->cmd_count);
	ret = kgsl_reclaim_to_pinned_state(private);
	if (ret)
		goto err;

	for (i = 0; i < ranges_nents; i++) {
		struct kgsl_gpumem_bind_range range;
		struct kgsl_mem_entry *entry;
		u32 size;

		size = min_t(u32, sizeof(range), ranges_size);

		ret = -EINVAL;

		if (copy_from_user(&range, ranges, size)) {
			ret = -EFAULT;
			goto err;
		}

		/* The offset must be page aligned */
		if (!PAGE_ALIGNED(range.target_offset))
			goto err;

		/* The length of the operation must be aligned and non zero */
		if (!range.length || !PAGE_ALIGNED(range.length))
			goto err;

		/* Make sure the range fits in the target */
		if (!kgsl_memdesc_check_range(&target->memdesc,
			range.target_offset, range.length))
			goto err;

		/*
		 * Special case: treat child id 0 as a request to unbind the
		 * specified range (which could span multiple child buffers)
		 * without supplying backing physical buffer information.
		 */
		if (range.child_id == 0 && range.op == KGSL_GPUMEM_RANGE_OP_UNBIND) {
			op->ops[i].entry = NULL;
			op->ops[i].start = range.target_offset;
			op->ops[i].last = range.target_offset + range.length - 1;
			/* The child offset doesn't matter for unbind, so set it to 0 */
			op->ops[i].child_offset = 0;
			op->ops[i].op = range.op;

			ranges += ranges_size;
			continue;
		}

		/* Get the child object */
		op->ops[i].entry = kgsl_sharedmem_find_id(private,
			range.child_id);
		entry = op->ops[i].entry;
		if (!entry) {
			ret = -ENOENT;
			goto err;
		}

		/* Keep the child pinned in memory */
		atomic_inc(&entry->vbo_count);

		/* Make sure the child is not a VBO */
		if ((entry->memdesc.flags & KGSL_MEMFLAGS_VBO)) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * Make sure that only secure children are mapped in secure VBOs
		 * and vice versa
		 */
		if ((target->memdesc.flags & KGSL_MEMFLAGS_SECURE) !=
		    (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE)) {
			ret = -EPERM;
			goto err;
		}

		/* Make sure the range operation is valid */
		if (range.op != KGSL_GPUMEM_RANGE_OP_BIND &&
		    range.op != KGSL_GPUMEM_RANGE_OP_UNBIND)
			goto err;

		if (range.op == KGSL_GPUMEM_RANGE_OP_BIND) {
			if (!PAGE_ALIGNED(range.child_offset))
				goto err;

			/* Make sure the range fits in the child */
			if (!kgsl_memdesc_check_range(&entry->memdesc,
				range.child_offset, range.length))
				goto err;
		} else {
			/* For unbind operations the child offset must be 0 */
			if (range.child_offset)
				goto err;
		}

		op->ops[i].entry = entry;
		op->ops[i].start = range.target_offset;
		op->ops[i].last = range.target_offset + range.length - 1;
		op->ops[i].child_offset = range.child_offset;
		op->ops[i].op = range.op;

		ranges += ranges_size;
	}

	atomic_dec(&private->cmd_count);
	init_completion(&op->comp);
	kref_init(&op->ref);

	return op;

err:
	atomic_dec(&private->cmd_count);
	kgsl_sharedmem_free_bind_op(op);
	return ERR_PTR(ret);
}

void kgsl_sharedmem_bind_range_destroy(struct kref *kref)
{
	struct kgsl_sharedmem_bind_op *op = container_of(kref,
		struct kgsl_sharedmem_bind_op, ref);

	kgsl_sharedmem_free_bind_op(op);
}

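/*
 * Worker that applies each queued bind/unbind operation to the target VBO,
 * then wakes any waiters and runs the optional completion callback.
 */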
static void kgsl_sharedmem_bind_worker(struct work_struct *work)
{
	struct kgsl_sharedmem_bind_op *op = container_of(work,
		struct kgsl_sharedmem_bind_op, work);
	int i;

	for (i = 0; i < op->nr_ops; i++) {
		if (op->ops[i].op == KGSL_GPUMEM_RANGE_OP_BIND)
			kgsl_memdesc_add_range(op->target,
				op->ops[i].start,
				op->ops[i].last,
				op->ops[i].entry,
				op->ops[i].child_offset);
		else
			kgsl_memdesc_remove_range(op->target,
				op->ops[i].start,
				op->ops[i].last,
				op->ops[i].entry);
	}

	/* Wake up any threads waiting for the bind operation */
	complete_all(&op->comp);

	if (op->callback)
		op->callback(op);

	/* Put the refcount we took when scheduling the worker */
	kgsl_sharedmem_put_bind_op(op);
}

void kgsl_sharedmem_bind_ranges(struct kgsl_sharedmem_bind_op *op)
{
	/* Take a reference to the operation while it is scheduled */
	kref_get(&op->ref);

	INIT_WORK(&op->work, kgsl_sharedmem_bind_worker);
	schedule_work(&op->work);
}

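/*
 * When KGSL_GPUMEM_BIND_FENCE_OUT is requested, the asynchronous bind is
 * tracked by a dma_fence wrapped in a sync_file, so userspace can wait on the
 * returned fd until the worker signals completion.
 */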
struct kgsl_sharedmem_bind_fence {
	struct dma_fence base;
	spinlock_t lock;
	int fd;
	struct kgsl_sharedmem_bind_op *op;
};

static const char *bind_fence_get_driver_name(struct dma_fence *fence)
{
	return "kgsl_sharedmem_bind";
}

static const char *bind_fence_get_timeline_name(struct dma_fence *fence)
{
	return "(unbound)";
}

static void bind_fence_release(struct dma_fence *fence)
{
	struct kgsl_sharedmem_bind_fence *bind_fence = container_of(fence,
		struct kgsl_sharedmem_bind_fence, base);

	kgsl_sharedmem_put_bind_op(bind_fence->op);
	kfree(bind_fence);
}

static void
kgsl_sharedmem_bind_fence_callback(struct kgsl_sharedmem_bind_op *op)
{
	struct kgsl_sharedmem_bind_fence *bind_fence = op->data;

	dma_fence_signal(&bind_fence->base);
	dma_fence_put(&bind_fence->base);
}

static const struct dma_fence_ops kgsl_sharedmem_bind_fence_ops = {
	.get_driver_name = bind_fence_get_driver_name,
	.get_timeline_name = bind_fence_get_timeline_name,
	.release = bind_fence_release,
};

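/*
 * Allocate a bind fence, wrap it in a sync_file and install it into a new fd.
 * bind_fence_release() drops the op reference held by the fence.
 */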
static struct kgsl_sharedmem_bind_fence *
kgsl_sharedmem_bind_fence(struct kgsl_sharedmem_bind_op *op)
{
	struct kgsl_sharedmem_bind_fence *fence;
	struct sync_file *sync_file;
	int fd;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&fence->lock);

	dma_fence_init(&fence->base, &kgsl_sharedmem_bind_fence_ops,
		&fence->lock, dma_fence_context_alloc(1), 0);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		kfree(fence);
		return ERR_PTR(fd);
	}

	sync_file = sync_file_create(&fence->base);
	if (!sync_file) {
		put_unused_fd(fd);
		kfree(fence);
		return ERR_PTR(-ENOMEM);
	}

	fd_install(fd, sync_file->file);

	fence->fd = fd;
	fence->op = op;

	return fence;
}

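/*
 * Ioctl handler for binding ranges of child buffers into a VBO target.
 * Calling with ranges_size == 0 is a query: the expected per-range struct
 * size is returned so userspace can size its array. Otherwise the ranges are
 * validated and applied either synchronously or, with KGSL_GPUMEM_BIND_ASYNC,
 * from a workqueue. KGSL_GPUMEM_BIND_FENCE_OUT additionally returns a
 * sync_file fd in fence_id that signals once the asynchronous bind completes.
 *
 * Illustrative userspace sketch (the ioctl request macro and the exact UAPI
 * struct layouts are defined in the uapi header, not here):
 *
 *	struct kgsl_gpumem_bind_range range = {
 *		.child_id = child_buffer_id,
 *		.child_offset = 0,
 *		.target_offset = 0,
 *		.length = bind_length,
 *		.op = KGSL_GPUMEM_RANGE_OP_BIND,
 *	};
 *	struct kgsl_gpumem_bind_ranges param = {
 *		.ranges = (uintptr_t)&range,
 *		.ranges_nents = 1,
 *		.ranges_size = sizeof(range),
 *		.id = vbo_id,
 *		.flags = KGSL_GPUMEM_BIND_ASYNC | KGSL_GPUMEM_BIND_FENCE_OUT,
 *	};
 */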
long kgsl_ioctl_gpumem_bind_ranges(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_gpumem_bind_ranges *param = data;
	struct kgsl_sharedmem_bind_op *op;
	int ret;

	/* If ranges_size isn't set, return the expected size to the user */
	if (!param->ranges_size) {
		param->ranges_size = sizeof(struct kgsl_gpumem_bind_range);
		return 0;
	}

	/* FENCE_OUT only makes sense with ASYNC */
	if ((param->flags & KGSL_GPUMEM_BIND_FENCE_OUT) &&
	    !(param->flags & KGSL_GPUMEM_BIND_ASYNC))
		return -EINVAL;

	op = kgsl_sharedmem_create_bind_op(private, param->id,
		u64_to_user_ptr(param->ranges), param->ranges_nents,
		param->ranges_size);
	if (IS_ERR(op))
		return PTR_ERR(op);

	if (param->flags & KGSL_GPUMEM_BIND_ASYNC) {
		struct kgsl_sharedmem_bind_fence *fence;

		if (param->flags & KGSL_GPUMEM_BIND_FENCE_OUT) {
			fence = kgsl_sharedmem_bind_fence(op);

			if (IS_ERR(fence)) {
				kgsl_sharedmem_put_bind_op(op);
				return PTR_ERR(fence);
			}

			op->data = fence;
			op->callback = kgsl_sharedmem_bind_fence_callback;
			param->fence_id = fence->fd;
		}

		kgsl_sharedmem_bind_ranges(op);

		if (!(param->flags & KGSL_GPUMEM_BIND_FENCE_OUT))
			kgsl_sharedmem_put_bind_op(op);

		return 0;
	}

	/*
	 * Schedule the work. All the resources will be released after
	 * the bind operation is done
	 */
	kgsl_sharedmem_bind_ranges(op);

	ret = wait_for_completion_interruptible(&op->comp);
	kgsl_sharedmem_put_bind_op(op);

	return ret;
}