drm_syncobj.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518
  1. /*
  2. * Copyright 2017 Red Hat
  3. * Parts ported from amdgpu (fence wait code).
  4. * Copyright 2016 Advanced Micro Devices, Inc.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the "Software"),
  8. * to deal in the Software without restriction, including without limitation
  9. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10. * and/or sell copies of the Software, and to permit persons to whom the
  11. * Software is furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice (including the next
  14. * paragraph) shall be included in all copies or substantial portions of the
  15. * Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  20. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  22. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  23. * IN THE SOFTWARE.
  24. *
  25. * Authors:
  26. *
  27. */
  28. /**
  29. * DOC: Overview
  30. *
  31. * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
  32. * container for a synchronization primitive which can be used by userspace
  33. * to explicitly synchronize GPU commands, can be shared between userspace
  34. * processes, and can be shared between different DRM drivers.
  35. * Their primary use-case is to implement Vulkan fences and semaphores.
  36. * The syncobj userspace API provides ioctls for several operations:
  37. *
  38. * - Creation and destruction of syncobjs
  39. * - Import and export of syncobjs to/from a syncobj file descriptor
  40. * - Import and export a syncobj's underlying fence to/from a sync file
  41. * - Reset a syncobj (set its fence to NULL)
  42. * - Signal a syncobj (set a trivially signaled fence)
  43. * - Wait for a syncobj's fence to appear and be signaled
  44. *
  45. * The syncobj userspace API also provides operations to manipulate a syncobj
  46. * in terms of a timeline of struct &dma_fence_chain rather than a single
  47. * struct &dma_fence, through the following operations:
  48. *
  49. * - Signal a given point on the timeline
  50. * - Wait for a given point to appear and/or be signaled
  51. * - Import and export from/to a given point of a timeline
  52. *
  53. * At its core, a syncobj is simply a wrapper around a pointer to a struct
  54. * &dma_fence which may be NULL.
  55. * When a syncobj is first created, its pointer is either NULL or a pointer
  56. * to an already signaled fence depending on whether the
  57. * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
  58. * &DRM_IOCTL_SYNCOBJ_CREATE.
  59. *
  60. * If the syncobj is considered as a binary (its state is either signaled or
  61. * unsignaled) primitive, when GPU work is enqueued in a DRM driver to signal
  62. * the syncobj, the syncobj's fence is replaced with a fence which will be
  63. * signaled by the completion of that work.
  64. * If the syncobj is considered as a timeline primitive, when GPU work is
  65. * enqueued in a DRM driver to signal a given point of the syncobj, a new
  66. * struct &dma_fence_chain pointing to the DRM driver's fence and also
  67. * pointing to the previous fence that was in the syncobj. The new struct
  68. * &dma_fence_chain fence replaces the syncobj's fence and will be signaled by
  69. * completion of the DRM driver's work and also any work associated with the
  70. * fence previously in the syncobj.
  71. *
  72. * When GPU work which waits on a syncobj is enqueued in a DRM driver, at the
  73. * time the work is enqueued, it waits on the syncobj's fence before
  74. * submitting the work to hardware. That fence is either :
  75. *
  76. * - The syncobj's current fence if the syncobj is considered as a binary
  77. * primitive.
  78. * - The struct &dma_fence associated with a given point if the syncobj is
  79. * considered as a timeline primitive.
  80. *
  81. * If the syncobj's fence is NULL or not present in the syncobj's timeline,
  82. * the enqueue operation is expected to fail.
  83. *
  84. * With binary syncobj, all manipulation of the syncobjs's fence happens in
  85. * terms of the current fence at the time the ioctl is called by userspace
  86. * regardless of whether that operation is an immediate host-side operation
  87. * (signal or reset) or an operation which is enqueued in some driver
  88. * queue. &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used
  89. * to manipulate a syncobj from the host by resetting its pointer to NULL or
  90. * setting its pointer to a fence which is already signaled.
  91. *
  92. * With a timeline syncobj, all manipulation of the syncobj's fence happens in
  93. * terms of a u64 value referring to point in the timeline. See
  94. * dma_fence_chain_find_seqno() to see how a given point is found in the
  95. * timeline.
  96. *
  97. * Note that applications should be careful to always use timeline set of
  98. * ioctl() when dealing with syncobj considered as timeline. Using a binary
  99. * set of ioctl() with a syncobj considered as timeline could result in incorrect
  100. * synchronization. The use of binary syncobj is supported through the
  101. * timeline set of ioctl() by using a point value of 0, this will reproduce
  102. * the behavior of the binary set of ioctl() (for example replace the
  103. * syncobj's fence when signaling).
  104. *
  105. *
  106. * Host-side wait on syncobjs
  107. * --------------------------
  108. *
  109. * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
  110. * host-side wait on all of the syncobj fences simultaneously.
  111. * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
  112. * all of the syncobj fences to be signaled before it returns.
  113. * Otherwise, it returns once at least one syncobj fence has been signaled
  114. * and the index of a signaled fence is written back to the client.
  115. *
  116. * Unlike the enqueued GPU work dependencies which fail if they see a NULL
  117. * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
  118. * the host-side wait will first wait for the syncobj to receive a non-NULL
  119. * fence and then wait on that fence.
  120. * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
  121. * syncobjs in the array has a NULL fence, -EINVAL will be returned.
  122. * Assuming the syncobj starts off with a NULL fence, this allows a client
  123. * to do a host wait in one thread (or process) which waits on GPU work
  124. * submitted in another thread (or process) without having to manually
  125. * synchronize between the two.
  126. * This requirement is inherited from the Vulkan fence API.
  127. *
  128. * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
  129. * handles as well as an array of u64 points and does a host-side wait on all
  130. * of syncobj fences at the given points simultaneously.
  131. *
  132. * &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT also adds the ability to wait for a given
  133. * fence to materialize on the timeline without waiting for the fence to be
  134. * signaled by using the &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag. This
  135. * requirement is inherited from the wait-before-signal behavior required by
  136. * the Vulkan timeline semaphore API.
  137. *
  138. *
  139. * Import/export of syncobjs
  140. * -------------------------
  141. *
  142. * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
  143. * provide two mechanisms for import/export of syncobjs.
  144. *
  145. * The first lets the client import or export an entire syncobj to a file
  146. * descriptor.
  147. * These fd's are opaque and have no other use case, except passing the
  148. * syncobj between processes.
  149. * All exported file descriptors and any syncobj handles created as a
  150. * result of importing those file descriptors own a reference to the
  151. * same underlying struct &drm_syncobj and the syncobj can be used
  152. * persistently across all the processes with which it is shared.
  153. * The syncobj is freed only once the last reference is dropped.
  154. * Unlike dma-buf, importing a syncobj creates a new handle (with its own
  155. * reference) for every import instead of de-duplicating.
  156. * The primary use-case of this persistent import/export is for shared
  157. * Vulkan fences and semaphores.
  158. *
  159. * The second import/export mechanism, which is indicated by
  160. * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
  161. * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE lets the client
  162. * import/export the syncobj's current fence from/to a &sync_file.
  163. * When a syncobj is exported to a sync file, that sync file wraps the
  164. * syncobj's fence at the time of export and any later signal or reset
  165. * operations on the syncobj will not affect the exported sync file.
  166. * When a sync file is imported into a syncobj, the syncobj's fence is set
  167. * to the fence wrapped by that sync file.
  168. * Because sync files are immutable, resetting or signaling the syncobj
  169. * will not affect any sync files whose fences have been imported into the
  170. * syncobj.
  171. *
  172. *
  173. * Import/export of timeline points in timeline syncobjs
  174. * -----------------------------------------------------
  175. *
  176. * &DRM_IOCTL_SYNCOBJ_TRANSFER provides a mechanism to transfer a struct
  177. * &dma_fence_chain of a syncobj at a given u64 point to another u64 point
  178. * into another syncobj.
  179. *
  180. * Note that if you want to transfer a struct &dma_fence_chain from a given
  181. * point on a timeline syncobj from/into a binary syncobj, you can use the
  182. * point 0 to mean take/replace the fence in the syncobj.
  183. */
  184. #include <linux/anon_inodes.h>
  185. #include <linux/dma-fence-unwrap.h>
  186. #include <linux/file.h>
  187. #include <linux/fs.h>
  188. #include <linux/sched/signal.h>
  189. #include <linux/sync_file.h>
  190. #include <linux/uaccess.h>
  191. #include <drm/drm.h>
  192. #include <drm/drm_drv.h>
  193. #include <drm/drm_file.h>
  194. #include <drm/drm_gem.h>
  195. #include <drm/drm_print.h>
  196. #include <drm/drm_syncobj.h>
  197. #include <drm/drm_utils.h>
  198. #include "drm_internal.h"
/*
 * One pending host-side wait on a syncobj.
 *
 * While no suitable fence is available, the entry is linked into
 * &drm_syncobj.cb_list via @node.  Once a fence for @point materializes it
 * is stored in @fence and @task can stop sleeping.  @point is the timeline
 * sequence number being waited for (0 for binary syncobjs).  @fence_cb is
 * used by wait code to hook a callback onto @fence (users of it are outside
 * this view).
 */
struct syncobj_wait_entry {
	struct list_head node;
	struct task_struct *task;
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;
	u64 point;
};
  206. static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
  207. struct syncobj_wait_entry *wait);
  208. /**
  209. * drm_syncobj_find - lookup and reference a sync object.
  210. * @file_private: drm file private pointer
  211. * @handle: sync object handle to lookup.
  212. *
  213. * Returns a reference to the syncobj pointed to by handle or NULL. The
  214. * reference must be released by calling drm_syncobj_put().
  215. */
  216. struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
  217. u32 handle)
  218. {
  219. struct drm_syncobj *syncobj;
  220. spin_lock(&file_private->syncobj_table_lock);
  221. /* Check if we currently have a reference on the object */
  222. syncobj = idr_find(&file_private->syncobj_idr, handle);
  223. if (syncobj)
  224. drm_syncobj_get(syncobj);
  225. spin_unlock(&file_private->syncobj_table_lock);
  226. return syncobj;
  227. }
  228. EXPORT_SYMBOL(drm_syncobj_find);
/* Queue @wait on @syncobj, or resolve it immediately if a suitable fence
 * already exists for @wait->point.
 */
static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
				       struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	/* A fence was already found for this waiter; nothing to do. */
	if (wait->fence)
		return;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		/* No fence at all, or the requested point has not
		 * materialized on the timeline yet: park the waiter on the
		 * syncobj's callback list.
		 */
		dma_fence_put(fence);
		list_add_tail(&wait->node, &syncobj->cb_list);
	} else if (!fence) {
		/* dma_fence_chain_find_seqno() succeeded but cleared the
		 * fence, i.e. the point is already signaled: hand the waiter
		 * an already-signaled stub fence instead of NULL.
		 */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}
	spin_unlock(&syncobj->lock);
}
/* Unlink @wait from the syncobj's callback list.  Entries that were never
 * queued (zero-initialized, so ->node.next is NULL) are left alone.
 */
static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
				    struct syncobj_wait_entry *wait)
{
	if (!wait->node.next)
		return;

	spin_lock(&syncobj->lock);
	list_del_init(&wait->node);
	spin_unlock(&syncobj->lock);
}
/**
 * drm_syncobj_add_point - add new timeline point to the syncobj
 * @syncobj: sync object to add the timeline point to
 * @chain: chain node to use to add the point
 * @fence: fence to encapsulate in the chain node
 * @point: sequence number to use for the point
 *
 * Add the chain node as new timeline point to the syncobj.
 */
void drm_syncobj_add_point(struct drm_syncobj *syncobj,
			   struct dma_fence_chain *chain,
			   struct dma_fence *fence,
			   uint64_t point)
{
	struct syncobj_wait_entry *cur, *tmp;
	struct dma_fence *prev;

	/* The chain node consumes a reference on @fence. */
	dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	prev = drm_syncobj_fence_get(syncobj);
	/* Adding an out-of-order point to the timeline could cause the
	 * payload returned from the query ioctl to be 0!
	 */
	if (prev && prev->seqno >= point)
		DRM_DEBUG("You are adding an unorder point to timeline!\n");
	dma_fence_chain_init(chain, prev, fence, point);
	rcu_assign_pointer(syncobj->fence, &chain->base);

	/* Re-check every parked waiter against the newly published point. */
	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
		syncobj_wait_syncobj_func(syncobj, cur);
	spin_unlock(&syncobj->lock);

	/* Walk the chain once to trigger garbage collection */
	dma_fence_chain_for_each(fence, prev);
	dma_fence_put(prev);
}
EXPORT_SYMBOL(drm_syncobj_add_point);
/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object, may be NULL to reset.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct syncobj_wait_entry *cur, *tmp;

	/* Take the syncobj's reference before publishing the fence. */
	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		/* Re-run every parked waiter against the new fence. */
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
			syncobj_wait_syncobj_func(syncobj, cur);
	}

	spin_unlock(&syncobj->lock);

	/* Drop the reference previously held for the old fence. */
	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
  318. /**
  319. * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
  320. * @syncobj: sync object to assign the fence on
  321. *
  322. * Assign a already signaled stub fence to the sync object.
  323. */
  324. static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
  325. {
  326. struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
  327. if (!fence)
  328. return -ENOMEM;
  329. drm_syncobj_replace_fence(syncobj, fence);
  330. dma_fence_put(fence);
  331. return 0;
  332. }
  333. /* 5s default for wait submission */
  334. #define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @point: timeline point
 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle, u64 point, u64 flags,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	struct syncobj_wait_entry wait;
	u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
	int ret;

	if (!syncobj)
		return -ENOENT;

	/* Waiting for userspace with locks held is illegal because that can
	 * trivially deadlock with page faults, for example.  Make lockdep
	 * complain about it early on.
	 */
	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		might_sleep();
		lockdep_assert_none_held_once();
	}

	*fence = drm_syncobj_fence_get(syncobj);

	if (*fence) {
		ret = dma_fence_chain_find_seqno(fence, point);
		if (!ret) {
			/* If the requested seqno is already signaled
			 * drm_syncobj_find_fence may return a NULL
			 * fence. To make sure the recipient gets
			 * signalled, use a new fence instead.
			 */
			if (!*fence)
				*fence = dma_fence_get_stub();

			goto out;
		}
		dma_fence_put(*fence);
	} else {
		ret = -EINVAL;
	}

	/* Without WAIT_FOR_SUBMIT a missing fence/point is a hard error. */
	if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		goto out;

	memset(&wait, 0, sizeof(wait));
	wait.task = current;
	wait.point = point;
	drm_syncobj_fence_add_wait(syncobj, &wait);

	/* Sleep until a fence for the point shows up, the (5s default)
	 * timeout expires, or a signal arrives.
	 */
	do {
		set_current_state(TASK_INTERRUPTIBLE);
		if (wait.fence) {
			ret = 0;
			break;
		}
		if (timeout == 0) {
			ret = -ETIME;
			break;
		}

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

	__set_current_state(TASK_RUNNING);
	*fence = wait.fence;

	/* Drop the wait entry from the syncobj if it is still queued. */
	if (wait.node.next)
		drm_syncobj_remove_wait(syncobj, &wait);

out:
	drm_syncobj_put(syncobj);

	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
  416. /**
  417. * drm_syncobj_free - free a sync object.
  418. * @kref: kref to free.
  419. *
  420. * Only to be called from kref_put in drm_syncobj_put.
  421. */
  422. void drm_syncobj_free(struct kref *kref)
  423. {
  424. struct drm_syncobj *syncobj = container_of(kref,
  425. struct drm_syncobj,
  426. refcount);
  427. drm_syncobj_replace_fence(syncobj, NULL);
  428. kfree(syncobj);
  429. }
  430. EXPORT_SYMBOL(drm_syncobj_free);
  431. /**
  432. * drm_syncobj_create - create a new syncobj
  433. * @out_syncobj: returned syncobj
  434. * @flags: DRM_SYNCOBJ_* flags
  435. * @fence: if non-NULL, the syncobj will represent this fence
  436. *
  437. * This is the first function to create a sync object. After creating, drivers
  438. * probably want to make it available to userspace, either through
  439. * drm_syncobj_get_handle() or drm_syncobj_get_fd().
  440. *
  441. * Returns 0 on success or a negative error value on failure.
  442. */
  443. int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
  444. struct dma_fence *fence)
  445. {
  446. int ret;
  447. struct drm_syncobj *syncobj;
  448. syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
  449. if (!syncobj)
  450. return -ENOMEM;
  451. kref_init(&syncobj->refcount);
  452. INIT_LIST_HEAD(&syncobj->cb_list);
  453. spin_lock_init(&syncobj->lock);
  454. if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
  455. ret = drm_syncobj_assign_null_handle(syncobj);
  456. if (ret < 0) {
  457. drm_syncobj_put(syncobj);
  458. return ret;
  459. }
  460. }
  461. if (fence)
  462. drm_syncobj_replace_fence(syncobj, fence);
  463. *out_syncobj = syncobj;
  464. return 0;
  465. }
  466. EXPORT_SYMBOL(drm_syncobj_create);
  467. /**
  468. * drm_syncobj_get_handle - get a handle from a syncobj
  469. * @file_private: drm file private pointer
  470. * @syncobj: Sync object to export
  471. * @handle: out parameter with the new handle
  472. *
  473. * Exports a sync object created with drm_syncobj_create() as a handle on
  474. * @file_private to userspace.
  475. *
  476. * Returns 0 on success or a negative error value on failure.
  477. */
  478. int drm_syncobj_get_handle(struct drm_file *file_private,
  479. struct drm_syncobj *syncobj, u32 *handle)
  480. {
  481. int ret;
  482. /* take a reference to put in the idr */
  483. drm_syncobj_get(syncobj);
  484. idr_preload(GFP_KERNEL);
  485. spin_lock(&file_private->syncobj_table_lock);
  486. ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
  487. spin_unlock(&file_private->syncobj_table_lock);
  488. idr_preload_end();
  489. if (ret < 0) {
  490. drm_syncobj_put(syncobj);
  491. return ret;
  492. }
  493. *handle = ret;
  494. return 0;
  495. }
  496. EXPORT_SYMBOL(drm_syncobj_get_handle);
  497. static int drm_syncobj_create_as_handle(struct drm_file *file_private,
  498. u32 *handle, uint32_t flags)
  499. {
  500. int ret;
  501. struct drm_syncobj *syncobj;
  502. ret = drm_syncobj_create(&syncobj, flags, NULL);
  503. if (ret)
  504. return ret;
  505. ret = drm_syncobj_get_handle(file_private, syncobj, handle);
  506. drm_syncobj_put(syncobj);
  507. return ret;
  508. }
  509. static int drm_syncobj_destroy(struct drm_file *file_private,
  510. u32 handle)
  511. {
  512. struct drm_syncobj *syncobj;
  513. spin_lock(&file_private->syncobj_table_lock);
  514. syncobj = idr_remove(&file_private->syncobj_idr, handle);
  515. spin_unlock(&file_private->syncobj_table_lock);
  516. if (!syncobj)
  517. return -EINVAL;
  518. drm_syncobj_put(syncobj);
  519. return 0;
  520. }
  521. static int drm_syncobj_file_release(struct inode *inode, struct file *file)
  522. {
  523. struct drm_syncobj *syncobj = file->private_data;
  524. drm_syncobj_put(syncobj);
  525. return 0;
  526. }
/* File operations for exported syncobj fds; only release is needed. */
static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};
/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	/* The anon inode file keeps a reference on the syncobj (taken
	 * below), dropped again in drm_syncobj_file_release().
	 */
	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);
  559. static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
  560. u32 handle, int *p_fd)
  561. {
  562. struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
  563. int ret;
  564. if (!syncobj)
  565. return -EINVAL;
  566. ret = drm_syncobj_get_fd(syncobj, p_fd);
  567. drm_syncobj_put(syncobj);
  568. return ret;
  569. }
/* Import a syncobj fd (as created by drm_syncobj_get_fd()) into a new
 * handle on @file_private.  The new handle owns its own reference on the
 * underlying syncobj.
 */
static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct fd f = fdget(fd);
	int ret;

	if (!f.file)
		return -EINVAL;

	/* Only fds backed by our anon-inode fops are syncobj fds. */
	if (f.file->f_op != &drm_syncobj_file_fops) {
		fdput(f);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = f.file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	/* idr_alloc() starts at 1, so a positive return is the new handle. */
	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fdput(f);
	return ret;
}
  598. static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
  599. int fd, int handle)
  600. {
  601. struct dma_fence *fence = sync_file_get_fence(fd);
  602. struct drm_syncobj *syncobj;
  603. if (!fence)
  604. return -EINVAL;
  605. syncobj = drm_syncobj_find(file_private, handle);
  606. if (!syncobj) {
  607. dma_fence_put(fence);
  608. return -ENOENT;
  609. }
  610. drm_syncobj_replace_fence(syncobj, fence);
  611. dma_fence_put(fence);
  612. drm_syncobj_put(syncobj);
  613. return 0;
  614. }
  615. static int drm_syncobj_export_sync_file(struct drm_file *file_private,
  616. int handle, int *p_fd)
  617. {
  618. int ret;
  619. struct dma_fence *fence;
  620. struct sync_file *sync_file;
  621. int fd = get_unused_fd_flags(O_CLOEXEC);
  622. if (fd < 0)
  623. return fd;
  624. ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
  625. if (ret)
  626. goto err_put_fd;
  627. sync_file = sync_file_create(fence);
  628. dma_fence_put(fence);
  629. if (!sync_file) {
  630. ret = -EINVAL;
  631. goto err_put_fd;
  632. }
  633. fd_install(fd, sync_file->file);
  634. *p_fd = fd;
  635. return 0;
  636. err_put_fd:
  637. put_unused_fd(fd);
  638. return ret;
  639. }
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	/* Handle 0 is reserved; allocations start at 1. */
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}
/* idr_for_each() callback: drop the reference held by the handle table. */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}
/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}
  675. int
  676. drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
  677. struct drm_file *file_private)
  678. {
  679. struct drm_syncobj_create *args = data;
  680. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  681. return -EOPNOTSUPP;
  682. /* no valid flags yet */
  683. if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
  684. return -EINVAL;
  685. return drm_syncobj_create_as_handle(file_private,
  686. &args->handle, args->flags);
  687. }
  688. int
  689. drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
  690. struct drm_file *file_private)
  691. {
  692. struct drm_syncobj_destroy *args = data;
  693. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  694. return -EOPNOTSUPP;
  695. /* make sure padding is empty */
  696. if (args->pad)
  697. return -EINVAL;
  698. return drm_syncobj_destroy(file_private, args->handle);
  699. }
  700. int
  701. drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
  702. struct drm_file *file_private)
  703. {
  704. struct drm_syncobj_handle *args = data;
  705. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  706. return -EOPNOTSUPP;
  707. if (args->pad)
  708. return -EINVAL;
  709. if (args->flags != 0 &&
  710. args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
  711. return -EINVAL;
  712. if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
  713. return drm_syncobj_export_sync_file(file_private, args->handle,
  714. &args->fd);
  715. return drm_syncobj_handle_to_fd(file_private, args->handle,
  716. &args->fd);
  717. }
  718. int
  719. drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
  720. struct drm_file *file_private)
  721. {
  722. struct drm_syncobj_handle *args = data;
  723. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  724. return -EOPNOTSUPP;
  725. if (args->pad)
  726. return -EINVAL;
  727. if (args->flags != 0 &&
  728. args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
  729. return -EINVAL;
  730. if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
  731. return drm_syncobj_import_sync_file_fence(file_private,
  732. args->fd,
  733. args->handle);
  734. return drm_syncobj_fd_to_handle(file_private, args->fd,
  735. &args->handle);
  736. }
/*
 * Transfer the fence behind (src_handle, src_point) onto dst_handle's
 * timeline at dst_point by appending a new dma_fence_chain node.
 * Returns 0 on success or a negative errno.
 */
static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
					    struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *timeline_syncobj = NULL;
	struct dma_fence *fence, *tmp;
	struct dma_fence_chain *chain;
	int ret;

	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!timeline_syncobj) {
		return -ENOENT;
	}
	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags,
				     &tmp);
	if (ret)
		goto err_put_timeline;

	/* Flatten any fence container into a single mergeable fence. */
	fence = dma_fence_unwrap_merge(tmp);
	dma_fence_put(tmp);
	if (!fence) {
		ret = -ENOMEM;
		goto err_put_timeline;
	}

	chain = dma_fence_chain_alloc();
	if (!chain) {
		ret = -ENOMEM;
		goto err_free_fence;
	}

	/* NOTE(review): drm_syncobj_add_point() appears to take its own
	 * fence reference and ownership of @chain — our fence ref is
	 * dropped below on all paths; confirm against its definition.
	 */
	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
err_free_fence:
	dma_fence_put(fence);
err_put_timeline:
	drm_syncobj_put(timeline_syncobj);

	return ret;
}
  771. static int
  772. drm_syncobj_transfer_to_binary(struct drm_file *file_private,
  773. struct drm_syncobj_transfer *args)
  774. {
  775. struct drm_syncobj *binary_syncobj = NULL;
  776. struct dma_fence *fence;
  777. int ret;
  778. binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
  779. if (!binary_syncobj)
  780. return -ENOENT;
  781. ret = drm_syncobj_find_fence(file_private, args->src_handle,
  782. args->src_point, args->flags, &fence);
  783. if (ret)
  784. goto err;
  785. drm_syncobj_replace_fence(binary_syncobj, fence);
  786. dma_fence_put(fence);
  787. err:
  788. drm_syncobj_put(binary_syncobj);
  789. return ret;
  790. }
  791. int
  792. drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
  793. struct drm_file *file_private)
  794. {
  795. struct drm_syncobj_transfer *args = data;
  796. int ret;
  797. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
  798. return -EOPNOTSUPP;
  799. if (args->pad)
  800. return -EINVAL;
  801. if (args->dst_point)
  802. ret = drm_syncobj_transfer_to_timeline(file_private, args);
  803. else
  804. ret = drm_syncobj_transfer_to_binary(file_private, args);
  805. return ret;
  806. }
  807. static void syncobj_wait_fence_func(struct dma_fence *fence,
  808. struct dma_fence_cb *cb)
  809. {
  810. struct syncobj_wait_entry *wait =
  811. container_of(cb, struct syncobj_wait_entry, fence_cb);
  812. wake_up_process(wait->task);
  813. }
/*
 * Called (under the syncobj lock) when a fence is attached to a syncobj
 * that has WAIT_FOR_SUBMIT waiters.  Tries to resolve the waiter's
 * timeline point against the new fence; on success hands the resolved
 * fence to the waiter, removes it from the wait list and wakes it.
 */
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	/* This happens inside the syncobj lock */
	fence = rcu_dereference_protected(syncobj->fence,
					  lockdep_is_held(&syncobj->lock));
	/* dma_fence_get() tolerates NULL, so take the ref before checking. */
	dma_fence_get(fence);
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		/* No fence yet, or the point can't be resolved: keep waiting. */
		dma_fence_put(fence);
		return;
	} else if (!fence) {
		/* dma_fence_chain_find_seqno() cleared @fence: the point is
		 * already signaled, so wait on an always-signaled stub.
		 */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}

	wake_up_process(wait->task);
	list_del_init(&wait->node);
}
/*
 * Core wait loop shared by the binary and timeline wait ioctls.
 *
 * @syncobjs:    array of @count referenced syncobjs to wait on
 * @user_points: optional user pointer to @count timeline points
 *               (NULL means "wait on point 0", i.e. binary semantics)
 * @count:       number of syncobjs/points
 * @flags:       DRM_SYNCOBJ_WAIT_FLAGS_* controlling all-vs-any,
 *               wait-for-submit and wait-available behaviour
 * @timeout:     timeout in jiffies (0 = poll)
 * @idx:         optional out-parameter for the first signaled index
 *
 * Returns remaining jiffies on success, or a negative errno
 * (-EINVAL, -ENOMEM, -EFAULT, -ETIME, -ERESTARTSYS).
 */
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  void __user *user_points,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint64_t *points;
	uint32_t signaled_count, i;

	/* We may block indefinitely for a fence submission; holding any
	 * lock here would be a deadlock risk.
	 */
	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
		lockdep_assert_none_held_once();

	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
	if (points == NULL)
		return -ENOMEM;

	if (!user_points) {
		memset(points, 0, count * sizeof(uint64_t));
	} else if (copy_from_user(points, user_points,
				  sizeof(uint64_t) * count)) {
		timeout = -EFAULT;
		goto err_free_points;
	}

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries) {
		timeout = -ENOMEM;
		goto err_free_points;
	}
	/* Walk the list of sync objects and initialize entries. We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		struct dma_fence *fence;

		entries[i].task = current;
		entries[i].point = points[i];
		fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
			dma_fence_put(fence);
			if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
				     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		/* find_seqno may clear the fence when the point is already
		 * signaled; substitute an always-signaled stub.
		 */
		if (fence)
			entries[i].fence = fence;
		else
			entries[i].fence = dma_fence_get_stub();

		if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
		    dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called. So here if we fail to match signaled_count, we need to
	 * fall through and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i)
			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			/* dma_fence_add_callback() failing means the fence
			 * is already signaled.
			 */
			if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
			    dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
		/* Only fences we attached a callback to need detaching. */
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

err_free_points:
	kfree(points);

	return timeout;
}
  957. /**
  958. * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
  959. *
  960. * @timeout_nsec: timeout nsec component in ns, 0 for poll
  961. *
  962. * Calculate the timeout in jiffies from an absolute time in sec/nsec.
  963. */
  964. signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
  965. {
  966. ktime_t abs_timeout, now;
  967. u64 timeout_ns, timeout_jiffies64;
  968. /* make 0 timeout means poll - absolute 0 doesn't seem valid */
  969. if (timeout_nsec == 0)
  970. return 0;
  971. abs_timeout = ns_to_ktime(timeout_nsec);
  972. now = ktime_get();
  973. if (!ktime_after(abs_timeout, now))
  974. return 0;
  975. timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
  976. timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
  977. /* clamp timeout to avoid infinite timeout */
  978. if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
  979. return MAX_SCHEDULE_TIMEOUT - 1;
  980. return timeout_jiffies64 + 1;
  981. }
  982. EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
  983. static int drm_syncobj_array_wait(struct drm_device *dev,
  984. struct drm_file *file_private,
  985. struct drm_syncobj_wait *wait,
  986. struct drm_syncobj_timeline_wait *timeline_wait,
  987. struct drm_syncobj **syncobjs, bool timeline)
  988. {
  989. signed long timeout = 0;
  990. uint32_t first = ~0;
  991. if (!timeline) {
  992. timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
  993. timeout = drm_syncobj_array_wait_timeout(syncobjs,
  994. NULL,
  995. wait->count_handles,
  996. wait->flags,
  997. timeout, &first);
  998. if (timeout < 0)
  999. return timeout;
  1000. wait->first_signaled = first;
  1001. } else {
  1002. timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
  1003. timeout = drm_syncobj_array_wait_timeout(syncobjs,
  1004. u64_to_user_ptr(timeline_wait->points),
  1005. timeline_wait->count_handles,
  1006. timeline_wait->flags,
  1007. timeout, &first);
  1008. if (timeout < 0)
  1009. return timeout;
  1010. timeline_wait->first_signaled = first;
  1011. }
  1012. return 0;
  1013. }
/*
 * Resolve an array of user-supplied syncobj handles.  On success
 * *syncobjs_out receives a kmalloc'd array of referenced syncobjs that
 * the caller must release with drm_syncobj_array_free().  Returns 0 or
 * a negative errno (-ENOMEM, -EFAULT, -ENOENT).
 */
static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	/* The raw handles are no longer needed once resolved. */
	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	/* Unwind only the references taken so far (indices 0..i-1). */
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}
  1053. static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
  1054. uint32_t count)
  1055. {
  1056. uint32_t i;
  1057. for (i = 0; i < count; i++)
  1058. drm_syncobj_put(syncobjs[i]);
  1059. kfree(syncobjs);
  1060. }
  1061. int
  1062. drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
  1063. struct drm_file *file_private)
  1064. {
  1065. struct drm_syncobj_wait *args = data;
  1066. struct drm_syncobj **syncobjs;
  1067. int ret = 0;
  1068. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  1069. return -EOPNOTSUPP;
  1070. if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
  1071. DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
  1072. return -EINVAL;
  1073. if (args->count_handles == 0)
  1074. return -EINVAL;
  1075. ret = drm_syncobj_array_find(file_private,
  1076. u64_to_user_ptr(args->handles),
  1077. args->count_handles,
  1078. &syncobjs);
  1079. if (ret < 0)
  1080. return ret;
  1081. ret = drm_syncobj_array_wait(dev, file_private,
  1082. args, NULL, syncobjs, false);
  1083. drm_syncobj_array_free(syncobjs, args->count_handles);
  1084. return ret;
  1085. }
  1086. int
  1087. drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
  1088. struct drm_file *file_private)
  1089. {
  1090. struct drm_syncobj_timeline_wait *args = data;
  1091. struct drm_syncobj **syncobjs;
  1092. int ret = 0;
  1093. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
  1094. return -EOPNOTSUPP;
  1095. if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
  1096. DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
  1097. DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
  1098. return -EINVAL;
  1099. if (args->count_handles == 0)
  1100. return -EINVAL;
  1101. ret = drm_syncobj_array_find(file_private,
  1102. u64_to_user_ptr(args->handles),
  1103. args->count_handles,
  1104. &syncobjs);
  1105. if (ret < 0)
  1106. return ret;
  1107. ret = drm_syncobj_array_wait(dev, file_private,
  1108. NULL, args, syncobjs, true);
  1109. drm_syncobj_array_free(syncobjs, args->count_handles);
  1110. return ret;
  1111. }
  1112. int
  1113. drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
  1114. struct drm_file *file_private)
  1115. {
  1116. struct drm_syncobj_array *args = data;
  1117. struct drm_syncobj **syncobjs;
  1118. uint32_t i;
  1119. int ret;
  1120. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  1121. return -EOPNOTSUPP;
  1122. if (args->pad != 0)
  1123. return -EINVAL;
  1124. if (args->count_handles == 0)
  1125. return -EINVAL;
  1126. ret = drm_syncobj_array_find(file_private,
  1127. u64_to_user_ptr(args->handles),
  1128. args->count_handles,
  1129. &syncobjs);
  1130. if (ret < 0)
  1131. return ret;
  1132. for (i = 0; i < args->count_handles; i++)
  1133. drm_syncobj_replace_fence(syncobjs[i], NULL);
  1134. drm_syncobj_array_free(syncobjs, args->count_handles);
  1135. return 0;
  1136. }
  1137. int
  1138. drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
  1139. struct drm_file *file_private)
  1140. {
  1141. struct drm_syncobj_array *args = data;
  1142. struct drm_syncobj **syncobjs;
  1143. uint32_t i;
  1144. int ret;
  1145. if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
  1146. return -EOPNOTSUPP;
  1147. if (args->pad != 0)
  1148. return -EINVAL;
  1149. if (args->count_handles == 0)
  1150. return -EINVAL;
  1151. ret = drm_syncobj_array_find(file_private,
  1152. u64_to_user_ptr(args->handles),
  1153. args->count_handles,
  1154. &syncobjs);
  1155. if (ret < 0)
  1156. return ret;
  1157. for (i = 0; i < args->count_handles; i++) {
  1158. ret = drm_syncobj_assign_null_handle(syncobjs[i]);
  1159. if (ret < 0)
  1160. break;
  1161. }
  1162. drm_syncobj_array_free(syncobjs, args->count_handles);
  1163. return ret;
  1164. }
/*
 * DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL: signal an array of timeline syncobjs
 * at the user-supplied points (or point 0 when no points array is given)
 * by attaching already-signaled stub fences wrapped in chain nodes.
 */
int
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	struct dma_fence_chain **chains;
	uint64_t *points;
	uint32_t i, j;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	points = kmalloc_array(args->count_handles, sizeof(*points),
			       GFP_KERNEL);
	if (!points) {
		ret = -ENOMEM;
		goto out;
	}
	/* A NULL points pointer means "signal point 0" for every handle. */
	if (!u64_to_user_ptr(args->points)) {
		memset(points, 0, args->count_handles * sizeof(uint64_t));
	} else if (copy_from_user(points, u64_to_user_ptr(args->points),
				  sizeof(uint64_t) * args->count_handles)) {
		ret = -EFAULT;
		goto err_points;
	}

	chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
	if (!chains) {
		ret = -ENOMEM;
		goto err_points;
	}
	/* Allocate every chain node up front so signaling cannot fail
	 * half-way through the array.
	 */
	for (i = 0; i < args->count_handles; i++) {
		chains[i] = dma_fence_chain_alloc();
		if (!chains[i]) {
			for (j = 0; j < i; j++)
				dma_fence_chain_free(chains[j]);
			ret = -ENOMEM;
			goto err_chains;
		}
	}

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence *fence = dma_fence_get_stub();

		drm_syncobj_add_point(syncobjs[i], chains[i],
				      fence, points[i]);
		dma_fence_put(fence);
	}
err_chains:
	kfree(chains);
err_points:
	kfree(points);
out:
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
/*
 * DRM_IOCTL_SYNCOBJ_QUERY: for each syncobj, report either the last
 * signaled timeline point or, with LAST_SUBMITTED, the last submitted
 * point.  Results are written to the user-supplied points array.
 */
int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	uint64_t __user *points = u64_to_user_ptr(args->points);
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence_chain *chain;
		struct dma_fence *fence;
		uint64_t point;

		fence = drm_syncobj_fence_get(syncobjs[i]);
		chain = to_dma_fence_chain(fence);
		if (chain) {
			struct dma_fence *iter, *last_signaled =
				dma_fence_get(fence);

			if (args->flags &
			    DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
				point = fence->seqno;
			} else {
				/* Walk the chain looking for the newest node
				 * that still belongs to this timeline.
				 */
				dma_fence_chain_for_each(iter, fence) {
					if (iter->context != fence->context) {
						dma_fence_put(iter);
						/* It is most likely that timeline has
						 * unorder points. */
						break;
					}
					dma_fence_put(last_signaled);
					last_signaled = dma_fence_get(iter);
				}
				/* If the newest node isn't signaled yet,
				 * report the previous point in the chain.
				 */
				point = dma_fence_is_signaled(last_signaled) ?
					last_signaled->seqno :
					to_dma_fence_chain(last_signaled)->prev_seqno;
			}
			dma_fence_put(last_signaled);
		} else {
			/* Fence is absent or not a chain fence: report 0. */
			point = 0;
		}
		dma_fence_put(fence);

		/* copy_to_user() returns bytes NOT copied; fold to -EFAULT. */
		ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
		ret = ret ? -EFAULT : 0;
		if (ret)
			break;
	}
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}