// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

MODULE_IMPORT_NS(DMA_BUF);
struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer. */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};
/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the wait list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if it is not found, then either the
	 * buffer has already been released or the provided file descriptor
	 * is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and is reference count locked by us now,
	 * so prepare to wait: allocate wait object and add it to the wait list,
	 * so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}
/* DMA buffer export support. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sgtable(attach->dev, sgt,
						  gntdev_dmabuf_attach->dir,
						  DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};
struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}
static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}
/* DMA buffer import support. */

static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != INVALID_GRANT_REF)
			gnttab_end_foreign_access(refs[i], NULL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = INVALID_GRANT_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check number of pages that imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu bytes, user-space expects %d pages\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sgtable_page(sgt, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/*
		 * Check if the page is valid: a page can be invalid if it
		 * comes from VRAM or other resources which are not backed
		 * by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}
/*
 * Find the hyper dma-buf by its file descriptor and remove
 * it from the buffer's list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
					 DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);

	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}
/* DMA buffer IOCTL support. */

long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}
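
/*
 * Illustrative sketch (not part of the driver): how a user-space client
 * might drive the two export ioctls above, assuming the uapi definitions
 * from include/uapi/xen/gntdev.h. The gntdev_fd (an open file descriptor
 * for /dev/xen/gntdev), gref and remote_domid values are hypothetical,
 * and error handling is elided.
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs op = {
 *		.flags = GNTDEV_DMA_FLAG_WC,	// write-combined backing storage
 *		.count = 1,			// number of grant references
 *		.domid = remote_domid,		// domain the grants come from
 *	};
 *	op.refs[0] = gref;			// grant reference obtained elsewhere
 *
 *	// On success, op.fd holds the new dma-buf file descriptor.
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, &op);
 *
 *	// ... hand op.fd to a DMA-capable driver, then wait (up to 1 s)
 *	// for all external users to drop the buffer:
 *	struct ioctl_gntdev_dmabuf_exp_wait_released wait = {
 *		.fd = op.fd,
 *		.wait_to_ms = 1000,
 *	};
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED, &wait);
 */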
long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}
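
/*
 * Illustrative sketch (not part of the driver): the matching user-space
 * flow for the import path, again assuming the uapi definitions from
 * include/uapi/xen/gntdev.h; dmabuf_fd, gntdev_fd and remote_domid are
 * hypothetical, and error handling is elided.
 *
 *	struct ioctl_gntdev_dmabuf_imp_to_refs op = {
 *		.fd = dmabuf_fd,	// dma-buf exported by some other driver
 *		.count = 1,		// expected size of the buffer in pages
 *		.domid = remote_domid,	// domain to grant access to
 *	};
 *
 *	// On success, op.refs[] holds grant references that can be shared
 *	// with remote_domid.
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_IMP_TO_REFS, &op);
 *
 *	// ... once the remote side is done with the buffer:
 *	struct ioctl_gntdev_dmabuf_imp_release rel = { .fd = dmabuf_fd };
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_IMP_RELEASE, &rel);
 */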
struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}