virtgpu_gem.c

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"

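/*
 * Create a GEM object from @params and a userspace handle for it. On
 * success the handle owns the only reference; the allocation reference
 * is dropped before returning, so *obj_p stays valid only as long as
 * the handle does.
 */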
static int virtio_gpu_gem_create(struct drm_file *file,
				 struct drm_device *dev,
				 struct virtio_gpu_object_params *params,
				 struct drm_gem_object **obj_p,
				 uint32_t *handle_p)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
	if (ret < 0)
		return ret;

	ret = drm_gem_handle_create(file, &obj->base.base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base.base);
		return ret;
	}
	*obj_p = &obj->base.base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&obj->base.base);

	*handle_p = handle;
	return 0;
}

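/*
 * DRM_IOCTL_MODE_CREATE_DUMB: allocate a 32 bpp scanout buffer. When
 * the device supports blob resources but not 3D, the dumb buffer is
 * backed by a shareable guest-memory blob resource instead.
 */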
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	int ret;
	uint32_t pitch;

	if (args->bpp != 32)
		return -EINVAL;

	pitch = args->width * 4;
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;

	if (vgdev->has_resource_blob && !vgdev->has_virgl_3d) {
		params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
		params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
		params.blob = true;
	}

	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		return ret;

	args->pitch = pitch;
	return 0;
}

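/*
 * DRM_IOCTL_MODE_MAP_DUMB: look up the fake mmap offset of a dumb
 * buffer so userspace can mmap() it through the DRM device node.
 */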
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;

	BUG_ON(!offset_p);
	gobj = drm_gem_object_lookup(file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;
	*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
	drm_gem_object_put(gobj);
	return 0;
}

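/*
 * Called by the DRM core whenever a handle to @obj is created in @file.
 * On 3D-capable devices the resource is attached to the file's host
 * rendering context, creating that context first if it does not exist
 * yet.
 */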
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		goto out_notify;

	/* the context might still be missing when the first ioctl is
	 * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
	 */
	virtio_gpu_create_context(obj->dev, file);

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
					       objs);
out_notify:
	virtio_gpu_notify(vgdev);
	return 0;
}

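/*
 * Counterpart to virtio_gpu_gem_object_open(): detach the resource from
 * the file's host rendering context when the handle goes away.
 */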
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
					       objs);
	virtio_gpu_notify(vgdev);
}

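/*
 * Object arrays pin a set of GEM objects for the lifetime of a virtio
 * command. Typical usage (illustrative sketch only, error handling
 * omitted):
 *
 *	objs = virtio_gpu_array_alloc(1);
 *	virtio_gpu_array_add_obj(objs, obj);
 *	virtio_gpu_array_lock_resv(objs);
 *	// ... queue the command, add the fence ...
 *	virtio_gpu_array_unlock_resv(objs);
 *	virtio_gpu_array_put_free_delayed(vgdev, objs);
 */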
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
	struct virtio_gpu_object_array *objs;

	objs = kmalloc(struct_size(objs, objs, nents), GFP_KERNEL);
	if (!objs)
		return NULL;

	objs->nents = 0;
	objs->total = nents;
	return objs;
}

static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
	kfree(objs);
}

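/*
 * Build an object array from an array of userspace handles. Returns
 * NULL if allocation fails or a handle does not resolve; any references
 * taken so far are dropped in that case.
 */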
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles,
			      u32 nents)
{
	struct virtio_gpu_object_array *objs;
	u32 i;

	objs = virtio_gpu_array_alloc(nents);
	if (!objs)
		return NULL;

	for (i = 0; i < nents; i++) {
		objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
		if (!objs->objs[i]) {
			objs->nents = i;
			virtio_gpu_array_put_free(objs);
			return NULL;
		}
	}
	objs->nents = i;
	return objs;
}

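/* Append @obj to the array, taking a reference; WARNs if it is full. */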
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj)
{
	if (WARN_ON_ONCE(objs->nents == objs->total))
		return;

	drm_gem_object_get(obj);
	objs->objs[objs->nents] = obj;
	objs->nents++;
}

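/*
 * Lock the reservation objects of all entries, using a ww acquire
 * context when there is more than one to avoid deadlocks, and reserve
 * one fence slot on each.
 */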
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
	unsigned int i;
	int ret;

	if (objs->nents == 1) {
		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
	} else {
		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
						&objs->ticket);
	}
	if (ret)
		return ret;

	for (i = 0; i < objs->nents; ++i) {
		ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1);
		if (ret) {
			virtio_gpu_array_unlock_resv(objs);
			return ret;
		}
	}
	return ret;
}

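/* Unlock the reservations taken by virtio_gpu_array_lock_resv(). */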
void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
	if (objs->nents == 1) {
		dma_resv_unlock(objs->objs[0]->resv);
	} else {
		drm_gem_unlock_reservations(objs->objs, objs->nents,
					    &objs->ticket);
	}
}

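/* Attach @fence as a write fence to every object in the array. */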
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
				struct dma_fence *fence)
{
	int i;

	for (i = 0; i < objs->nents; i++)
		dma_resv_add_fence(objs->objs[i]->resv, fence,
				   DMA_RESV_USAGE_WRITE);
}

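/* Drop the array's object references and free the array itself. */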
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
	u32 i;

	if (!objs)
		return;

	for (i = 0; i < objs->nents; i++)
		drm_gem_object_put(objs->objs[i]);
	virtio_gpu_array_free(objs);
}

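/*
 * Queue the array for release from the obj_free_work worker instead of
 * dropping the references directly, presumably for callers that must
 * not sleep, since the final drm_gem_object_put() may.
 */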
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object_array *objs)
{
	spin_lock(&vgdev->obj_free_lock);
	list_add_tail(&objs->next, &vgdev->obj_free_list);
	spin_unlock(&vgdev->obj_free_lock);
	schedule_work(&vgdev->obj_free_work);
}

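/* Worker for obj_free_work: release every array queued for delayed free. */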
void virtio_gpu_array_put_free_work(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device, obj_free_work);
	struct virtio_gpu_object_array *objs;

	spin_lock(&vgdev->obj_free_lock);
	while (!list_empty(&vgdev->obj_free_list)) {
		objs = list_first_entry(&vgdev->obj_free_list,
					struct virtio_gpu_object_array, next);
		list_del(&objs->next);
		spin_unlock(&vgdev->obj_free_lock);
		virtio_gpu_array_put_free(objs);
		spin_lock(&vgdev->obj_free_lock);
	}
	spin_unlock(&vgdev->obj_free_lock);
}