virtgpu_object.c

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

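/*
 * Allocate a host-visible resource ID.  With the virglrenderer workaround
 * enabled, IDs come from a monotonically increasing counter and are never
 * reused; otherwise they are taken from (and later returned to) the
 * device's resource IDA.
 */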
int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);

		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

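/* Release a resource ID; a no-op while the ID-reuse workaround is active. */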
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround) {
		ida_free(&vgdev->resource_ida, id - 1);
	}
}

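/*
 * Final teardown of a buffer object: return its resource ID and release
 * the backing shmem pages or the host-visible VRAM region.
 */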
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		drm_gem_shmem_free(&bo->base);
	} else if (virtio_gpu_is_vram(bo)) {
		struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

		spin_lock(&vgdev->host_visible_lock);
		if (drm_mm_node_allocated(&vram->vram_node))
			drm_mm_remove_node(&vram->vram_node);
		spin_unlock(&vgdev->host_visible_lock);

		drm_gem_free_mmap_offset(&vram->base.base.base);
		drm_gem_object_release(&vram->base.base.base);
		kfree(vram);
	}
}

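/*
 * GEM .free callback.  If the resource was created on the host, queue an
 * unref command first; the completion handler then calls
 * virtio_gpu_cleanup_object().  Otherwise the object can be cleaned up
 * immediately.
 */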
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

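/* GEM object callbacks for shmem-backed virtio-gpu objects. */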
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.print_info = drm_gem_shmem_object_print_info,
	.export = virtgpu_gem_prime_export,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

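/*
 * Allocate the driver-specific shmem object structure and install the
 * virtio-gpu GEM object funcs.
 */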
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return ERR_PTR(-ENOMEM);

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	return &dshmem->base;
}

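/*
 * Build the virtio_gpu_mem_entry array describing the object's backing
 * pages.  When the DMA API is in use the entries carry DMA addresses and
 * lengths; otherwise they carry the physical addresses of the sg-table
 * entries.
 */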
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct scatterlist *sg;
	struct sg_table *pages;
	int si;

	pages = drm_gem_shmem_get_pages_sgt(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (use_dma_api)
		*nents = pages->nents;
	else
		*nents = pages->orig_nents;

	*ents = kvmalloc_array(*nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	if (use_dma_api) {
		for_each_sgtable_dma_sg(pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
			(*ents)[si].padding = 0;
		}
	} else {
		for_each_sgtable_sg(pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
			(*ents)[si].length = cpu_to_le32(sg->length);
			(*ents)[si].padding = 0;
		}
	}

	return 0;
}

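/*
 * Create a shmem-backed object, allocate its resource ID, and issue the
 * matching resource-create command (blob, 3D, or 2D) to the host,
 * attaching the backing storage where required.
 */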
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents = NULL;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0)
		goto err_put_id;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_free_entry;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	if (params->blob) {
		if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
			bo->guest_blob = true;

		virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
						    ents, nents);
	} else if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	}

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_free_entry:
	kvfree(ents);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free(shmem_obj);
	return ret;
}