virtgpu_vram.c

// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"

#include <linux/dma-mapping.h>
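
/*
 * GEM free callback for VRAM objects: if the resource was created on the
 * host, detach any host-visible mapping that is still present, then release
 * the host-side resource and notify the device.
 */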
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	bool unmap;

	if (bo->created) {
		spin_lock(&vgdev->host_visible_lock);
		unmap = drm_mm_node_allocated(&vram->vram_node);
		spin_unlock(&vgdev->host_visible_lock);

		if (unmap)
			virtio_gpu_cmd_unmap(vgdev, bo);

		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		return;
	}
}
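
/* VRAM mappings reuse the generic GEM vm open/close refcounting hooks. */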
static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
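
/*
 * mmap callback for host-visible VRAM objects: wait until the host mapping
 * has been established, apply the caching attributes reported by the host,
 * and map the reserved host-visible range directly into the caller's
 * address space.
 */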
static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
				struct vm_area_struct *vma)
{
	int ret;
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	unsigned long vm_size = vma->vm_end - vma->vm_start;

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
		return -EINVAL;

	wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
	if (vram->map_state != STATE_OK)
		return -EINVAL;

	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	vma->vm_ops = &virtio_gpu_vram_vm_ops;

	if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Partial mappings of GEM buffers don't happen much in practice. */
	if (vm_size != vram->vram_node.size)
		return -EINVAL;

	ret = io_remap_pfn_range(vma, vma->vm_start,
				 vram->vram_node.start >> PAGE_SHIFT,
				 vm_size, vma->vm_page_prot);
	return ret;
}
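
/*
 * dma-buf map callback: mappable blobs get a single-entry sg_table covering
 * the reserved host-visible range; non-mappable blobs can only be shared
 * with other virtio devices (via the resource UUID), so a stub sg_table is
 * returned for them.
 */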
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
					     struct device *dev,
					     enum dma_data_direction dir)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	struct sg_table *sgt;
	dma_addr_t addr;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
		// Virtio devices can access the dma-buf via its UUID. Return a stub
		// sg_table so the dma-buf API still works.
		if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
			ret = -EIO;
			goto out;
		}
		return sgt;
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret)
		goto out;

	addr = dma_map_resource(dev, vram->vram_node.start,
				vram->vram_node.size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	ret = dma_mapping_error(dev, addr);
	if (ret)
		goto out;

	sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
	sg_dma_address(sgt->sgl) = addr;
	sg_dma_len(sgt->sgl) = vram->vram_node.size;

	return sgt;
out:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(ret);
}
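
/*
 * Undo virtio_gpu_vram_map_dma_buf(): unmap the DMA address if the table
 * has an entry, then release the sg_table.
 */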
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
				   struct sg_table *sgt,
				   enum dma_data_direction dir)
{
	if (sgt->nents) {
		dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
				   sg_dma_len(sgt->sgl), dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.free = virtio_gpu_vram_free,
	.mmap = virtio_gpu_vram_mmap,
	.export = virtgpu_gem_prime_export,
};

bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_vram_funcs;
}
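
/*
 * Reserve a slice of the host-visible memory region for @bo and ask the
 * host to map the blob resource at the corresponding offset.
 */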
static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
{
	int ret;
	uint64_t offset;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

	if (!vgdev->has_host_visible)
		return -EINVAL;

	spin_lock(&vgdev->host_visible_lock);
	ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
				 bo->base.base.size);
	spin_unlock(&vgdev->host_visible_lock);

	if (ret)
		return ret;

	objs = virtio_gpu_array_alloc(1);
	if (!objs) {
		ret = -ENOMEM;
		goto err_remove_node;
	}

	virtio_gpu_array_add_obj(objs, &bo->base.base);
	/*TODO: Add an error checking helper function in drm_mm.h */
	offset = vram->vram_node.start - vgdev->host_visible_region.addr;

	ret = virtio_gpu_cmd_map(vgdev, objs, offset);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		goto err_remove_node;
	}

	return 0;

err_remove_node:
	spin_lock(&vgdev->host_visible_lock);
	drm_mm_remove_node(&vram->vram_node);
	spin_unlock(&vgdev->host_visible_lock);
	return ret;
}
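
/*
 * Allocate a VRAM-backed GEM object, create the matching blob resource on
 * the host and, for mappable blobs, establish the host-visible mapping.
 */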
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_object_params *params,
			   struct virtio_gpu_object **bo_ptr)
{
	struct drm_gem_object *obj;
	struct virtio_gpu_object_vram *vram;
	int ret;

	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
	if (!vram)
		return -ENOMEM;

	obj = &vram->base.base.base;
	obj->funcs = &virtio_gpu_vram_funcs;

	params->size = PAGE_ALIGN(params->size);
	drm_gem_private_object_init(vgdev->ddev, obj, params->size);

	/* Create fake offset */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		kfree(vram);
		return ret;
	}

	ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
	if (ret) {
		kfree(vram);
		return ret;
	}

	virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
					    0);
	if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
		ret = virtio_gpu_vram_map(&vram->base);
		if (ret) {
			virtio_gpu_vram_free(obj);
			return ret;
		}
	}

	*bo_ptr = &vram->base;
	return 0;
}
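
/*
 * Rough userspace sketch (not part of this file) of how the mmap path above
 * is typically exercised through the virtio-gpu uapi. The device path, the
 * 64 KiB size and the blob_id are placeholder values, and a HOST3D blob
 * normally refers to a blob already set up through the 3D context, so treat
 * this as an illustration rather than a complete program:
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *
 *	struct drm_virtgpu_resource_create_blob blob = {
 *		.blob_mem   = VIRTGPU_BLOB_MEM_HOST3D,
 *		.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
 *		.size       = 64 * 1024,
 *		.blob_id    = 1,
 *	};
 *	ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob);
 *
 *	struct drm_virtgpu_map map = { .handle = blob.bo_handle };
 *	ioctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map);
 *
 *	void *ptr = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */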