  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */
  3. #include <linux/err.h>
  4. #include <linux/slab.h>
  5. #include <linux/dma-buf.h>
  6. #include <linux/dma-mapping.h>
  7. #include <drm/panfrost_drm.h>
  8. #include "panfrost_device.h"
  9. #include "panfrost_gem.h"
  10. #include "panfrost_mmu.h"
/* Called DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

	/*
	 * Make sure the BO is no longer inserted in the shrinker list before
	 * taking care of the destruction itself. If we don't do that we have a
	 * race condition between this function and what's done in
	 * panfrost_gem_shrinker_scan().
	 */
	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	/*
	 * If we still have mappings attached to the BO, there's a problem in
	 * our refcounting.
	 */
	WARN_ON_ONCE(!list_empty(&bo->mappings.list));

	if (bo->sgts) {
		int i;
		/* One sg_table per 2MB chunk (heap BOs are 2MB-rounded at
		 * creation — see panfrost_gem_create()).
		 */
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			/* Only chunks with a populated sgl were ever mapped. */
			if (bo->sgts[i].sgl) {
				dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
						  DMA_BIDIRECTIONAL, 0);
				sg_free_table(&bo->sgts[i]);
			}
		}
		kvfree(bo->sgts);
	}

	drm_gem_shmem_free(&bo->base);
}
  46. struct panfrost_gem_mapping *
  47. panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
  48. struct panfrost_file_priv *priv)
  49. {
  50. struct panfrost_gem_mapping *iter, *mapping = NULL;
  51. mutex_lock(&bo->mappings.lock);
  52. list_for_each_entry(iter, &bo->mappings.list, node) {
  53. if (iter->mmu == priv->mmu) {
  54. kref_get(&iter->refcount);
  55. mapping = iter;
  56. break;
  57. }
  58. }
  59. mutex_unlock(&bo->mappings.lock);
  60. return mapping;
  61. }
/*
 * Undo the GPU-visible parts of a mapping: remove the MMU translation
 * (if it was ever installed) and release the VA range back to the
 * per-context drm_mm allocator. Safe to call on a partially set-up
 * mapping (e.g. from the panfrost_gem_open() error path).
 */
static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
	/* Unmap before freeing the VA range the translation covers. */
	if (mapping->active)
		panfrost_mmu_unmap(mapping);

	spin_lock(&mapping->mmu->mm_lock);
	if (drm_mm_node_allocated(&mapping->mmnode))
		drm_mm_remove_node(&mapping->mmnode);
	spin_unlock(&mapping->mmu->mm_lock);
}
  72. static void panfrost_gem_mapping_release(struct kref *kref)
  73. {
  74. struct panfrost_gem_mapping *mapping;
  75. mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
  76. panfrost_gem_teardown_mapping(mapping);
  77. drm_gem_object_put(&mapping->obj->base.base);
  78. panfrost_mmu_ctx_put(mapping->mmu);
  79. kfree(mapping);
  80. }
  81. void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
  82. {
  83. if (!mapping)
  84. return;
  85. kref_put(&mapping->refcount, panfrost_gem_mapping_release);
  86. }
  87. void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
  88. {
  89. struct panfrost_gem_mapping *mapping;
  90. list_for_each_entry(mapping, &bo->mappings.list, node)
  91. panfrost_gem_teardown_mapping(mapping);
  92. }
/*
 * Called by the DRM core when @obj is opened through @file_priv.
 * Creates a per-address-space mapping for the BO: reserves a GPU VA
 * range in the file's MMU context and, for non-heap BOs, installs the
 * MMU translation immediately.
 *
 * Returns 0 on success or a negative errno.
 */
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	/* Color tags the drm_mm node so VA placement can honor noexec. */
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->node);
	kref_init(&mapping->refcount);
	/* The mapping holds a BO reference until it is released. */
	drm_gem_object_get(obj);
	mapping->obj = bo;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is 24-bits. We assume executable buffers will be less than
	 * 16MB and aligning executable buffers to their size will avoid
	 * crossing a 16MB boundary.
	 */
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

	/* The mapping also pins the file's MMU context. */
	mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
	spin_lock(&mapping->mmu->mm_lock);
	ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&mapping->mmu->mm_lock);
	if (ret)
		goto err;

	/* Heap BOs skip the up-front map; presumably their pages are mapped
	 * on GPU fault — see panfrost_mmu. TODO(review): confirm.
	 */
	if (!bo->is_heap) {
		ret = panfrost_mmu_map(mapping);
		if (ret)
			goto err;
	}

	mutex_lock(&bo->mappings.lock);
	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
	list_add_tail(&mapping->node, &bo->mappings.list);
	mutex_unlock(&bo->mappings.lock);

err:
	/* On error, the final put tears down whatever was set up above
	 * (VA node, BO ref, MMU ctx ref) via panfrost_gem_mapping_release().
	 * Note the success path falls through here with ret == 0.
	 */
	if (ret)
		panfrost_gem_mapping_put(mapping);
	return ret;
}
  140. void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
  141. {
  142. struct panfrost_file_priv *priv = file_priv->driver_priv;
  143. struct panfrost_gem_object *bo = to_panfrost_bo(obj);
  144. struct panfrost_gem_mapping *mapping = NULL, *iter;
  145. mutex_lock(&bo->mappings.lock);
  146. list_for_each_entry(iter, &bo->mappings.list, node) {
  147. if (iter->mmu == priv->mmu) {
  148. mapping = iter;
  149. list_del(&iter->node);
  150. break;
  151. }
  152. }
  153. mutex_unlock(&bo->mappings.lock);
  154. panfrost_gem_mapping_put(mapping);
  155. }
  156. static int panfrost_gem_pin(struct drm_gem_object *obj)
  157. {
  158. struct panfrost_gem_object *bo = to_panfrost_bo(obj);
  159. if (bo->is_heap)
  160. return -EINVAL;
  161. return drm_gem_shmem_pin(&bo->base);
  162. }
/*
 * GEM object operations. Lifetime and per-file open/close are handled
 * by the panfrost-specific hooks above; everything else is delegated to
 * the generic shmem helpers.
 */
static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = panfrost_gem_pin,	/* rejects heap BOs before delegating */
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};
  176. /**
  177. * panfrost_gem_create_object - Implementation of driver->gem_create_object.
  178. * @dev: DRM device
  179. * @size: Size in bytes of the memory the object will reference
  180. *
  181. * This lets the GEM helpers allocate object structs for us, and keep
  182. * our BO stats correct.
  183. */
  184. struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
  185. {
  186. struct panfrost_device *pfdev = dev->dev_private;
  187. struct panfrost_gem_object *obj;
  188. obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  189. if (!obj)
  190. return ERR_PTR(-ENOMEM);
  191. INIT_LIST_HEAD(&obj->mappings.list);
  192. mutex_init(&obj->mappings.lock);
  193. obj->base.base.funcs = &panfrost_gem_funcs;
  194. obj->base.map_wc = !pfdev->coherent;
  195. return &obj->base.base;
  196. }
  197. struct panfrost_gem_object *
  198. panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
  199. {
  200. struct drm_gem_shmem_object *shmem;
  201. struct panfrost_gem_object *bo;
  202. /* Round up heap allocations to 2MB to keep fault handling simple */
  203. if (flags & PANFROST_BO_HEAP)
  204. size = roundup(size, SZ_2M);
  205. shmem = drm_gem_shmem_create(dev, size);
  206. if (IS_ERR(shmem))
  207. return ERR_CAST(shmem);
  208. bo = to_panfrost_bo(&shmem->base);
  209. bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
  210. bo->is_heap = !!(flags & PANFROST_BO_HEAP);
  211. return bo;
  212. }
  213. struct drm_gem_object *
  214. panfrost_gem_prime_import_sg_table(struct drm_device *dev,
  215. struct dma_buf_attachment *attach,
  216. struct sg_table *sgt)
  217. {
  218. struct drm_gem_object *obj;
  219. struct panfrost_gem_object *bo;
  220. obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
  221. if (IS_ERR(obj))
  222. return ERR_CAST(obj);
  223. bo = to_panfrost_bo(obj);
  224. bo->noexec = true;
  225. return obj;
  226. }