msm_gem_vma.c

/*
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"

/* SDE address space operations */

static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		unsigned int flags)
{
	if (!vma->iova)
		return;

	if (aspace) {
		aspace->mmu->funcs->unmap_dma_buf(aspace->mmu, sgt,
				DMA_BIDIRECTIONAL, flags);
	}

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

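/*
 * Map the buffer's sg_table through the SMMU and record the resulting
 * DMA address as the vma's iova. A reference on the address space is
 * taken so it stays alive while the mapping exists; the matching put
 * happens in smmu_aspace_unmap_vma().
 */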
static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		int npages, unsigned int flags)
{
	int ret = -EINVAL;

	if (!aspace || !aspace->domain_attached)
		return ret;

	ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt,
			DMA_BIDIRECTIONAL, flags);
	if (!ret)
		vma->iova = sg_dma_address(sgt->sgl);

	/* Get a reference to the aspace to keep it around */
	kref_get(&aspace->kref);

	return ret;
}

static void smmu_aspace_destroy(struct msm_gem_address_space *aspace)
{
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
}

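/*
 * Active-list bookkeeping: track which objects are currently mapped in
 * this address space. Callers must hold aspace->list_lock, which the
 * WARN_ONs below assert.
 */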
static void smmu_aspace_add_to_active(
		struct msm_gem_address_space *aspace,
		struct msm_gem_object *msm_obj)
{
	WARN_ON(!mutex_is_locked(&aspace->list_lock));
	list_move_tail(&msm_obj->iova_list, &aspace->active_list);
}

static void smmu_aspace_remove_from_active(
		struct msm_gem_address_space *aspace,
		struct msm_gem_object *obj)
{
	struct msm_gem_object *msm_obj, *next;

	WARN_ON(!mutex_is_locked(&aspace->list_lock));

	list_for_each_entry_safe(msm_obj, next, &aspace->active_list,
			iova_list) {
		if (msm_obj == obj) {
			list_del(&msm_obj->iova_list);
			break;
		}
	}
}

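/*
 * Register a client callback for address space events. Registration is
 * refused with -EACCES until the SMMU domain has been attached, and a
 * duplicate <cb, cb_data> pair is rejected with -EEXIST.
 */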
static int smmu_aspace_register_cb(
		struct msm_gem_address_space *aspace,
		void (*cb)(void *, bool),
		void *cb_data)
{
	struct aspace_client *aclient = NULL;
	struct aspace_client *temp;

	if (!aspace)
		return -EINVAL;

	if (!aspace->domain_attached)
		return -EACCES;

	aclient = kzalloc(sizeof(*aclient), GFP_KERNEL);
	if (!aclient)
		return -ENOMEM;

	aclient->cb = cb;
	aclient->cb_data = cb_data;
	INIT_LIST_HEAD(&aclient->list);

	/* check if callback is already registered */
	mutex_lock(&aspace->list_lock);
	list_for_each_entry(temp, &aspace->clients, list) {
		if ((temp->cb == aclient->cb) &&
			(temp->cb_data == aclient->cb_data)) {
			kfree(aclient);
			mutex_unlock(&aspace->list_lock);
			return -EEXIST;
		}
	}

	list_move_tail(&aclient->list, &aspace->clients);
	mutex_unlock(&aspace->list_lock);

	return 0;
}

static int smmu_aspace_unregister_cb(
		struct msm_gem_address_space *aspace,
		void (*cb)(void *, bool),
		void *cb_data)
{
	struct aspace_client *aclient = NULL;
	int rc = -ENOENT;

	if (!aspace || !cb)
		return -EINVAL;

	mutex_lock(&aspace->list_lock);
	list_for_each_entry(aclient, &aspace->clients, list) {
		if ((aclient->cb == cb) &&
			(aclient->cb_data == cb_data)) {
			list_del(&aclient->list);
			kfree(aclient);
			rc = 0;
			break;
		}
	}
	mutex_unlock(&aspace->list_lock);

	return rc;
}

static const struct msm_gem_aspace_ops smmu_aspace_ops = {
	.map = smmu_aspace_map_vma,
	.unmap = smmu_aspace_unmap_vma,
	.destroy = smmu_aspace_destroy,
	.add_to_active = smmu_aspace_add_to_active,
	.remove_from_active = smmu_aspace_remove_from_active,
	.register_cb = smmu_aspace_register_cb,
	.unregister_cb = smmu_aspace_unregister_cb,
};

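/*
 * Create an address space wrapping a caller-provided SMMU msm_mmu
 * instance; this is the variant used by the SDE display path.
 */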
struct msm_gem_address_space *
msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
		const char *name)
{
	struct msm_gem_address_space *aspace;

	if (!mmu)
		return ERR_PTR(-EINVAL);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->dev = dev;
	aspace->name = name;
	aspace->mmu = mmu;
	aspace->ops = &smmu_aspace_ops;
	INIT_LIST_HEAD(&aspace->active_list);
	INIT_LIST_HEAD(&aspace->clients);
	kref_init(&aspace->kref);
	mutex_init(&aspace->list_lock);

	return aspace;
}

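/*
 * Final kref release: let the backend tear down its MMU, then free the
 * address space itself. Callers drop references via
 * msm_gem_address_space_put().
 */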
static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	if (aspace && aspace->ops->destroy)
		aspace->ops->destroy(aspace);

	kfree(aspace);
}

void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

/* GPU address space operations */

static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		unsigned int flags)
{
	if (!aspace || !vma->iova)
		return;

	if (aspace->mmu) {
		unsigned size = vma->node.size << PAGE_SHIFT;

		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
	}

	spin_lock(&aspace->lock);
	drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

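/* Dispatch an unmap to whichever backend owns this address space. */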
void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		unsigned int flags)
{
	if (aspace && aspace->ops->unmap)
		aspace->ops->unmap(aspace, vma, sgt, flags);
}

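/*
 * Allocate an iova range from the drm_mm manager and map the sg_table
 * there with read/write permission.
 */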
static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		int npages, unsigned int flags)
{
	int ret;

	spin_lock(&aspace->lock);
	if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
		spin_unlock(&aspace->lock);
		return 0;
	}

	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start << PAGE_SHIFT;

	if (aspace->mmu) {
		unsigned size = npages << PAGE_SHIFT;

		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, IOMMU_READ | IOMMU_WRITE);
	}

	/* Get a reference to the aspace to keep it around */
	kref_get(&aspace->kref);

	return ret;
}

static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
{
	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
}

static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
	.map = iommu_aspace_map_vma,
	.unmap = iommu_aspace_unmap_vma,
	.destroy = iommu_aspace_destroy,
};

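/*
 * Create a GPU address space: iovas are handed out by a drm_mm manager
 * covering the IOMMU domain's aperture.
 */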
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
		const char *name)
{
	struct msm_gem_address_space *aspace;
	u64 size = domain->geometry.aperture_end -
		domain->geometry.aperture_start;

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = msm_iommu_new(dev, domain);
	aspace->ops = &msm_iommu_aspace_ops;

	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
			size >> PAGE_SHIFT);

	kref_init(&aspace->kref);

	return aspace;
}

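/* Dispatch a map request to the backend that owns this address space. */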
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, int npages,
		unsigned int flags)
{
	if (aspace && aspace->ops->map)
		return aspace->ops->map(aspace, vma, sgt, npages, flags);

	return -EINVAL;
}

struct device *msm_gem_get_aspace_device(struct msm_gem_address_space *aspace)
{
	struct device *client_dev = NULL;

	if (aspace && aspace->mmu && aspace->mmu->funcs->get_dev)
		client_dev = aspace->mmu->funcs->get_dev(aspace->mmu);

	return client_dev;
}

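/*
 * Public wrappers around the per-backend hooks. Only the SMMU backend
 * implements the active-list and callback ops, so the wrappers below
 * are no-ops or return -EINVAL for GPU address spaces.
 */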
void msm_gem_add_obj_to_aspace_active_list(
		struct msm_gem_address_space *aspace,
		struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (aspace && aspace->ops && aspace->ops->add_to_active)
		aspace->ops->add_to_active(aspace, msm_obj);
}

void msm_gem_remove_obj_from_aspace_active_list(
		struct msm_gem_address_space *aspace,
		struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (aspace && aspace->ops && aspace->ops->remove_from_active)
		aspace->ops->remove_from_active(aspace, msm_obj);
}

int msm_gem_address_space_register_cb(struct msm_gem_address_space *aspace,
		void (*cb)(void *, bool),
		void *cb_data)
{
	if (aspace && aspace->ops && aspace->ops->register_cb)
		return aspace->ops->register_cb(aspace, cb, cb_data);

	return -EINVAL;
}

int msm_gem_address_space_unregister_cb(struct msm_gem_address_space *aspace,
		void (*cb)(void *, bool),
		void *cb_data)
{
	if (aspace && aspace->ops && aspace->ops->unregister_cb)
		return aspace->ops->unregister_cb(aspace, cb, cb_data);

	return -EINVAL;
}