/*
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"

/* SDE address space operations */
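
/*
 * Detach a buffer from the SMMU-backed address space: tear down the
 * dma-buf mapping, clear the cached iova, and drop the aspace reference
 * taken at map time.
 */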
static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		unsigned int flags)
{
	if (!vma->iova)
		return;

	if (aspace) {
		aspace->mmu->funcs->unmap_dma_buf(aspace->mmu, sgt,
				DMA_BIDIRECTIONAL, flags);
	}

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}
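
/*
 * Map a buffer into the SMMU address space via the dma-buf path. The
 * iova comes straight from the sg table rather than from a drm_mm
 * allocation, since the SMMU backend owns the address layout.
 */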
static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		int npages, unsigned int flags)
{
	int ret = -EINVAL;

	if (!aspace || !aspace->domain_attached)
		return ret;

	ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt,
			DMA_BIDIRECTIONAL, flags);
	if (!ret)
		vma->iova = sg_dma_address(sgt->sgl);

	/* Get a reference to the aspace to keep it around */
	kref_get(&aspace->kref);

	return ret;
}

static void smmu_aspace_destroy(struct msm_gem_address_space *aspace)
{
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
}
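
/*
 * Track objects whose mappings are live in this address space; the
 * caller must hold aspace->list_lock.
 */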
static void smmu_aspace_add_to_active(
		struct msm_gem_address_space *aspace,
		struct msm_gem_object *msm_obj)
{
	WARN_ON(!mutex_is_locked(&aspace->list_lock));
	list_move_tail(&msm_obj->iova_list, &aspace->active_list);
	msm_obj->in_active_list = true;
}

static void smmu_aspace_remove_from_active(
		struct msm_gem_address_space *aspace,
		struct msm_gem_object *obj)
{
	struct msm_gem_object *msm_obj, *next;

	WARN_ON(!mutex_is_locked(&aspace->list_lock));

	list_for_each_entry_safe(msm_obj, next, &aspace->active_list,
			iova_list) {
		if (msm_obj == obj) {
			msm_obj->in_active_list = false;
			list_del(&msm_obj->iova_list);
			break;
		}
	}
}
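
/*
 * Register a callback to be invoked on domain attach/detach. Duplicate
 * (cb, cb_data) pairs are rejected with -EEXIST.
 */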
static int smmu_aspace_register_cb(
		struct msm_gem_address_space *aspace,
		void (*cb)(void *, bool),
		void *cb_data)
{
	struct aspace_client *aclient = NULL;
	struct aspace_client *temp;

	if (!aspace)
		return -EINVAL;

	if (!aspace->domain_attached)
		return -EACCES;

	aclient = kzalloc(sizeof(*aclient), GFP_KERNEL);
	if (!aclient)
		return -ENOMEM;

	aclient->cb = cb;
	aclient->cb_data = cb_data;
	INIT_LIST_HEAD(&aclient->list);

	/* check if callback is already registered */
	mutex_lock(&aspace->list_lock);
	list_for_each_entry(temp, &aspace->clients, list) {
		if ((temp->cb == aclient->cb) &&
			(temp->cb_data == aclient->cb_data)) {
			kfree(aclient);
			mutex_unlock(&aspace->list_lock);
			return -EEXIST;
		}
	}

	list_move_tail(&aclient->list, &aspace->clients);
	mutex_unlock(&aspace->list_lock);

	return 0;
}
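
/*
 * Remove a previously registered callback; returns -ENOENT if the
 * (cb, cb_data) pair was never registered.
 */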
static int smmu_aspace_unregister_cb(
		struct msm_gem_address_space *aspace,
		void (*cb)(void *, bool),
		void *cb_data)
{
	struct aspace_client *aclient = NULL;
	int rc = -ENOENT;

	if (!aspace || !cb)
		return -EINVAL;

	mutex_lock(&aspace->list_lock);
	list_for_each_entry(aclient, &aspace->clients, list) {
		if ((aclient->cb == cb) &&
			(aclient->cb_data == cb_data)) {
			list_del(&aclient->list);
			kfree(aclient);
			rc = 0;
			break;
		}
	}
	mutex_unlock(&aspace->list_lock);

	return rc;
}

static const struct msm_gem_aspace_ops smmu_aspace_ops = {
	.map = smmu_aspace_map_vma,
	.unmap = smmu_aspace_unmap_vma,
	.destroy = smmu_aspace_destroy,
	.add_to_active = smmu_aspace_add_to_active,
	.remove_from_active = smmu_aspace_remove_from_active,
	.register_cb = smmu_aspace_register_cb,
	.unregister_cb = smmu_aspace_unregister_cb,
};
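
/*
 * Create an address space backed by an existing SMMU instance, as used
 * by the SDE/display path; no drm_mm iova allocator is set up here
 * because the SMMU backend hands out addresses itself.
 */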
struct msm_gem_address_space *
msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
		const char *name)
{
	struct msm_gem_address_space *aspace;

	if (!mmu)
		return ERR_PTR(-EINVAL);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->dev = dev;
	aspace->name = name;
	aspace->mmu = mmu;
	aspace->ops = &smmu_aspace_ops;

	INIT_LIST_HEAD(&aspace->active_list);
	INIT_LIST_HEAD(&aspace->clients);
	kref_init(&aspace->kref);
	mutex_init(&aspace->list_lock);

	return aspace;
}
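
/*
 * Final kref release: delegate cleanup to the backend's destroy op so
 * that SMMU aspaces (which never initialize the drm_mm) and IOMMU
 * aspaces are both torn down correctly.
 */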
static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	if (aspace->ops->destroy)
		aspace->ops->destroy(aspace);

	kfree(aspace);
}

void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

/* GPU address space operations */
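
/*
 * Unmap a GPU buffer and return its iova range to the drm_mm allocator,
 * then drop the aspace reference taken at map time.
 */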
static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		unsigned int flags)
{
	if (!aspace || !vma->iova)
		return;

	if (aspace->mmu) {
		unsigned size = vma->node.size << PAGE_SHIFT;

		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
	}

	spin_lock(&aspace->lock);
	drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		unsigned int flags)
{
	if (aspace && aspace->ops->unmap)
		aspace->ops->unmap(aspace, vma, sgt, flags);
}
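
/*
 * Allocate an iova range from the drm_mm allocator and map the buffer
 * through the IOMMU at that address.
 */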
static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		int npages, unsigned int flags)
{
	int ret;

	spin_lock(&aspace->lock);
	if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
		spin_unlock(&aspace->lock);
		return 0;
	}

	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start << PAGE_SHIFT;

	if (aspace->mmu) {
		unsigned size = npages << PAGE_SHIFT;

		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, IOMMU_READ | IOMMU_WRITE);
	}

	/* Get a reference to the aspace to keep it around */
	kref_get(&aspace->kref);

	return ret;
}

static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
{
	drm_mm_takedown(&aspace->mm);

	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
}

static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
	.map = iommu_aspace_map_vma,
	.unmap = iommu_aspace_unmap_vma,
	.destroy = iommu_aspace_destroy,
};
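
/*
 * Create a GPU address space on top of an iommu_domain, sizing the
 * drm_mm iova allocator from the domain's aperture.
 */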
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
		const char *name)
{
	struct msm_gem_address_space *aspace;
	u64 size = domain->geometry.aperture_end -
		domain->geometry.aperture_start;

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = msm_iommu_new(dev, domain);
	aspace->ops = &msm_iommu_aspace_ops;

	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
		size >> PAGE_SHIFT);

	kref_init(&aspace->kref);

	return aspace;
}
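
/*
 * Backend-agnostic entry points: dispatch through the aspace ops so
 * SMMU- and IOMMU-backed address spaces can be used interchangeably.
 */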
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, int npages,
		unsigned int flags)
{
	if (aspace && aspace->ops->map)
		return aspace->ops->map(aspace, vma, sgt, npages, flags);

	return -EINVAL;
}

struct device *msm_gem_get_aspace_device(struct msm_gem_address_space *aspace)
{
	struct device *client_dev = NULL;

	if (aspace && aspace->mmu && aspace->mmu->funcs->get_dev)
		client_dev = aspace->mmu->funcs->get_dev(aspace->mmu);

	return client_dev;
}

void msm_gem_add_obj_to_aspace_active_list(
		struct msm_gem_address_space *aspace,
		struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (aspace && aspace->ops && aspace->ops->add_to_active)
		aspace->ops->add_to_active(aspace, msm_obj);
}

void msm_gem_remove_obj_from_aspace_active_list(
		struct msm_gem_address_space *aspace,
		struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (aspace && aspace->ops && aspace->ops->remove_from_active)
		aspace->ops->remove_from_active(aspace, msm_obj);
}

int msm_gem_address_space_register_cb(struct msm_gem_address_space *aspace,
		void (*cb)(void *, bool),
		void *cb_data)
{
	if (aspace && aspace->ops && aspace->ops->register_cb)
		return aspace->ops->register_cb(aspace, cb, cb_data);

	return -EINVAL;
}

int msm_gem_address_space_unregister_cb(struct msm_gem_address_space *aspace,
		void (*cb)(void *, bool),
		void *cb_data)
{
	if (aspace && aspace->ops && aspace->ops->unregister_cb)
		return aspace->ops->unregister_cb(aspace, cb, cb_data);

	return -EINVAL;
}