msm_gem.c

/*
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/qcom-dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/version.h>
#include <linux/module.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
#include <linux/ion.h>
#endif

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "sde_dbg.h"

#define GUARD_BYTES	(BIT(8) - 1)
#define ALIGNED_OFFSET	(U32_MAX & ~(GUARD_BYTES))

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct device *aspace_dev;

	if (obj->import_attach)
		return msm_obj->pages;

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DISP_DEV_ERR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DISP_DEV_ERR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		if (msm_obj->vram_node) {
			goto end;
		/*
		 * For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent
		 */
		} else if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
			aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
			if (aspace_dev) {
				dma_map_sg(aspace_dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);

				/* mark the buffer as external buffer */
				msm_obj->flags |= MSM_BO_EXTBUF;
			} else {
				DRM_ERROR("failed to get aspace_device\n");
			}
		}
	}

end:
	return msm_obj->pages;
}
static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);

	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
void msm_gem_sync(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj;
	struct device *aspace_dev;

	if (!obj)
		return;

	msm_obj = to_msm_bo(obj);

	if (msm_obj->vram_node)
		return;

	/*
	 * dma_sync_sg_for_device synchronises a single contiguous or
	 * scatter/gather mapping for the CPU and device.
	 */
	aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
	if (aspace_dev)
		dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	else
		DRM_ERROR("failed to get aspace_device\n");
}
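
/*
 * Illustrative sketch (assumption, not code from this file): a caller that
 * fills a buffer with the CPU would typically flush it toward the device
 * before the display hardware reads it, e.g.:
 *
 *	vaddr = msm_gem_get_vaddr(obj);
 *	... CPU writes through vaddr ...
 *	msm_gem_put_vaddr(obj);
 *	msm_gem_sync(obj);	// dma_sync_sg_for_device() on the backing sgt
 *
 * Only msm_gem_sync() itself is defined here; the surrounding call site is
 * hypothetical.
 */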
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 25))
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
#else
	vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
#endif

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
#else
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
#endif
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %pK pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		DISP_DEV_ERR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);

	return offset;
}
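
/*
 * Illustrative sketch (assumption, not code from this file): userspace
 * consumes the "fake" offset returned above by passing it straight to
 * mmap() on the DRM device fd, e.g. after DRM_IOCTL_MODE_MAP_DUMB:
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, offset);
 *
 * drm_gem_mmap() then resolves the offset back to this GEM object and
 * msm_gem_mmap_obj() fixes up the mapping flags.
 */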
dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct sg_table *sgt;

	if (!msm_obj->sgt) {
		sgt = dma_buf_map_attachment(obj->import_attach,
				DMA_BIDIRECTIONAL);
		if (IS_ERR_OR_NULL(sgt)) {
			DRM_ERROR("dma_buf_map_attachment failure, err=%ld\n",
					PTR_ERR(sgt));
			return 0;
		}
		msm_obj->sgt = sgt;
	}

	return sg_dma_address(msm_obj->sgt->sgl);
}
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;
	msm_obj->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}
/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
				msm_obj->flags);
		/*
		 * put_iova removes the domain connected to the obj which makes
		 * the aspace inaccessible. Store the aspace, as it is used to
		 * update the active_list during gem_free_obj and gem_purge.
		 */
		msm_obj->aspace = vma->aspace;
		del_vma(vma);
	}
}
/* get iova, taking a reference. Should have a matching put */
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct device *dev;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);
	dev = msm_gem_get_aspace_device(aspace);

	if (!vma) {
		struct page **pages;
		struct dma_buf *dmabuf;
		bool reattach = false;
		unsigned long dma_map_attrs;

		if ((dev && obj->import_attach) &&
				((dev != obj->import_attach->dev) ||
				msm_obj->obj_dirty)) {

			if (of_device_is_compatible(dev->of_node, "qcom,smmu_sde_unsec") &&
					of_device_is_compatible(obj->import_attach->dev->of_node,
					"qcom,smmu_sde_sec")) {
				SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt,
						msm_obj->obj_dirty);
				DRM_ERROR("gem obj found mapped to %s, now requesting map on %s",
						dev_name(obj->import_attach->dev), dev_name(dev));
				return -EINVAL;
			}

			dmabuf = obj->import_attach->dmabuf;
			dma_map_attrs = obj->import_attach->dma_map_attrs;

			DRM_DEBUG("detach nsec-dev:%pK attach sec-dev:%pK\n",
					obj->import_attach->dev, dev);
			SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt,
					msm_obj->obj_dirty);

			if (msm_obj->sgt)
				dma_buf_unmap_attachment(obj->import_attach,
						msm_obj->sgt, DMA_BIDIRECTIONAL);
			dma_buf_detach(dmabuf, obj->import_attach);

			obj->import_attach = dma_buf_attach(dmabuf, dev);
			if (IS_ERR(obj->import_attach)) {
				DRM_ERROR("dma_buf_attach failure, err=%ld\n",
						PTR_ERR(obj->import_attach));
				ret = PTR_ERR(obj->import_attach);
				return ret;
			}

			/*
			 * obj->import_attach is created as part of dma_buf_attach.
			 * Re-apply the dma_map_attr in this case to be in sync
			 * with iommu_map attrs during map_attachment callback.
			 */
			obj->import_attach->dma_map_attrs |= dma_map_attrs;
			msm_obj->obj_dirty = false;
			reattach = true;
		}

		/* perform delayed import for buffers without existing sgt */
		if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))
				|| reattach) {
			ret = msm_gem_delayed_import(obj);
			if (ret) {
				DRM_ERROR("delayed dma-buf import failed %d\n",
						ret);
				msm_obj->obj_dirty = true;
				return ret;
			}
		}

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			return ret;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT,
				msm_obj->flags);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	if (aspace && !msm_obj->in_active_list) {
		mutex_lock(&aspace->list_lock);
		msm_gem_add_obj_to_aspace_active_list(aspace, obj);
		mutex_unlock(&aspace->list_lock);
	}

	if (dev && !dev_is_dma_coherent(dev) && (msm_obj->flags & MSM_BO_CACHED)) {
		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return 0;

fail:
	del_vma(vma);
	return ret;
}
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}
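
/*
 * Illustrative sketch (assumption, not code from this file): a typical
 * caller pins the buffer into an address space before handing the IOVA to
 * hardware, and balances the pin with msm_gem_unpin_iova() when done:
 *
 *	uint64_t iova;
 *
 *	if (!msm_gem_get_iova(obj, aspace, &iova)) {
 *		... program iova into the hardware ...
 *		msm_gem_unpin_iova(obj, aspace);
 *	}
 */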
/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}
/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
				msm_obj->flags);

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
void msm_gem_aspace_domain_attach_detach_update(
		struct msm_gem_address_space *aspace,
		bool is_detach)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	struct aspace_client *aclient;
	int ret;
	uint64_t iova;

	if (!aspace)
		return;

	mutex_lock(&aspace->list_lock);
	if (is_detach) {
		/* Indicate to clients domain is getting detached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}

		/*
		 * Unmap active buffers,
		 * typically clients should do this when the callback is called,
		 * but this needs to be done for the buffers which are not
		 * attached to any planes.
		 */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			if (obj->import_attach) {
				mutex_lock(&msm_obj->lock);
				put_iova(obj);
				msm_obj->obj_dirty = true;
				mutex_unlock(&msm_obj->lock);
			}
		}
	} else {
		/* map active buffers */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			ret = msm_gem_get_iova(obj, aspace, &iova);
			if (ret) {
				mutex_unlock(&aspace->list_lock);
				return;
			}
		}

		/* Indicate to clients domain is attached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}
	}
	mutex_unlock(&aspace->list_lock);
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_CACHED, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
	struct iosys_map map;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	struct dma_buf_map map;
#endif
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DISP_DEV_ERR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
				msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		if (obj->import_attach) {
			if (obj->dev && obj->dev->dev && !dev_is_dma_coherent(obj->dev->dev)) {
				ret = dma_buf_begin_cpu_access(
					obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
				if (ret)
					goto fail;
			}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
			ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
			if (ret)
				goto fail;

			msm_obj->vaddr = map.vaddr;
#else
			msm_obj->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
#endif
		} else {
			msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
					VM_MAP, PAGE_KERNEL);
		}

		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
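
/*
 * Illustrative sketch (assumption, not code from this file): CPU access to
 * a buffer is bracketed by a get/put pair so vmap_count stays balanced:
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *
 *	if (!IS_ERR(vaddr)) {
 *		memcpy(vaddr, data, len);	// data/len are hypothetical
 *		msm_gem_put_vaddr(obj);
 *	}
 *
 * Note that the actual unmap happens later in msm_gem_vunmap(), not in the
 * put itself.
 */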
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(msm_obj->vaddr);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(msm_obj->vaddr);
#endif

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	if (obj->import_attach) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
		dma_buf_vunmap(obj->import_attach->dmabuf, &map);
#else
		dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
#endif
		if (obj->dev && obj->dev->dev && !dev_is_dma_coherent(obj->dev->dev))
			dma_buf_end_cpu_access(obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
	} else {
		vunmap(msm_obj->vaddr);
	}

	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	ret = dma_resv_wait_timeout(msm_obj->resv, write, true, remain);
#else
	ret = dma_resv_wait_timeout_rcu(msm_obj->resv, write, true, remain);
#endif
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
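
/*
 * Illustrative sketch (assumption, not code from this file): a caller that
 * wants to read a buffer back with the CPU first waits on its reservation
 * fences via cpu_prep, then signals completion with cpu_fini:
 *
 *	ktime_t timeout = ktime_add_ms(ktime_get(), 1000);
 *
 *	if (!msm_gem_cpu_prep(obj, MSM_PREP_READ, &timeout)) {
 *		... CPU reads ...
 *		msm_gem_cpu_fini(obj);
 *	}
 */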
/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(msm_obj->vaddr);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(msm_obj->vaddr);
#endif

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	mutex_lock(&priv->mm_lock);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);
	if (msm_obj->aspace) {
		mutex_lock(&msm_obj->aspace->list_lock);
		msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
				obj);
		mutex_unlock(&msm_obj->aspace->list_lock);
	}

	if (obj->import_attach) {
		if (msm_obj->vaddr)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
			dma_buf_vunmap(obj->import_attach->dmabuf, &map);
#else
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
#endif

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		dma_resv_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.vm_ops = &vm_ops,
};
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct dma_resv *resv,
		struct drm_gem_object **obj)
{
#else
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct dma_resv *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
#endif
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DISP_DEV_ERR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		dma_resv_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);
	INIT_LIST_HEAD(&msm_obj->iova_list);
	msm_obj->aspace = msm_gem_smmu_address_space_get(dev,
			MSM_SMMU_DOMAIN_UNSECURE);
	if (IS_ERR(msm_obj->aspace))
		msm_obj->aspace = NULL;
	msm_obj->in_active_list = false;
	msm_obj->obj_dirty = false;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&priv->mm_lock);
#endif

	*obj = &msm_obj->base;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	(*obj)->funcs = &msm_gem_object_funcs;
#endif

	return 0;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
#else
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
#endif
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
#else
	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
#endif
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&priv->mm_lock);
#endif

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}
#endif
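
/*
 * Delayed import: called from msm_gem_get_iova_locked() above for imported
 * (MSM_BO_EXTBUF) objects that do not yet have an sg_table, or after the
 * attachment has been re-created on a different device, so the actual
 * dma_buf_map_attachment() is deferred until the buffer is first mapped
 * into an address space.
 */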
int msm_gem_delayed_import(struct drm_gem_object *obj)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct msm_gem_object *msm_obj;
	int ret = 0;

	if (!obj) {
		DRM_ERROR("NULL drm gem object\n");
		return -EINVAL;
	}

	msm_obj = to_msm_bo(obj);

	if (!obj->import_attach) {
		DRM_ERROR("NULL dma_buf_attachment in drm gem object\n");
		return -EINVAL;
	}

	attach = obj->import_attach;
	attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;

	/*
	 * dma_buf_map_attachment will call dma_map_sg for ion buffer
	 * mapping, and iova will get mapped when the function returns.
	 */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		DRM_ERROR("dma_buf_map_attachment failure, err=%d\n",
				ret);
		goto fail_import;
	}
	msm_obj->sgt = sgt;
	msm_obj->pages = NULL;

fail_import:
	return ret;
}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	struct msm_drm_private *priv = dev->dev_private;
#endif
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	uint32_t size;
	int ret;

	size = PAGE_ALIGN(dmabuf->size);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
#else
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
#endif
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = NULL;
	/*
	 * 1) If sg table is NULL, user should call msm_gem_delayed_import
	 *    to add back the sg table to the drm gem object.
	 *
	 * 2) Add buffer flag unconditionally for all import cases.
	 *    # Cached buffer will be attached immediately hence sgt will
	 *      be available upon gem obj creation.
	 *    # Un-cached buffer will follow delayed attach hence sgt
	 *      will be NULL upon gem obj creation.
	 */
	msm_obj->flags |= MSM_BO_EXTBUF;
	mutex_unlock(&msm_obj->lock);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&priv->mm_lock);
#endif

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}
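
/*
 * Illustrative sketch (assumption, not code from this file): a prime-import
 * style caller attaches the dma-buf to the device, wraps it in a GEM object
 * here, and lets the IOVA mapping be deferred until msm_gem_get_iova() runs:
 *
 *	attach = dma_buf_attach(dmabuf, dev);		// hypothetical caller
 *	obj = msm_gem_import(drm_dev, dmabuf, NULL);
 *	obj->import_attach = attach;
 *	...
 *	msm_gem_get_iova(obj, aspace, &iova);	// triggers delayed import
 */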
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;

err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}
#endif
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}
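
/*
 * Illustrative usage (assumption, not code from this file):
 *
 *	msm_gem_object_set_name(obj, "fb-%ux%u", width, height);
 *
 * The name is only for debug output and is truncated to the size of
 * msm_obj->name.
 */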
void msm_gem_put_buffer(struct drm_gem_object *gem)
{
	struct msm_gem_object *msm_gem;

	if (!gem)
		return;

	msm_gem = to_msm_bo(gem);

	msm_gem_put_iova(gem, msm_gem->aspace);
	msm_gem_put_vaddr(gem);
}
int msm_gem_get_buffer(struct drm_gem_object *gem,
		struct drm_device *dev, struct drm_framebuffer *fb,
		uint32_t align_size)
{
	struct msm_gem_object *msm_gem;
	uint32_t size;
	uint64_t iova_aligned;
	int ret = -EINVAL;

	if (!gem) {
		DRM_ERROR("invalid drm gem");
		return ret;
	}

	msm_gem = to_msm_bo(gem);

	size = PAGE_ALIGN(gem->size);

	if (size < (align_size + GUARD_BYTES)) {
		DRM_ERROR("invalid gem size");
		goto exit;
	}

	msm_gem_smmu_address_space_get(dev, MSM_SMMU_DOMAIN_UNSECURE);

	if (PTR_ERR(msm_gem->aspace) == -ENODEV) {
		DRM_DEBUG("IOMMU not present, relying on VRAM.");
	} else if (IS_ERR_OR_NULL(msm_gem->aspace)) {
		ret = PTR_ERR(msm_gem->aspace);
		DRM_ERROR("failed to get aspace");
		goto exit;
	}

	ret = msm_gem_get_iova(gem, msm_gem->aspace, &msm_gem->iova);
	if (ret) {
		DRM_ERROR("failed to get the iova ret %d", ret);
		goto exit;
	}

	msm_gem_get_vaddr(gem);
	if (IS_ERR_OR_NULL(msm_gem->vaddr)) {
		DRM_ERROR("failed to get vaddr");
		goto exit;
	}

	iova_aligned = (msm_gem->iova + GUARD_BYTES) & ALIGNED_OFFSET;
	msm_gem->offset = iova_aligned - msm_gem->iova;
	msm_gem->iova = msm_gem->iova + msm_gem->offset;

	return 0;

exit:
	msm_gem_put_buffer(gem);

	return ret;
}
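
/*
 * Worked example for the alignment above (illustrative): GUARD_BYTES is
 * 0xff and ALIGNED_OFFSET is ~0xff within 32 bits, so
 * (iova + GUARD_BYTES) & ALIGNED_OFFSET rounds the IOVA up to the next
 * 256-byte boundary. For iova = 0x10000040:
 *
 *	iova_aligned = (0x10000040 + 0xff) & 0xffffff00 = 0x10000100
 *	offset       = 0x10000100 - 0x10000040        = 0xc0
 *
 * and the reported iova becomes the aligned address.
 */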
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
MODULE_IMPORT_NS(DMA_BUF);
#endif