msm_gem.c
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/qcom-dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/version.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
#include <linux/ion.h>
#endif

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "sde_dbg.h"

#define GUARD_BYTES (BIT(8) - 1)
#define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	return !msm_obj->vram_node;
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}
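
/*
 * Lazily allocate the backing pages and sg table for the object: shmem pages
 * when an IOMMU is present, the VRAM carveout otherwise. Imported objects
 * simply return the existing page array. WC/uncached buffers are additionally
 * dma-mapped so the new pages are clean for the non-coherent display hardware.
 */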
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct device *aspace_dev;

	if (obj->import_attach)
		return msm_obj->pages;

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		if (msm_obj->vram_node) {
			goto end;
		/*
		 * For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent
		 */
		} else if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
			aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
			if (aspace_dev) {
				dma_map_sg(aspace_dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
				/* mark the buffer as external buffer */
				msm_obj->flags |= MSM_BO_EXTBUF;
			} else {
				DRM_ERROR("failed to get aspace_device\n");
			}
		}
	}

end:
	return msm_obj->pages;
}
static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);

	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

void msm_gem_sync(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj;
	struct device *aspace_dev;

	if (!obj)
		return;

	msm_obj = to_msm_bo(obj);

	if (msm_obj->vram_node)
		return;

	/*
	 * dma_sync_sg_for_device synchronises a single contiguous or
	 * scatter/gather mapping for the CPU and device.
	 */
	aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
	if (aspace_dev)
		dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	else
		DRM_ERROR("failed to get aspace_device\n");
}
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
#else
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
#endif
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we dont need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %pK pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);

	return offset;
}
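
/*
 * Return the physical address of the first sg entry, mapping the imported
 * dma-buf attachment on first use when no sg table exists yet.
 */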
dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct sg_table *sgt;

	if (!msm_obj->sgt) {
		sgt = dma_buf_map_attachment(obj->import_attach,
				DMA_BIDIRECTIONAL);
		if (IS_ERR_OR_NULL(sgt)) {
			DRM_ERROR("dma_buf_map_attachment failure, err=%ld\n",
					PTR_ERR(sgt));
			return 0;
		}
		msm_obj->sgt = sgt;
	}

	return sg_phys(msm_obj->sgt->sgl);
}
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;
	msm_obj->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}
/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
				msm_obj->flags);
		/*
		 * put_iova removes the domain connected to the obj which makes
		 * the aspace inaccessible. Store the aspace, as it is used to
		 * update the active_list during gem_free_obj and gem_purge.
		 */
		msm_obj->aspace = vma->aspace;
		del_vma(vma);
	}
}
/* get iova, taking a reference. Should have a matching put */
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;
		struct device *dev;
		struct dma_buf *dmabuf;
		bool reattach = false;
		unsigned long dma_map_attrs;

		dev = msm_gem_get_aspace_device(aspace);
		if ((dev && obj->import_attach) &&
				((dev != obj->import_attach->dev) ||
				msm_obj->obj_dirty)) {

			dmabuf = obj->import_attach->dmabuf;
			dma_map_attrs = obj->import_attach->dma_map_attrs;

			DRM_DEBUG("detach nsec-dev:%pK attach sec-dev:%pK\n",
					obj->import_attach->dev, dev);
			SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt,
					msm_obj->obj_dirty);

			if (msm_obj->sgt)
				dma_buf_unmap_attachment(obj->import_attach,
						msm_obj->sgt, DMA_BIDIRECTIONAL);
			dma_buf_detach(dmabuf, obj->import_attach);

			obj->import_attach = dma_buf_attach(dmabuf, dev);
			if (IS_ERR(obj->import_attach)) {
				DRM_ERROR("dma_buf_attach failure, err=%ld\n",
						PTR_ERR(obj->import_attach));
				ret = PTR_ERR(obj->import_attach);
				return ret;
			}
			/*
			 * obj->import_attach is created as part of dma_buf_attach.
			 * Re-apply the dma_map_attr in this case to be in sync
			 * with iommu_map attrs during map_attachment callback.
			 */
			obj->import_attach->dma_map_attrs |= dma_map_attrs;
			msm_obj->obj_dirty = false;
			reattach = true;
		}

		/* perform delayed import for buffers without existing sgt */
		if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))
				|| reattach) {
			ret = msm_gem_delayed_import(obj);
			if (ret) {
				DRM_ERROR("delayed dma-buf import failed %d\n",
						ret);
				msm_obj->obj_dirty = true;
				return ret;
			}
		}

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			return ret;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT,
				msm_obj->flags);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	if (aspace && !msm_obj->in_active_list) {
		mutex_lock(&aspace->list_lock);
		msm_gem_add_obj_to_aspace_active_list(aspace, obj);
		mutex_unlock(&aspace->list_lock);
	}

	return 0;

fail:
	del_vma(vma);
	return ret;
}
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
				msm_obj->flags);
	mutex_unlock(&msm_obj->lock);
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
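
/*
 * Notify aspace clients of an SMMU domain attach/detach and update mappings
 * of the buffers on the aspace active list: imported buffers are unmapped and
 * marked dirty on detach, and active buffers are re-mapped on attach.
 */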
void msm_gem_aspace_domain_attach_detach_update(
		struct msm_gem_address_space *aspace,
		bool is_detach)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	struct aspace_client *aclient;
	int ret;
	uint64_t iova;

	if (!aspace)
		return;

	mutex_lock(&aspace->list_lock);
	if (is_detach) {
		/* Indicate to clients domain is getting detached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}

		/*
		 * Unmap active buffers. Typically clients should do this when
		 * the callback is called, but it also needs to be done for the
		 * buffers which are not attached to any planes.
		 */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			if (obj->import_attach) {
				mutex_lock(&msm_obj->lock);
				put_iova(obj);
				msm_obj->obj_dirty = true;
				mutex_unlock(&msm_obj->lock);
			}
		}
	} else {
		/* map active buffers */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			ret = msm_gem_get_iova(obj, aspace, &iova);
			if (ret) {
				mutex_unlock(&aspace->list_lock);
				return;
			}
		}

		/* Indicate to clients domain is attached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}
	}
	mutex_unlock(&aspace->list_lock);
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}
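
/*
 * Map the object into kernel virtual address space, allocating backing pages
 * if needed. Imported dma-bufs are mapped via dma_buf_vmap() (preceded by
 * dma_buf_begin_cpu_access() on non-coherent devices); native objects use
 * vmap(). The mapping is refcounted via vmap_count.
 */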
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	struct dma_buf_map map;
#endif
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
				msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		if (obj->import_attach) {
			if (obj->dev && obj->dev->dev && !dev_is_dma_coherent(obj->dev->dev)) {
				ret = dma_buf_begin_cpu_access(
						obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
				if (ret)
					goto fail;
			}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
			ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
			if (ret)
				goto fail;

			msm_obj->vaddr = map.vaddr;
#else
			msm_obj->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
#endif
		} else {
			msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
					VM_MAP, PAGE_KERNEL);
		}

		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(msm_obj->vaddr);
#endif

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	if (obj->import_attach) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
		dma_buf_vunmap(obj->import_attach->dmabuf, &map);
		dma_buf_end_cpu_access(obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
#else
		dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
		if (obj->dev && obj->dev->dev && !dev_is_dma_coherent(obj->dev->dev))
			dma_buf_end_cpu_access(obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
#endif
	} else {
		vunmap(msm_obj->vaddr);
	}

	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	ret = dma_resv_wait_timeout(msm_obj->resv, write, true, remain);
#else
	ret = dma_resv_wait_timeout_rcu(msm_obj->resv, write, true, remain);
#endif
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(msm_obj->vaddr);
#endif

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	mutex_lock(&priv->mm_lock);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);
	if (msm_obj->aspace) {
		mutex_lock(&msm_obj->aspace->list_lock);
		msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
				obj);
		mutex_unlock(&msm_obj->aspace->list_lock);
	}

	if (obj->import_attach) {
		if (msm_obj->vaddr)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
			dma_buf_vunmap(obj->import_attach->dmabuf, &map);
#else
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
#endif

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		dma_resv_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.vm_ops = &vm_ops,
};
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct dma_resv *resv,
		struct drm_gem_object **obj)
{
#else
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct dma_resv *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
#endif
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		dma_resv_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);
	INIT_LIST_HEAD(&msm_obj->iova_list);
	msm_obj->aspace = msm_gem_smmu_address_space_get(dev,
			MSM_SMMU_DOMAIN_UNSECURE);
	if (IS_ERR(msm_obj->aspace))
		msm_obj->aspace = NULL;
	msm_obj->in_active_list = false;
	msm_obj->obj_dirty = false;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&priv->mm_lock);
#endif

	*obj = &msm_obj->base;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	(*obj)->funcs = &msm_gem_object_funcs;
#endif

	return 0;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
#else
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
#endif
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
#else
	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
#endif
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		mutex_lock(&msm_obj->lock);
		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);

		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	mutex_lock(&dev->struct_mutex);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&dev->struct_mutex);
#endif

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}
#endif
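
/*
 * Complete a deferred dma-buf import: map the attachment with
 * DMA_ATTR_DELAYED_UNMAP set and store the resulting sg table on the object.
 */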
int msm_gem_delayed_import(struct drm_gem_object *obj)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct msm_gem_object *msm_obj;
	int ret = 0;

	if (!obj) {
		DRM_ERROR("NULL drm gem object\n");
		return -EINVAL;
	}

	msm_obj = to_msm_bo(obj);

	if (!obj->import_attach) {
		DRM_ERROR("NULL dma_buf_attachment in drm gem object\n");
		return -EINVAL;
	}

	attach = obj->import_attach;
	attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;

	/*
	 * dma_buf_map_attachment will call dma_map_sg for ion buffer
	 * mapping, and iova will get mapped when the function returns.
	 */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		DRM_ERROR("dma_buf_map_attachment failure, err=%d\n",
				ret);
		goto fail_import;
	}
	msm_obj->sgt = sgt;
	msm_obj->pages = NULL;

fail_import:
	return ret;
}
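
/*
 * Wrap an imported dma-buf in a GEM object. The sg table may be NULL here, in
 * which case the actual attach/map is deferred to msm_gem_delayed_import()
 * when an iova is first requested.
 */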
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	struct msm_drm_private *priv = dev->dev_private;
#endif
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	uint32_t size;
	int ret;
	unsigned long flags = 0;

	size = PAGE_ALIGN(dmabuf->size);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
#else
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
#endif
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = NULL;
	/*
	 * 1) If sg table is NULL, user should call msm_gem_delayed_import
	 *    to add back the sg table to the drm gem object.
	 *
	 * 2) Add buffer flag unconditionally for all import cases.
	 *    # Cached buffer will be attached immediately hence sgt will
	 *      be available upon gem obj creation.
	 *    # Un-cached buffer will follow delayed attach hence sgt
	 *      will be NULL upon gem obj creation.
	 */
	msm_obj->flags |= MSM_BO_EXTBUF;

	ret = dma_buf_get_flags(dmabuf, &flags);
	if (ret)
		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);

	mutex_unlock(&msm_obj->lock);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	mutex_lock(&dev->struct_mutex);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&dev->struct_mutex);
#endif

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;

err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}
#endif
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}
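
/* release the kernel mapping and iova taken by msm_gem_get_buffer() */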
void msm_gem_put_buffer(struct drm_gem_object *gem)
{
	struct msm_gem_object *msm_gem;

	if (!gem)
		return;

	msm_gem = to_msm_bo(gem);

	msm_gem_put_iova(gem, msm_gem->aspace);
	msm_gem_put_vaddr(gem);
}
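
/*
 * Prepare a buffer for CPU and device access: map an iova in the unsecure
 * SMMU domain, take a kernel mapping, and advance the reported iova past the
 * guard bytes to the next aligned offset.
 */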
int msm_gem_get_buffer(struct drm_gem_object *gem,
		struct drm_device *dev, struct drm_framebuffer *fb,
		uint32_t align_size)
{
	struct msm_gem_object *msm_gem;
	uint32_t size;
	uint64_t iova_aligned;
	int ret = -EINVAL;

	if (!gem) {
		DRM_ERROR("invalid drm gem");
		return ret;
	}

	msm_gem = to_msm_bo(gem);

	size = PAGE_ALIGN(gem->size);
	if (size < (align_size + GUARD_BYTES)) {
		DRM_ERROR("invalid gem size");
		goto exit;
	}

	msm_gem_smmu_address_space_get(dev, MSM_SMMU_DOMAIN_UNSECURE);

	if (PTR_ERR(msm_gem->aspace) == -ENODEV) {
		DRM_DEBUG("IOMMU not present, relying on VRAM.");
	} else if (IS_ERR_OR_NULL(msm_gem->aspace)) {
		ret = PTR_ERR(msm_gem->aspace);
		DRM_ERROR("failed to get aspace");
		goto exit;
	}

	ret = msm_gem_get_iova(gem, msm_gem->aspace, &msm_gem->iova);
	if (ret) {
		DRM_ERROR("failed to get the iova ret %d", ret);
		goto exit;
	}

	msm_gem_get_vaddr(gem);
	if (IS_ERR_OR_NULL(msm_gem->vaddr)) {
		DRM_ERROR("failed to get vaddr");
		goto exit;
	}

	iova_aligned = (msm_gem->iova + GUARD_BYTES) & ALIGNED_OFFSET;
	msm_gem->offset = iova_aligned - msm_gem->iova;
	msm_gem->iova = msm_gem->iova + msm_gem->offset;

	return 0;

exit:
	msm_gem_put_buffer(gem);
	return ret;
}