msm_gem.c

/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/qcom-dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/version.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
#include <linux/ion.h>
#endif

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "sde_dbg.h"

#define GUARD_BYTES (BIT(8) - 1)
#define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct device *aspace_dev;

	if (obj->import_attach)
		return msm_obj->pages;

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		if (msm_obj->vram_node) {
			goto end;
		/*
		 * For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent
		 */
		} else if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
			aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
			if (aspace_dev) {
				dma_map_sg(aspace_dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
				/* mark the buffer as external buffer */
				msm_obj->flags |= MSM_BO_EXTBUF;
			} else {
				DRM_ERROR("failed to get aspace_device\n");
			}
		}
	}

end:
	return msm_obj->pages;
}
static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);

	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
void msm_gem_sync(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj;
	struct device *aspace_dev;

	if (!obj)
		return;

	msm_obj = to_msm_bo(obj);

	if (msm_obj->vram_node)
		return;

	/*
	 * dma_sync_sg_for_device synchronises a single contiguous or
	 * scatter/gather mapping for the CPU and device.
	 */
	aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
	if (aspace_dev)
		dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	else
		DRM_ERROR("failed to get aspace_device\n");
}
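
/*
 * Illustrative sketch (not part of the driver): a caller that fills a
 * buffer with the CPU would typically flush its writes to the device
 * with msm_gem_sync() before handing the buffer to hardware. The
 * splash_data/splash_len names below are hypothetical.
 *
 *	void *va = msm_gem_get_vaddr(obj);
 *
 *	if (!IS_ERR_OR_NULL(va)) {
 *		memcpy(va, splash_data, splash_len);
 *		msm_gem_sync(obj);	// make CPU writes visible to the device
 *		msm_gem_put_vaddr(obj);
 *	}
 */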
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
#else
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
#endif
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %pK pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);

	return offset;
}

dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct sg_table *sgt;

	if (!msm_obj->sgt) {
		sgt = dma_buf_map_attachment(obj->import_attach,
				DMA_BIDIRECTIONAL);
		if (IS_ERR_OR_NULL(sgt)) {
			DRM_ERROR("dma_buf_map_attachment failure, err=%ld\n",
					PTR_ERR(sgt));
			return 0;
		}

		msm_obj->sgt = sgt;
	}

	return sg_phys(msm_obj->sgt->sgl);
}
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;
	msm_obj->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
				msm_obj->flags);
		/*
		 * put_iova removes the domain connected to the obj which makes
		 * the aspace inaccessible. Store the aspace, as it is used to
		 * update the active_list during gem_free_obj and gem_purge.
		 */
		msm_obj->aspace = vma->aspace;
		del_vma(vma);
	}
}
/* get iova, taking a reference. Should have a matching put */
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);
	if (!vma) {
		struct page **pages;
		struct device *dev;
		struct dma_buf *dmabuf;
		bool reattach = false;
		unsigned long dma_map_attrs;

		dev = msm_gem_get_aspace_device(aspace);
		if ((dev && obj->import_attach) &&
				((dev != obj->import_attach->dev) ||
				msm_obj->obj_dirty)) {
			dmabuf = obj->import_attach->dmabuf;
			dma_map_attrs = obj->import_attach->dma_map_attrs;

			DRM_DEBUG("detach nsec-dev:%pK attach sec-dev:%pK\n",
					obj->import_attach->dev, dev);
			SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt,
					msm_obj->obj_dirty);

			if (msm_obj->sgt)
				dma_buf_unmap_attachment(obj->import_attach,
						msm_obj->sgt, DMA_BIDIRECTIONAL);
			dma_buf_detach(dmabuf, obj->import_attach);

			obj->import_attach = dma_buf_attach(dmabuf, dev);
			if (IS_ERR(obj->import_attach)) {
				DRM_ERROR("dma_buf_attach failure, err=%ld\n",
						PTR_ERR(obj->import_attach));
				ret = PTR_ERR(obj->import_attach);
				return ret;
			}
			/*
			 * obj->import_attach is created as part of dma_buf_attach.
			 * Re-apply the dma_map_attr in this case to be in sync
			 * with iommu_map attrs during map_attachment callback.
			 */
			obj->import_attach->dma_map_attrs |= dma_map_attrs;
			msm_obj->obj_dirty = false;
			reattach = true;
		}

		/* perform delayed import for buffers without existing sgt */
		if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))
				|| reattach) {
			ret = msm_gem_delayed_import(obj);
			if (ret) {
				DRM_ERROR("delayed dma-buf import failed %d\n",
						ret);
				msm_obj->obj_dirty = true;
				return ret;
			}
		}

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			return ret;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT,
				msm_obj->flags);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	if (aspace && !msm_obj->in_active_list) {
		mutex_lock(&aspace->list_lock);
		msm_gem_add_obj_to_aspace_active_list(aspace, obj);
		mutex_unlock(&aspace->list_lock);
	}

	return 0;

fail:
	del_vma(vma);
	return ret;
}
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}
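
/*
 * Illustrative sketch (not part of the driver): a typical caller maps a
 * buffer into a display address space and programs the returned IOVA
 * into hardware. The kms->aspace pointer is assumed to come from the
 * caller's own context and is hypothetical here.
 *
 *	uint64_t iova;
 *	int ret;
 *
 *	ret = msm_gem_get_iova(obj, kms->aspace, &iova);
 *	if (ret)
 *		return ret;
 *	// program iova into the hardware; later release the mapping with
 *	// msm_gem_put_iova(obj, kms->aspace);
 */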
/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
void msm_gem_aspace_domain_attach_detach_update(
		struct msm_gem_address_space *aspace,
		bool is_detach)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	struct aspace_client *aclient;
	int ret;
	uint64_t iova;

	if (!aspace)
		return;

	mutex_lock(&aspace->list_lock);
	if (is_detach) {
		/* Indicate to clients domain is getting detached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}

		/*
		 * Unmap active buffers. Typically clients should do this
		 * when the callback is called, but it still needs to be
		 * done here for buffers which are not attached to any plane.
		 */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			if (obj->import_attach) {
				mutex_lock(&msm_obj->lock);
				put_iova(obj);
				msm_obj->obj_dirty = true;
				mutex_unlock(&msm_obj->lock);
			}
		}
	} else {
		/* map active buffers */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			ret = msm_gem_get_iova(obj, aspace, &iova);
			if (ret) {
				mutex_unlock(&aspace->list_lock);
				return;
			}
		}

		/* Indicate to clients domain is attached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}
	}
	mutex_unlock(&aspace->list_lock);
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
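
/*
 * Worked example (illustrative only): for a 1920x1080 XRGB8888 dumb
 * buffer, args->bpp is 32, so align_pitch() starts from 1920 * 4 =
 * 7680 bytes and rounds up to whatever pitch alignment the helper
 * enforces; args->size is then PAGE_ALIGN(pitch * 1080). The exact
 * pitch alignment is defined by align_pitch() and is not assumed here.
 */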
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}
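
/*
 * Illustrative sketch (userspace side, not part of this file): the fake
 * offset returned above is what a client passes to mmap() on the DRM fd
 * after DRM_IOCTL_MODE_MAP_DUMB. The fd/handle/size variables are
 * assumed to come from an earlier dumb-buffer creation.
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, map.offset);
 */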
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	struct dma_buf_map map;
#endif
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
				msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		if (obj->import_attach) {
			if (obj->dev && obj->dev->dev && !dev_is_dma_coherent(obj->dev->dev)) {
				ret = dma_buf_begin_cpu_access(
					obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
				if (ret)
					goto fail;
			}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
			ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
			if (ret)
				goto fail;

			msm_obj->vaddr = map.vaddr;
#else
			msm_obj->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
#endif
		} else {
			msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
					VM_MAP, PAGE_KERNEL);
		}

		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}
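
/*
 * Illustrative sketch (not part of the driver): kernel clients pair
 * msm_gem_get_vaddr() with msm_gem_put_vaddr() so vmap_count stays
 * balanced and the object can eventually be vunmapped. The memset
 * payload below is hypothetical.
 *
 *	void *va = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *	memset(va, 0, obj->size);
 *	msm_gem_put_vaddr(obj);
 */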
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(msm_obj->vaddr);
#endif

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	if (obj->import_attach) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
		dma_buf_vunmap(obj->import_attach->dmabuf, &map);
		dma_buf_end_cpu_access(obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
#else
		dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
		if (obj->dev && obj->dev->dev && !dev_is_dma_coherent(obj->dev->dev))
			dma_buf_end_cpu_access(obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
#endif
	} else {
		vunmap(msm_obj->vaddr);
	}

	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	ret = dma_resv_wait_timeout(msm_obj->resv, write, true, remain);
#else
	ret = dma_resv_wait_timeout_rcu(msm_obj->resv, write, true, remain);
#endif
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(msm_obj->vaddr);
#endif

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	mutex_lock(&priv->mm_lock);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);
	if (msm_obj->aspace) {
		mutex_lock(&msm_obj->aspace->list_lock);
		msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
				obj);
		mutex_unlock(&msm_obj->aspace->list_lock);
	}

	if (obj->import_attach) {
		if (msm_obj->vaddr)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
			dma_buf_vunmap(obj->import_attach->dmabuf, &map);
#else
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
#endif

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		dma_resv_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}
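
/*
 * Illustrative sketch (not part of the driver): an allocation path such
 * as a GEM-new ioctl or the dumb-buffer path above reduces to a single
 * call; the returned handle then holds the only long-lived reference.
 * The req_size variable and buffer name are hypothetical.
 *
 *	uint32_t handle;
 *	int ret;
 *
 *	ret = msm_gem_new_handle(dev, file, PAGE_ALIGN(req_size),
 *				 MSM_BO_WC, &handle, "my-buffer");
 *	if (ret)
 *		return ret;
 *	// hand 'handle' back to userspace
 */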
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.vm_ops = &vm_ops,
};
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct dma_resv *resv,
		struct drm_gem_object **obj)
{
#else
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct dma_resv *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
#endif
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		dma_resv_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);
	INIT_LIST_HEAD(&msm_obj->iova_list);
	msm_obj->aspace = msm_gem_smmu_address_space_get(dev,
			MSM_SMMU_DOMAIN_UNSECURE);
	if (IS_ERR(msm_obj->aspace))
		msm_obj->aspace = NULL;
	msm_obj->in_active_list = false;
	msm_obj->obj_dirty = false;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&priv->mm_lock);
#endif

	*obj = &msm_obj->base;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	(*obj)->funcs = &msm_gem_object_funcs;
#endif

	return 0;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
#else
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
#endif
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
#else
	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
#endif
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		mutex_lock(&msm_obj->lock);
		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);

		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	mutex_lock(&dev->struct_mutex);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&dev->struct_mutex);
#endif

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}
#endif
int msm_gem_delayed_import(struct drm_gem_object *obj)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct msm_gem_object *msm_obj;
	int ret = 0;

	if (!obj) {
		DRM_ERROR("NULL drm gem object\n");
		return -EINVAL;
	}

	msm_obj = to_msm_bo(obj);

	if (!obj->import_attach) {
		DRM_ERROR("NULL dma_buf_attachment in drm gem object\n");
		return -EINVAL;
	}

	attach = obj->import_attach;
	attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;

	/*
	 * dma_buf_map_attachment will call dma_map_sg for ion buffer
	 * mapping, and iova will get mapped when the function returns.
	 */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		DRM_ERROR("dma_buf_map_attachment failure, err=%d\n",
				ret);
		goto fail_import;
	}
	msm_obj->sgt = sgt;
	msm_obj->pages = NULL;

fail_import:
	return ret;
}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	struct msm_drm_private *priv = dev->dev_private;
#endif
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	uint32_t size;
	int ret;
	unsigned long flags = 0;

	size = PAGE_ALIGN(dmabuf->size);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
#else
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
#endif
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = NULL;
	/*
	 * 1) If sg table is NULL, user should call msm_gem_delayed_import
	 *    to add back the sg table to the drm gem object.
	 *
	 * 2) Add buffer flag unconditionally for all import cases.
	 *    # Cached buffer will be attached immediately hence sgt will
	 *      be available upon gem obj creation.
	 *    # Un-cached buffer will follow delayed attach hence sgt
	 *      will be NULL upon gem obj creation.
	 */
	msm_obj->flags |= MSM_BO_EXTBUF;

	ret = dma_buf_get_flags(dmabuf, &flags);
	if (ret)
		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);

	mutex_unlock(&msm_obj->lock);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	mutex_lock(&dev->struct_mutex);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&dev->struct_mutex);
#endif

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}
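
/*
 * Illustrative sketch (not part of the driver): a prime-import style
 * caller wraps an existing dma-buf with msm_gem_import(); when no
 * sg_table is passed, the table is expected to be filled in later via
 * msm_gem_delayed_import() (triggered from the IOVA path above). The
 * surrounding dma_buf_attach() handling is omitted here.
 *
 *	struct drm_gem_object *obj;
 *
 *	obj = msm_gem_import(dev, dmabuf, NULL);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	// obj->import_attach is set up by the caller (e.g. the prime
 *	// import helper) before any IOVA or vmap access.
 */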
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;

err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}
#endif
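
/*
 * Illustrative sketch (pre-5.15 builds only, not part of the driver):
 * msm_gem_kernel_new() bundles allocation, IOVA mapping and kernel vmap
 * for driver-internal buffers, and msm_gem_kernel_put() undoes all
 * three. The size and aspace below are hypothetical.
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *va;
 *
 *	va = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *	// ... use va / iova ...
 *	msm_gem_kernel_put(bo, aspace, false);
 */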
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}

void msm_gem_put_buffer(struct drm_gem_object *gem)
{
	struct msm_gem_object *msm_gem;

	if (!gem)
		return;

	msm_gem = to_msm_bo(gem);

	msm_gem_put_iova(gem, msm_gem->aspace);
	msm_gem_put_vaddr(gem);
}
int msm_gem_get_buffer(struct drm_gem_object *gem,
		struct drm_device *dev, struct drm_framebuffer *fb,
		uint32_t align_size)
{
	struct msm_gem_object *msm_gem;
	uint32_t size;
	uint64_t iova_aligned;
	int ret = -EINVAL;

	if (!gem) {
		DRM_ERROR("invalid drm gem");
		return ret;
	}

	msm_gem = to_msm_bo(gem);

	size = PAGE_ALIGN(gem->size);

	if (size < (align_size + GUARD_BYTES)) {
		DRM_ERROR("invalid gem size");
		goto exit;
	}

	msm_gem_smmu_address_space_get(dev, MSM_SMMU_DOMAIN_UNSECURE);

	if (PTR_ERR(msm_gem->aspace) == -ENODEV) {
		DRM_DEBUG("IOMMU not present, relying on VRAM.");
	} else if (IS_ERR_OR_NULL(msm_gem->aspace)) {
		ret = PTR_ERR(msm_gem->aspace);
		DRM_ERROR("failed to get aspace");
		goto exit;
	}

	ret = msm_gem_get_iova(gem, msm_gem->aspace, &msm_gem->iova);
	if (ret) {
		DRM_ERROR("failed to get the iova ret %d", ret);
		goto exit;
	}

	msm_gem_get_vaddr(gem);
	if (IS_ERR_OR_NULL(msm_gem->vaddr)) {
		DRM_ERROR("failed to get vaddr");
		goto exit;
	}

	iova_aligned = (msm_gem->iova + GUARD_BYTES) & ALIGNED_OFFSET;
	msm_gem->offset = iova_aligned - msm_gem->iova;
	msm_gem->iova = msm_gem->iova + msm_gem->offset;

	return 0;

exit:
	msm_gem_put_buffer(gem);

	return ret;
}
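
/*
 * Worked example of the guard-byte alignment above (illustrative only):
 * GUARD_BYTES is 0xff and ALIGNED_OFFSET masks off the low 8 bits, so an
 * IOVA of 0x10000080 becomes iova_aligned = (0x10000080 + 0xff) & ~0xff
 * = 0x10000100, offset = 0x80, and the reported IOVA is bumped to the
 * next 256-byte boundary. Callers of msm_gem_get_buffer() are expected
 * to release the buffer with msm_gem_put_buffer() when done.
 */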