msm_gem.c

  1. /*
  2. * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  3. * Copyright (C) 2013 Red Hat
  4. * Author: Rob Clark <[email protected]>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published by
  8. * the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. #include <linux/qcom-dma-mapping.h>
  19. #include <linux/spinlock.h>
  20. #include <linux/shmem_fs.h>
  21. #include <linux/dma-buf.h>
  22. #include <linux/pfn_t.h>
  23. #include <linux/ion.h>
  24. #include "msm_drv.h"
  25. #include "msm_gem.h"
  26. #include "msm_mmu.h"
  27. #include "sde_dbg.h"
  28. #define GUARD_BYTES (BIT(8) - 1)
  29. #define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))
  30. static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
  31. static dma_addr_t physaddr(struct drm_gem_object *obj)
  32. {
  33. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  34. struct msm_drm_private *priv = obj->dev->dev_private;
  35. return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
  36. priv->vram.paddr;
  37. }
  38. static bool use_pages(struct drm_gem_object *obj)
  39. {
  40. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  41. return !msm_obj->vram_node;
  42. }
  43. /* allocate pages from VRAM carveout, used when no IOMMU: */
  44. static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
  45. {
  46. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  47. struct msm_drm_private *priv = obj->dev->dev_private;
  48. dma_addr_t paddr;
  49. struct page **p;
  50. int ret, i;
  51. p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
  52. if (!p)
  53. return ERR_PTR(-ENOMEM);
  54. spin_lock(&priv->vram.lock);
  55. ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
  56. spin_unlock(&priv->vram.lock);
  57. if (ret) {
  58. kvfree(p);
  59. return ERR_PTR(ret);
  60. }
  61. paddr = physaddr(obj);
  62. for (i = 0; i < npages; i++) {
  63. p[i] = phys_to_page(paddr);
  64. paddr += PAGE_SIZE;
  65. }
  66. return p;
  67. }
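/*
 * Lazily allocate the backing store for an object: shmem pages via
 * drm_gem_get_pages() unless the object lives in the VRAM carveout.
 * This also builds the sg table and, for WC/uncached buffers, maps it
 * for the device so the new pages are clean. Imported objects simply
 * return the pages recorded at import time.
 */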
  68. static struct page **get_pages(struct drm_gem_object *obj)
  69. {
  70. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  71. struct device *aspace_dev;
  72. if (obj->import_attach)
  73. return msm_obj->pages;
  74. if (!msm_obj->pages) {
  75. struct drm_device *dev = obj->dev;
  76. struct page **p;
  77. int npages = obj->size >> PAGE_SHIFT;
  78. if (use_pages(obj))
  79. p = drm_gem_get_pages(obj);
  80. else
  81. p = get_pages_vram(obj, npages);
  82. if (IS_ERR(p)) {
  83. dev_err(dev->dev, "could not get pages: %ld\n",
  84. PTR_ERR(p));
  85. return p;
  86. }
  87. msm_obj->pages = p;
  88. msm_obj->sgt = drm_prime_pages_to_sg(dev, p, npages);
  89. if (IS_ERR(msm_obj->sgt)) {
  90. void *ptr = ERR_CAST(msm_obj->sgt);
  91. dev_err(dev->dev, "failed to allocate sgt\n");
  92. msm_obj->sgt = NULL;
  93. return ptr;
  94. }
  95. if (msm_obj->vram_node) {
  96. goto end;
  97. /*
  98. * For non-cached buffers, ensure the new pages are clean
  99. * because display controller, GPU, etc. are not coherent
  100. */
  101. } else if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
  102. aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
  103. if (aspace_dev) {
  104. dma_map_sg(aspace_dev, msm_obj->sgt->sgl,
  105. msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
  106. /* mark the buffer as external buffer */
  107. msm_obj->flags |= MSM_BO_EXTBUF;
  108. } else {
  109. DRM_ERROR("failed to get aspace_device\n");
  110. }
  111. }
  112. }
  113. end:
  114. return msm_obj->pages;
  115. }
  116. static void put_pages_vram(struct drm_gem_object *obj)
  117. {
  118. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  119. struct msm_drm_private *priv = obj->dev->dev_private;
  120. spin_lock(&priv->vram.lock);
  121. drm_mm_remove_node(msm_obj->vram_node);
  122. spin_unlock(&priv->vram.lock);
  123. kvfree(msm_obj->pages);
  124. }
  125. static void put_pages(struct drm_gem_object *obj)
  126. {
  127. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  128. if (msm_obj->pages) {
  129. if (msm_obj->sgt) {
  130. sg_free_table(msm_obj->sgt);
  131. kfree(msm_obj->sgt);
  132. }
  133. if (use_pages(obj))
  134. drm_gem_put_pages(obj, msm_obj->pages, true, false);
  135. else
  136. put_pages_vram(obj);
  137. msm_obj->pages = NULL;
  138. }
  139. }
  140. struct page **msm_gem_get_pages(struct drm_gem_object *obj)
  141. {
  142. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  143. struct page **p;
  144. mutex_lock(&msm_obj->lock);
  145. if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
  146. mutex_unlock(&msm_obj->lock);
  147. return ERR_PTR(-EBUSY);
  148. }
  149. p = get_pages(obj);
  150. mutex_unlock(&msm_obj->lock);
  151. return p;
  152. }
  153. void msm_gem_put_pages(struct drm_gem_object *obj)
  154. {
  155. /* when we start tracking the pin count, then do something here */
  156. }
  157. void msm_gem_sync(struct drm_gem_object *obj)
  158. {
  159. struct msm_gem_object *msm_obj;
  160. struct device *aspace_dev;
  161. if (!obj)
  162. return;
  163. msm_obj = to_msm_bo(obj);
  164. if (msm_obj->vram_node)
  165. return;
  166. /*
  167. * dma_sync_sg_for_device synchronises a single contiguous or
  168. * scatter/gather mapping for the CPU and device.
  169. */
  170. aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
  171. if (aspace_dev)
  172. dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
  173. msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
  174. else
  175. DRM_ERROR("failed to get aspace_device\n");
  176. }
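/*
 * Set up a userspace mapping according to the object's caching flags:
 * the vma is switched from PFNMAP to MIXEDMAP (so vmf_insert_mixed() can
 * be used in the fault handler), WC/uncached objects get the matching
 * pgprot, and cached objects are redirected to the shmem file (see the
 * comment below) so they have their own address_space.
 */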
  177. int msm_gem_mmap_obj(struct drm_gem_object *obj,
  178. struct vm_area_struct *vma)
  179. {
  180. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  181. vma->vm_flags &= ~VM_PFNMAP;
  182. vma->vm_flags |= VM_MIXEDMAP;
  183. if (msm_obj->flags & MSM_BO_WC) {
  184. vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
  185. } else if (msm_obj->flags & MSM_BO_UNCACHED) {
  186. vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
  187. } else {
  188. /*
  189. * Shunt off cached objs to shmem file so they have their own
  190. * address_space (so unmap_mapping_range does what we want,
  191. * in particular in the case of mmap'd dmabufs)
  192. */
  193. fput(vma->vm_file);
  194. get_file(obj->filp);
  195. vma->vm_pgoff = 0;
  196. vma->vm_file = obj->filp;
  197. vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
  198. }
  199. return 0;
  200. }
  201. int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
  202. {
  203. int ret;
  204. ret = drm_gem_mmap(filp, vma);
  205. if (ret) {
  206. DBG("mmap failed: %d", ret);
  207. return ret;
  208. }
  209. return msm_gem_mmap_obj(vma->vm_private_data, vma);
  210. }
  211. vm_fault_t msm_gem_fault(struct vm_fault *vmf)
  212. {
  213. struct vm_area_struct *vma = vmf->vma;
  214. struct drm_gem_object *obj = vma->vm_private_data;
  215. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  216. struct page **pages;
  217. unsigned long pfn;
  218. pgoff_t pgoff;
  219. int err;
  220. vm_fault_t ret;
  221. /*
  222. * vm_ops.open/drm_gem_mmap_obj and close get and put
  223. * a reference on obj. So, we don't need to hold one here.
  224. */
  225. err = mutex_lock_interruptible(&msm_obj->lock);
  226. if (err) {
  227. ret = VM_FAULT_NOPAGE;
  228. goto out;
  229. }
  230. if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
  231. mutex_unlock(&msm_obj->lock);
  232. return VM_FAULT_SIGBUS;
  233. }
  234. /* make sure we have pages attached now */
  235. pages = get_pages(obj);
  236. if (IS_ERR(pages)) {
  237. ret = vmf_error(PTR_ERR(pages));
  238. goto out_unlock;
  239. }
  240. /* We don't use vmf->pgoff since that has the fake offset: */
  241. pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
  242. pfn = page_to_pfn(pages[pgoff]);
  243. VERB("Inserting %pK pfn %lx, pa %lx", (void *)vmf->address,
  244. pfn, pfn << PAGE_SHIFT);
  245. ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
  246. out_unlock:
  247. mutex_unlock(&msm_obj->lock);
  248. out:
  249. return ret;
  250. }
  251. /* get mmap offset */
  252. static uint64_t mmap_offset(struct drm_gem_object *obj)
  253. {
  254. struct drm_device *dev = obj->dev;
  255. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  256. int ret;
  257. WARN_ON(!mutex_is_locked(&msm_obj->lock));
  258. /* Make it mmapable */
  259. ret = drm_gem_create_mmap_offset(obj);
  260. if (ret) {
  261. dev_err(dev->dev, "could not allocate mmap offset\n");
  262. return 0;
  263. }
  264. return drm_vma_node_offset_addr(&obj->vma_node);
  265. }
  266. uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
  267. {
  268. uint64_t offset;
  269. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  270. mutex_lock(&msm_obj->lock);
  271. offset = mmap_offset(obj);
  272. mutex_unlock(&msm_obj->lock);
  273. return offset;
  274. }
  275. dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj)
  276. {
  277. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  278. struct sg_table *sgt;
  279. if (!msm_obj->sgt) {
  280. sgt = dma_buf_map_attachment(obj->import_attach,
  281. DMA_BIDIRECTIONAL);
  282. if (IS_ERR_OR_NULL(sgt)) {
  283. DRM_ERROR("dma_buf_map_attachment failure, err=%ld\n",
  284. PTR_ERR(sgt));
  285. return 0;
  286. }
  287. msm_obj->sgt = sgt;
  288. }
  289. return sg_phys(msm_obj->sgt->sgl);
  290. }
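/*
 * Each object tracks its per-address-space mappings on msm_obj->vmas.
 * add_vma()/lookup_vma()/del_vma() manage that list and expect the
 * caller to hold msm_obj->lock.
 */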
  291. static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
  292. struct msm_gem_address_space *aspace)
  293. {
  294. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  295. struct msm_gem_vma *vma;
  296. WARN_ON(!mutex_is_locked(&msm_obj->lock));
  297. vma = kzalloc(sizeof(*vma), GFP_KERNEL);
  298. if (!vma)
  299. return ERR_PTR(-ENOMEM);
  300. vma->aspace = aspace;
  301. msm_obj->aspace = aspace;
  302. list_add_tail(&vma->list, &msm_obj->vmas);
  303. return vma;
  304. }
  305. static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
  306. struct msm_gem_address_space *aspace)
  307. {
  308. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  309. struct msm_gem_vma *vma;
  310. WARN_ON(!mutex_is_locked(&msm_obj->lock));
  311. list_for_each_entry(vma, &msm_obj->vmas, list) {
  312. if (vma->aspace == aspace)
  313. return vma;
  314. }
  315. return NULL;
  316. }
  317. static void del_vma(struct msm_gem_vma *vma)
  318. {
  319. if (!vma)
  320. return;
  321. list_del(&vma->list);
  322. kfree(vma);
  323. }
  324. /* Called with msm_obj->lock locked */
  325. static void
  326. put_iova(struct drm_gem_object *obj)
  327. {
  328. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  329. struct msm_gem_vma *vma, *tmp;
  330. WARN_ON(!mutex_is_locked(&msm_obj->lock));
  331. list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
  332. msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
  333. msm_obj->flags);
  334. /*
  335. * put_iova removes the domain connected to the obj which makes
  336. * the aspace inaccessible. Store the aspace, as it is used to
  337. * update the active_list during gem_free_obj and gem_purge.
  338. */
  339. msm_obj->aspace = vma->aspace;
  340. del_vma(vma);
  341. }
  342. }
  343. /* get iova, taking a reference. Should have a matching put */
  344. static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
  345. struct msm_gem_address_space *aspace, uint64_t *iova)
  346. {
  347. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  348. struct msm_gem_vma *vma;
  349. int ret = 0;
  350. WARN_ON(!mutex_is_locked(&msm_obj->lock));
  351. vma = lookup_vma(obj, aspace);
  352. if (!vma) {
  353. struct page **pages;
  354. struct device *dev;
  355. struct dma_buf *dmabuf;
  356. bool reattach = false;
  357. unsigned long dma_map_attrs;
  358. dev = msm_gem_get_aspace_device(aspace);
  359. if ((dev && obj->import_attach) &&
  360. ((dev != obj->import_attach->dev) ||
  361. msm_obj->obj_dirty)) {
  362. dmabuf = obj->import_attach->dmabuf;
  363. dma_map_attrs = obj->import_attach->dma_map_attrs;
  364. DRM_DEBUG("detach nsec-dev:%pK attach sec-dev:%pK\n",
  365. obj->import_attach->dev, dev);
  366. SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt,
  367. msm_obj->obj_dirty);
  368. if (msm_obj->sgt)
  369. dma_buf_unmap_attachment(obj->import_attach,
  370. msm_obj->sgt, DMA_BIDIRECTIONAL);
  371. dma_buf_detach(dmabuf, obj->import_attach);
  372. obj->import_attach = dma_buf_attach(dmabuf, dev);
  373. if (IS_ERR(obj->import_attach)) {
  374. DRM_ERROR("dma_buf_attach failure, err=%ld\n",
  375. PTR_ERR(obj->import_attach));
  376. ret = PTR_ERR(obj->import_attach);
  377. return ret;
  378. }
  379. /*
  380. * obj->import_attach is created as part of dma_buf_attach.
  381. * Re-apply the dma_map_attr in this case to be in sync
  382. * with iommu_map attrs during map_attachment callback.
  383. */
  384. obj->import_attach->dma_map_attrs |= dma_map_attrs;
  385. msm_obj->obj_dirty = false;
  386. reattach = true;
  387. }
  388. /* perform delayed import for buffers without existing sgt */
  389. if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))
  390. || reattach) {
  391. ret = msm_gem_delayed_import(obj);
  392. if (ret) {
  393. DRM_ERROR("delayed dma-buf import failed %d\n",
  394. ret);
  395. return ret;
  396. }
  397. }
  398. vma = add_vma(obj, aspace);
  399. if (IS_ERR(vma)) {
  400. ret = PTR_ERR(vma);
  401. return ret;
  402. }
  403. pages = get_pages(obj);
  404. if (IS_ERR(pages)) {
  405. ret = PTR_ERR(pages);
  406. goto fail;
  407. }
  408. ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
  409. obj->size >> PAGE_SHIFT,
  410. msm_obj->flags);
  411. if (ret)
  412. goto fail;
  413. }
  414. *iova = vma->iova;
  415. if (aspace && !msm_obj->in_active_list) {
  416. mutex_lock(&aspace->list_lock);
  417. msm_gem_add_obj_to_aspace_active_list(aspace, obj);
  418. mutex_unlock(&aspace->list_lock);
  419. }
  420. return 0;
  421. fail:
  422. del_vma(vma);
  423. return ret;
  424. }
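/*
 * Map the object's sg table into an already-looked-up VMA for the given
 * address space. Caller must hold msm_obj->lock and the object must
 * still be MSM_MADV_WILLNEED.
 */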
  425. static int msm_gem_pin_iova(struct drm_gem_object *obj,
  426. struct msm_gem_address_space *aspace)
  427. {
  428. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  429. struct msm_gem_vma *vma;
  430. struct page **pages;
  431. WARN_ON(!mutex_is_locked(&msm_obj->lock));
  432. if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
  433. return -EBUSY;
  434. vma = lookup_vma(obj, aspace);
  435. if (WARN_ON(!vma))
  436. return -EINVAL;
  437. pages = get_pages(obj);
  438. if (IS_ERR(pages))
  439. return PTR_ERR(pages);
  440. return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
  441. obj->size >> PAGE_SHIFT, msm_obj->flags);
  442. }
  443. /* get iova and pin it. Should have a matching put */
  444. int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
  445. struct msm_gem_address_space *aspace, uint64_t *iova)
  446. {
  447. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  448. u64 local;
  449. int ret;
  450. mutex_lock(&msm_obj->lock);
  451. ret = msm_gem_get_iova_locked(obj, aspace, &local);
  452. if (!ret)
  453. ret = msm_gem_pin_iova(obj, aspace);
  454. if (!ret)
  455. *iova = local;
  456. mutex_unlock(&msm_obj->lock);
  457. return ret;
  458. }
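/*
 * Typical msm_gem_get_and_pin_iova() usage (illustrative sketch only;
 * 'obj' and 'aspace' are assumed to be a valid GEM object and address
 * space owned by the caller):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *
 *	if (ret)
 *		return ret;
 *	... program iova into the hardware ...
 *	msm_gem_unpin_iova(obj, aspace);
 */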
  459. int msm_gem_get_iova(struct drm_gem_object *obj,
  460. struct msm_gem_address_space *aspace, uint64_t *iova)
  461. {
  462. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  463. int ret;
  464. mutex_lock(&msm_obj->lock);
  465. ret = msm_gem_get_iova_locked(obj, aspace, iova);
  466. mutex_unlock(&msm_obj->lock);
  467. return ret;
  468. }
  469. /* get iova without taking a reference, used in places where you have
  470. * already done a 'msm_gem_get_iova()'.
  471. */
  472. uint64_t msm_gem_iova(struct drm_gem_object *obj,
  473. struct msm_gem_address_space *aspace)
  474. {
  475. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  476. struct msm_gem_vma *vma;
  477. mutex_lock(&msm_obj->lock);
  478. vma = lookup_vma(obj, aspace);
  479. mutex_unlock(&msm_obj->lock);
  480. WARN_ON(!vma);
  481. return vma ? vma->iova : 0;
  482. }
  483. /*
  484. * Unpin an iova by updating the reference counts. The memory isn't actually
  485. * purged until something else (shrinker, mm_notifier, destroy, etc) decides
  486. * to get rid of it
  487. */
  488. void msm_gem_unpin_iova(struct drm_gem_object *obj,
  489. struct msm_gem_address_space *aspace)
  490. {
  491. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  492. struct msm_gem_vma *vma;
  493. mutex_lock(&msm_obj->lock);
  494. vma = lookup_vma(obj, aspace);
  495. if (!WARN_ON(!vma))
  496. msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
  497. msm_obj->flags);
  498. mutex_unlock(&msm_obj->lock);
  499. }
  500. void msm_gem_put_iova(struct drm_gem_object *obj,
  501. struct msm_gem_address_space *aspace)
  502. {
  503. // XXX TODO ..
  504. // NOTE: probably don't need a _locked() version.. we wouldn't
  505. // normally unmap here, but instead just mark that it could be
  506. // unmapped (if the iova refcnt drops to zero), but then later
  507. // if another _get_iova_locked() fails we can start unmapping
  508. // things that are no longer needed..
  509. }
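/*
 * Called when the SMMU domain backing an address space is attached or
 * detached. On detach, clients are notified and imported buffers on the
 * active list have their iovas dropped and are marked dirty so they are
 * re-attached on next use; on attach, active buffers are mapped again
 * and clients are notified.
 */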
  510. void msm_gem_aspace_domain_attach_detach_update(
  511. struct msm_gem_address_space *aspace,
  512. bool is_detach)
  513. {
  514. struct msm_gem_object *msm_obj;
  515. struct drm_gem_object *obj;
  516. struct aspace_client *aclient;
  517. int ret;
  518. uint64_t iova;
  519. if (!aspace)
  520. return;
  521. mutex_lock(&aspace->list_lock);
  522. if (is_detach) {
  523. /* Indicate to clients domain is getting detached */
  524. list_for_each_entry(aclient, &aspace->clients, list) {
  525. if (aclient->cb)
  526. aclient->cb(aclient->cb_data,
  527. is_detach);
  528. }
  529. /*
  530. * Unmap active buffers.
  531. * Typically clients should do this when the callback is called,
  532. * but it still needs to be done here for the buffers which are
  533. * not attached to any planes.
  534. */
  535. list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
  536. obj = &msm_obj->base;
  537. if (obj->import_attach) {
  538. mutex_lock(&msm_obj->lock);
  539. put_iova(obj);
  540. msm_obj->obj_dirty = true;
  541. mutex_unlock(&msm_obj->lock);
  542. }
  543. }
  544. } else {
  545. /* map active buffers */
  546. list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
  547. obj = &msm_obj->base;
  548. ret = msm_gem_get_iova(obj, aspace, &iova);
  549. if (ret) {
  550. mutex_unlock(&aspace->list_lock);
  551. return;
  552. }
  553. }
  554. /* Indicate to clients domain is attached */
  555. list_for_each_entry(aclient, &aspace->clients, list) {
  556. if (aclient->cb)
  557. aclient->cb(aclient->cb_data,
  558. is_detach);
  559. }
  560. }
  561. mutex_unlock(&aspace->list_lock);
  562. }
  563. int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
  564. struct drm_mode_create_dumb *args)
  565. {
  566. args->pitch = align_pitch(args->width, args->bpp);
  567. args->size = PAGE_ALIGN(args->pitch * args->height);
  568. return msm_gem_new_handle(dev, file, args->size,
  569. MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
  570. }
  571. int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
  572. uint32_t handle, uint64_t *offset)
  573. {
  574. struct drm_gem_object *obj;
  575. int ret = 0;
  576. /* GEM does all our handle to object mapping */
  577. obj = drm_gem_object_lookup(file, handle);
  578. if (obj == NULL) {
  579. ret = -ENOENT;
  580. goto fail;
  581. }
  582. *offset = msm_gem_mmap_offset(obj);
  583. drm_gem_object_put(obj);
  584. fail:
  585. return ret;
  586. }
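/*
 * Return a kernel mapping of the object, creating it on first use:
 * imported dma-bufs are mapped with dma_buf_vmap() (after beginning CPU
 * access on non-coherent devices), everything else with vmap(). The
 * vmap_count reference taken here is dropped by msm_gem_put_vaddr().
 */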
  587. static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
  588. {
  589. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  590. int ret = 0;
  591. mutex_lock(&msm_obj->lock);
  592. if (WARN_ON(msm_obj->madv > madv)) {
  593. dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
  594. msm_obj->madv, madv);
  595. mutex_unlock(&msm_obj->lock);
  596. return ERR_PTR(-EBUSY);
  597. }
  598. /* increment vmap_count *before* vmap() call, so shrinker can
  599. * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
  600. * This guarantees that we won't try to msm_gem_vunmap() this
  601. * same object from within the vmap() call (while we already
  602. * hold msm_obj->lock)
  603. */
  604. msm_obj->vmap_count++;
  605. if (!msm_obj->vaddr) {
  606. struct page **pages = get_pages(obj);
  607. if (IS_ERR(pages)) {
  608. ret = PTR_ERR(pages);
  609. goto fail;
  610. }
  611. if (obj->import_attach) {
  612. if (obj->dev && obj->dev->dev && !dev_is_dma_coherent(obj->dev->dev)) {
  613. ret = dma_buf_begin_cpu_access(
  614. obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
  615. if (ret)
  616. goto fail;
  617. }
  618. msm_obj->vaddr =
  619. dma_buf_vmap(obj->import_attach->dmabuf);
  620. } else {
  621. msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
  622. VM_MAP, PAGE_KERNEL);
  623. }
  624. if (msm_obj->vaddr == NULL) {
  625. ret = -ENOMEM;
  626. goto fail;
  627. }
  628. }
  629. mutex_unlock(&msm_obj->lock);
  630. return msm_obj->vaddr;
  631. fail:
  632. msm_obj->vmap_count--;
  633. mutex_unlock(&msm_obj->lock);
  634. return ERR_PTR(ret);
  635. }
  636. void *msm_gem_get_vaddr(struct drm_gem_object *obj)
  637. {
  638. return get_vaddr(obj, MSM_MADV_WILLNEED);
  639. }
  640. /*
  641. * Don't use this! It is for the very special case of dumping
  642. * submits from GPU hangs or faults, where the bo may already
  643. * be MSM_MADV_DONTNEED, but we know the buffer is still on the
  644. * active list.
  645. */
  646. void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
  647. {
  648. return get_vaddr(obj, __MSM_MADV_PURGED);
  649. }
  650. void msm_gem_put_vaddr(struct drm_gem_object *obj)
  651. {
  652. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  653. mutex_lock(&msm_obj->lock);
  654. WARN_ON(msm_obj->vmap_count < 1);
  655. msm_obj->vmap_count--;
  656. mutex_unlock(&msm_obj->lock);
  657. }
  658. /* Update madvise status, returns true if not purged, else
  659. * false or -errno.
  660. */
  661. int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
  662. {
  663. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  664. mutex_lock(&msm_obj->lock);
  665. WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
  666. if (msm_obj->madv != __MSM_MADV_PURGED)
  667. msm_obj->madv = madv;
  668. madv = msm_obj->madv;
  669. mutex_unlock(&msm_obj->lock);
  670. return (madv != __MSM_MADV_PURGED);
  671. }
  672. void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
  673. {
  674. struct drm_device *dev = obj->dev;
  675. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  676. WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  677. WARN_ON(!is_purgeable(msm_obj));
  678. WARN_ON(obj->import_attach);
  679. mutex_lock_nested(&msm_obj->lock, subclass);
  680. put_iova(obj);
  681. if (msm_obj->aspace) {
  682. mutex_lock(&msm_obj->aspace->list_lock);
  683. msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
  684. obj);
  685. mutex_unlock(&msm_obj->aspace->list_lock);
  686. }
  687. msm_gem_vunmap_locked(obj);
  688. put_pages(obj);
  689. msm_obj->madv = __MSM_MADV_PURGED;
  690. drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
  691. drm_gem_free_mmap_offset(obj);
  692. /* Our goal here is to return as much of the memory as
  693. * is possible back to the system as we are called from OOM.
  694. * To do this we must instruct the shmfs to drop all of its
  695. * backing pages, *now*.
  696. */
  697. shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
  698. invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
  699. 0, (loff_t)-1);
  700. mutex_unlock(&msm_obj->lock);
  701. }
  702. static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
  703. {
  704. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  705. WARN_ON(!mutex_is_locked(&msm_obj->lock));
  706. if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
  707. return;
  708. if (obj->import_attach) {
  709. dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
  710. if (obj->dev && obj->dev->dev && !dev_is_dma_coherent(obj->dev->dev))
  711. dma_buf_end_cpu_access(obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
  712. } else {
  713. vunmap(msm_obj->vaddr);
  714. }
  715. msm_obj->vaddr = NULL;
  716. }
  717. void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
  718. {
  719. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  720. mutex_lock_nested(&msm_obj->lock, subclass);
  721. msm_gem_vunmap_locked(obj);
  722. mutex_unlock(&msm_obj->lock);
  723. }
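/*
 * Wait for fences on the object's reservation before CPU access.
 * MSM_PREP_NOSYNC makes the check non-blocking (-EBUSY if still busy),
 * and MSM_PREP_WRITE additionally waits for readers, not just the
 * exclusive fence.
 */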
  724. int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
  725. {
  726. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  727. bool write = !!(op & MSM_PREP_WRITE);
  728. unsigned long remain =
  729. op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
  730. long ret;
  731. ret = dma_resv_wait_timeout_rcu(msm_obj->resv, write,
  732. true, remain);
  733. if (ret == 0)
  734. return remain == 0 ? -EBUSY : -ETIMEDOUT;
  735. else if (ret < 0)
  736. return ret;
  737. /* TODO cache maintenance */
  738. return 0;
  739. }
  740. int msm_gem_cpu_fini(struct drm_gem_object *obj)
  741. {
  742. /* TODO cache maintenance */
  743. return 0;
  744. }
  745. #ifdef CONFIG_DEBUG_FS
  746. static void describe_fence(struct dma_fence *fence, const char *type,
  747. struct seq_file *m)
  748. {
  749. if (!dma_fence_is_signaled(fence))
  750. seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
  751. fence->ops->get_driver_name(fence),
  752. fence->ops->get_timeline_name(fence),
  753. fence->seqno);
  754. }
  755. void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
  756. {
  757. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  758. struct dma_resv *robj = msm_obj->resv;
  759. struct dma_resv_list *fobj;
  760. struct dma_fence *fence;
  761. struct msm_gem_vma *vma;
  762. uint64_t off = drm_vma_node_start(&obj->vma_node);
  763. const char *madv;
  764. mutex_lock(&msm_obj->lock);
  765. switch (msm_obj->madv) {
  766. case __MSM_MADV_PURGED:
  767. madv = " purged";
  768. break;
  769. case MSM_MADV_DONTNEED:
  770. madv = " purgeable";
  771. break;
  772. case MSM_MADV_WILLNEED:
  773. default:
  774. madv = "";
  775. break;
  776. }
  777. seq_printf(m, "%08x: %c %2d (%2d) %08llx %pK\t",
  778. msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
  779. obj->name, kref_read(&obj->refcount),
  780. off, msm_obj->vaddr);
  781. seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
  782. if (!list_empty(&msm_obj->vmas)) {
  783. seq_puts(m, " vmas:");
  784. list_for_each_entry(vma, &msm_obj->vmas, list)
  785. seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
  786. vma->iova, vma->mapped ? "mapped" : "unmapped",
  787. vma->inuse);
  788. seq_puts(m, "\n");
  789. }
  790. rcu_read_lock();
  791. fobj = rcu_dereference(robj->fence);
  792. if (fobj) {
  793. unsigned int i, shared_count = fobj->shared_count;
  794. for (i = 0; i < shared_count; i++) {
  795. fence = rcu_dereference(fobj->shared[i]);
  796. describe_fence(fence, "Shared", m);
  797. }
  798. }
  799. fence = rcu_dereference(robj->fence_excl);
  800. if (fence)
  801. describe_fence(fence, "Exclusive", m);
  802. rcu_read_unlock();
  803. mutex_unlock(&msm_obj->lock);
  804. }
  805. void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
  806. {
  807. struct msm_gem_object *msm_obj;
  808. int count = 0;
  809. size_t size = 0;
  810. seq_puts(m, " flags id ref offset kaddr size madv name\n");
  811. list_for_each_entry(msm_obj, list, mm_list) {
  812. struct drm_gem_object *obj = &msm_obj->base;
  813. seq_puts(m, " ");
  814. msm_gem_describe(obj, m);
  815. count++;
  816. size += obj->size;
  817. }
  818. seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
  819. }
  820. #endif
  821. /* don't call directly! Use drm_gem_object_put() and friends */
  822. void msm_gem_free_object(struct drm_gem_object *obj)
  823. {
  824. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  825. struct drm_device *dev = obj->dev;
  826. struct msm_drm_private *priv = dev->dev_private;
  827. /* object should not be on active list: */
  828. WARN_ON(is_active(msm_obj));
  829. mutex_lock(&priv->mm_lock);
  830. list_del(&msm_obj->mm_list);
  831. mutex_unlock(&priv->mm_lock);
  832. mutex_lock(&msm_obj->lock);
  833. put_iova(obj);
  834. if (msm_obj->aspace) {
  835. mutex_lock(&msm_obj->aspace->list_lock);
  836. msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
  837. obj);
  838. mutex_unlock(&msm_obj->aspace->list_lock);
  839. }
  840. if (obj->import_attach) {
  841. if (msm_obj->vaddr)
  842. dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
  843. /* Don't drop the pages for imported dmabuf, as they are not
  844. * ours, just free the array we allocated:
  845. */
  846. if (msm_obj->pages)
  847. kvfree(msm_obj->pages);
  848. drm_prime_gem_destroy(obj, msm_obj->sgt);
  849. } else {
  850. msm_gem_vunmap_locked(obj);
  851. put_pages(obj);
  852. }
  853. if (msm_obj->resv == &msm_obj->_resv)
  854. dma_resv_fini(msm_obj->resv);
  855. drm_gem_object_release(obj);
  856. mutex_unlock(&msm_obj->lock);
  857. kfree(msm_obj);
  858. }
  859. /* convenience method to construct a GEM buffer object, and userspace handle */
  860. int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
  861. uint32_t size, uint32_t flags, uint32_t *handle,
  862. char *name)
  863. {
  864. struct drm_gem_object *obj;
  865. int ret;
  866. obj = msm_gem_new(dev, size, flags);
  867. if (IS_ERR(obj))
  868. return PTR_ERR(obj);
  869. if (name)
  870. msm_gem_object_set_name(obj, "%s", name);
  871. ret = drm_gem_handle_create(file, obj, handle);
  872. /* drop reference from allocate - handle holds it now */
  873. drm_gem_object_put(obj);
  874. return ret;
  875. }
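/*
 * Common object setup shared by allocation and import: validate the
 * cache flags, initialise the reservation object and bookkeeping lists,
 * look up the default (unsecure) SMMU address space and put the new
 * object on the inactive list.
 */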
  876. static int msm_gem_new_impl(struct drm_device *dev,
  877. uint32_t size, uint32_t flags,
  878. struct dma_resv *resv,
  879. struct drm_gem_object **obj,
  880. bool struct_mutex_locked)
  881. {
  882. struct msm_drm_private *priv = dev->dev_private;
  883. struct msm_gem_object *msm_obj;
  884. switch (flags & MSM_BO_CACHE_MASK) {
  885. case MSM_BO_UNCACHED:
  886. case MSM_BO_CACHED:
  887. case MSM_BO_WC:
  888. break;
  889. default:
  890. dev_err(dev->dev, "invalid cache flag: %x\n",
  891. (flags & MSM_BO_CACHE_MASK));
  892. return -EINVAL;
  893. }
  894. msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
  895. if (!msm_obj)
  896. return -ENOMEM;
  897. mutex_init(&msm_obj->lock);
  898. msm_obj->flags = flags;
  899. msm_obj->madv = MSM_MADV_WILLNEED;
  900. if (resv) {
  901. msm_obj->resv = resv;
  902. } else {
  903. msm_obj->resv = &msm_obj->_resv;
  904. dma_resv_init(msm_obj->resv);
  905. }
  906. INIT_LIST_HEAD(&msm_obj->submit_entry);
  907. INIT_LIST_HEAD(&msm_obj->vmas);
  908. INIT_LIST_HEAD(&msm_obj->iova_list);
  909. msm_obj->aspace = msm_gem_smmu_address_space_get(dev,
  910. MSM_SMMU_DOMAIN_UNSECURE);
  911. if (IS_ERR(msm_obj->aspace))
  912. msm_obj->aspace = NULL;
  913. msm_obj->in_active_list = false;
  914. msm_obj->obj_dirty = false;
  915. mutex_lock(&priv->mm_lock);
  916. list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
  917. mutex_unlock(&priv->mm_lock);
  918. *obj = &msm_obj->base;
  919. return 0;
  920. }
  921. static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
  922. uint32_t size, uint32_t flags, bool struct_mutex_locked)
  923. {
  924. struct msm_drm_private *priv = dev->dev_private;
  925. struct drm_gem_object *obj = NULL;
  926. bool use_vram = false;
  927. int ret;
  928. size = PAGE_ALIGN(size);
  929. if (!iommu_present(&platform_bus_type))
  930. use_vram = true;
  931. else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
  932. use_vram = true;
  933. if (WARN_ON(use_vram && !priv->vram.size))
  934. return ERR_PTR(-EINVAL);
  935. /* Disallow zero sized objects as they make the underlying
  936. * infrastructure grumpy
  937. */
  938. if (size == 0)
  939. return ERR_PTR(-EINVAL);
  940. ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
  941. if (ret)
  942. goto fail;
  943. if (use_vram) {
  944. struct msm_gem_vma *vma;
  945. struct page **pages;
  946. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  947. mutex_lock(&msm_obj->lock);
  948. vma = add_vma(obj, NULL);
  949. mutex_unlock(&msm_obj->lock);
  950. if (IS_ERR(vma)) {
  951. ret = PTR_ERR(vma);
  952. goto fail;
  953. }
  954. to_msm_bo(obj)->vram_node = &vma->node;
  955. drm_gem_private_object_init(dev, obj, size);
  956. pages = get_pages(obj);
  957. if (IS_ERR(pages)) {
  958. ret = PTR_ERR(pages);
  959. goto fail;
  960. }
  961. vma->iova = physaddr(obj);
  962. } else {
  963. ret = drm_gem_object_init(dev, obj, size);
  964. if (ret)
  965. goto fail;
  966. }
  967. return obj;
  968. fail:
  969. drm_gem_object_put(obj);
  970. return ERR_PTR(ret);
  971. }
  972. struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
  973. uint32_t size, uint32_t flags)
  974. {
  975. return _msm_gem_new(dev, size, flags, true);
  976. }
  977. struct drm_gem_object *msm_gem_new(struct drm_device *dev,
  978. uint32_t size, uint32_t flags)
  979. {
  980. return _msm_gem_new(dev, size, flags, false);
  981. }
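/*
 * Finish importing a dma-buf whose attachment mapping was deferred (or
 * has to be redone after a re-attach): map the attachment to obtain the
 * sg table, honouring MSM_BO_SKIPSYNC by skipping CPU sync on map.
 */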
  982. int msm_gem_delayed_import(struct drm_gem_object *obj)
  983. {
  984. struct dma_buf_attachment *attach;
  985. struct sg_table *sgt;
  986. struct msm_gem_object *msm_obj;
  987. int ret = 0;
  988. if (!obj) {
  989. DRM_ERROR("NULL drm gem object\n");
  990. return -EINVAL;
  991. }
  992. msm_obj = to_msm_bo(obj);
  993. if (!obj->import_attach) {
  994. DRM_ERROR("NULL dma_buf_attachment in drm gem object\n");
  995. return -EINVAL;
  996. }
  997. attach = obj->import_attach;
  998. attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
  999. if (msm_obj->flags & MSM_BO_SKIPSYNC)
  1000. attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
  1001. /*
  1002. * dma_buf_map_attachment will call dma_map_sg for ion buffer
  1003. * mapping, and iova will get mapped when the function returns.
  1004. */
  1005. sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
  1006. if (IS_ERR(sgt)) {
  1007. ret = PTR_ERR(sgt);
  1008. DRM_ERROR("dma_buf_map_attachment failure, err=%d\n",
  1009. ret);
  1010. goto fail_import;
  1011. }
  1012. msm_obj->sgt = sgt;
  1013. msm_obj->pages = NULL;
  1014. fail_import:
  1015. return ret;
  1016. }
  1017. struct drm_gem_object *msm_gem_import(struct drm_device *dev,
  1018. struct dma_buf *dmabuf, struct sg_table *sgt)
  1019. {
  1020. struct msm_gem_object *msm_obj;
  1021. struct drm_gem_object *obj = NULL;
  1022. uint32_t size;
  1023. int ret;
  1024. unsigned long flags = 0;
  1025. size = PAGE_ALIGN(dmabuf->size);
  1026. ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj,
  1027. false);
  1028. if (ret)
  1029. goto fail;
  1030. drm_gem_private_object_init(dev, obj, size);
  1031. msm_obj = to_msm_bo(obj);
  1032. mutex_lock(&msm_obj->lock);
  1033. msm_obj->sgt = sgt;
  1034. msm_obj->pages = NULL;
  1035. /*
  1036. * 1) If sg table is NULL, user should call msm_gem_delayed_import
  1037. * to add back the sg table to the drm gem object.
  1038. *
  1039. * 2) Add buffer flag unconditionally for all import cases.
  1040. * # Cached buffer will be attached immediately hence sgt will
  1041. * be available upon gem obj creation.
  1042. * # Un-cached buffer will follow delayed attach hence sgt
  1043. * will be NULL upon gem obj creation.
  1044. */
  1045. msm_obj->flags |= MSM_BO_EXTBUF;
  1046. /*
  1047. * For all uncached buffers, there is no need to perform cache
  1048. * maintenance on dma map/unmap time.
  1049. */
  1050. ret = dma_buf_get_flags(dmabuf, &flags);
  1051. if (ret) {
  1052. DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
  1053. } else if ((flags & ION_FLAG_CACHED) == 0) {
  1054. DRM_DEBUG("Buffer is uncached type\n");
  1055. msm_obj->flags |= MSM_BO_SKIPSYNC;
  1056. }
  1057. mutex_unlock(&msm_obj->lock);
  1058. return obj;
  1059. fail:
  1060. drm_gem_object_put(obj);
  1061. return ERR_PTR(ret);
  1062. }
  1063. static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
  1064. uint32_t flags, struct msm_gem_address_space *aspace,
  1065. struct drm_gem_object **bo, uint64_t *iova, bool locked)
  1066. {
  1067. void *vaddr;
  1068. struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
  1069. int ret;
  1070. if (IS_ERR(obj))
  1071. return ERR_CAST(obj);
  1072. if (iova) {
  1073. ret = msm_gem_get_iova(obj, aspace, iova);
  1074. if (ret)
  1075. goto err;
  1076. }
  1077. vaddr = msm_gem_get_vaddr(obj);
  1078. if (IS_ERR(vaddr)) {
  1079. msm_gem_put_iova(obj, aspace);
  1080. ret = PTR_ERR(vaddr);
  1081. goto err;
  1082. }
  1083. if (bo)
  1084. *bo = obj;
  1085. return vaddr;
  1086. err:
  1087. if (locked)
  1088. drm_gem_object_put_locked(obj);
  1089. else
  1090. drm_gem_object_put(obj);
  1091. return ERR_PTR(ret);
  1092. }
  1093. void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
  1094. uint32_t flags, struct msm_gem_address_space *aspace,
  1095. struct drm_gem_object **bo, uint64_t *iova)
  1096. {
  1097. return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
  1098. }
  1099. void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
  1100. uint32_t flags, struct msm_gem_address_space *aspace,
  1101. struct drm_gem_object **bo, uint64_t *iova)
  1102. {
  1103. return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
  1104. }
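/*
 * Typical msm_gem_kernel_new()/msm_gem_kernel_put() usage (illustrative
 * sketch only; 'dev' and 'aspace' are assumed to be the driver's
 * drm_device and a valid address space):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr;
 *
 *	vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... use vaddr for CPU access, iova for the device ...
 *	msm_gem_kernel_put(bo, aspace, false);
 */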
  1105. void msm_gem_kernel_put(struct drm_gem_object *bo,
  1106. struct msm_gem_address_space *aspace, bool locked)
  1107. {
  1108. if (IS_ERR_OR_NULL(bo))
  1109. return;
  1110. msm_gem_put_vaddr(bo);
  1111. msm_gem_unpin_iova(bo, aspace);
  1112. if (locked)
  1113. drm_gem_object_put_locked(bo);
  1114. else
  1115. drm_gem_object_put(bo);
  1116. }
  1117. void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
  1118. {
  1119. struct msm_gem_object *msm_obj = to_msm_bo(bo);
  1120. va_list ap;
  1121. if (!fmt)
  1122. return;
  1123. va_start(ap, fmt);
  1124. vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
  1125. va_end(ap);
  1126. }
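/*
 * msm_gem_get_buffer()/msm_gem_put_buffer() pin both an iova and a
 * kernel mapping for a framebuffer-backing object and compute a
 * guard-byte-aligned iova/offset; the put variant drops both
 * references again.
 */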
  1127. void msm_gem_put_buffer(struct drm_gem_object *gem)
  1128. {
  1129. struct msm_gem_object *msm_gem;
  1130. if (!gem)
  1131. return;
  1132. msm_gem = to_msm_bo(gem);
  1133. msm_gem_put_iova(gem, msm_gem->aspace);
  1134. msm_gem_put_vaddr(gem);
  1135. }
  1136. int msm_gem_get_buffer(struct drm_gem_object *gem,
  1137. struct drm_device *dev, struct drm_framebuffer *fb,
  1138. uint32_t align_size)
  1139. {
  1140. struct msm_gem_object *msm_gem;
  1141. uint32_t size;
  1142. uint64_t iova_aligned;
  1143. int ret = -EINVAL;
  1144. if (!gem) {
  1145. DRM_ERROR("invalid drm gem");
  1146. return ret;
  1147. }
  1148. msm_gem = to_msm_bo(gem);
  1149. size = PAGE_ALIGN(gem->size);
  1150. if (size < (align_size + GUARD_BYTES)) {
  1151. DRM_ERROR("invalid gem size");
  1152. goto exit;
  1153. }
  1154. msm_gem_smmu_address_space_get(dev, MSM_SMMU_DOMAIN_UNSECURE);
  1155. if (PTR_ERR(msm_gem->aspace) == -ENODEV) {
  1156. DRM_DEBUG("IOMMU not present, relying on VRAM.");
  1157. } else if (IS_ERR_OR_NULL(msm_gem->aspace)) {
  1158. ret = PTR_ERR(msm_gem->aspace);
  1159. DRM_ERROR("failed to get aspace");
  1160. goto exit;
  1161. }
  1162. ret = msm_gem_get_iova(gem, msm_gem->aspace, &msm_gem->iova);
  1163. if (ret) {
  1164. DRM_ERROR("failed to get the iova ret %d", ret);
  1165. goto exit;
  1166. }
  1167. msm_gem_get_vaddr(gem);
  1168. if (IS_ERR_OR_NULL(msm_gem->vaddr)) {
  1169. DRM_ERROR("failed to get vaddr");
  1170. goto exit;
  1171. }
  1172. iova_aligned = (msm_gem->iova + GUARD_BYTES) & ALIGNED_OFFSET;
  1173. msm_gem->offset = iova_aligned - msm_gem->iova;
  1174. msm_gem->iova = msm_gem->iova + msm_gem->offset;
  1175. return 0;
  1176. exit:
  1177. msm_gem_put_buffer(gem);
  1178. return ret;
  1179. }