msm_gem.c

/*
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/ion.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "sde_dbg.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct device *aspace_dev;

	if (obj->import_attach)
		return msm_obj->pages;

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
			aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
			dma_map_sg(aspace_dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		}
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);

	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

void msm_gem_sync(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj;
	struct device *aspace_dev;

	if (!obj)
		return;

	msm_obj = to_msm_bo(obj);

	/*
	 * dma_sync_sg_for_device synchronises a single contiguous or
	 * scatter/gather mapping for the CPU and device.
	 */
	aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
	dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
}
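
/*
 * Illustrative only: a minimal sketch of how a caller that has just written
 * into an already-backed buffer through its kernel mapping might push those
 * writes toward the device. "obj" is a hypothetical GEM object, not something
 * defined in this file.
 *
 *	u32 *cmds = msm_gem_get_vaddr(obj);
 *
 *	if (!IS_ERR(cmds)) {
 *		cmds[0] = 0x1;			// CPU-side update
 *		msm_gem_put_vaddr(obj);
 *		msm_gem_sync(obj);		// flush the sg list to the device
 *	}
 */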

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we dont need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %pK pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);

	return offset;
}

dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct sg_table *sgt;

	if (!msm_obj->sgt) {
		sgt = dma_buf_map_attachment(obj->import_attach,
				DMA_BIDIRECTIONAL);
		if (IS_ERR_OR_NULL(sgt)) {
			DRM_ERROR("dma_buf_map_attachment failure, err=%ld\n",
					PTR_ERR(sgt));
			return 0;
		}
		msm_obj->sgt = sgt;
	}

	return sg_phys(msm_obj->sgt->sgl);
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;
	msm_obj->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
				msm_obj->flags);
		/*
		 * put_iova removes the domain connected to the obj which makes
		 * the aspace inaccessible. Store the aspace, as it is used to
		 * update the active_list during gem_free_obj and gem_purge.
		 */
		msm_obj->aspace = vma->aspace;
		del_vma(vma);
	}
}

/* get iova, taking a reference. Should have a matching put */
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);
	if (!vma) {
		struct page **pages;

		/* perform delayed import for buffers without existing sgt */
		if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))) {
			ret = msm_gem_delayed_import(obj);
			if (ret) {
				DRM_ERROR("delayed dma-buf import failed %d\n",
						ret);
				goto unlock;
			}
		}

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT,
				msm_obj->flags);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	if (aspace && !msm_obj->in_active_list) {
		mutex_lock(&aspace->list_lock);
		msm_gem_add_obj_to_aspace_active_list(aspace, obj);
		mutex_unlock(&aspace->list_lock);
	}

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
			obj->size >> PAGE_SHIFT, msm_obj->flags);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);

	return ret;
}

int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}
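
/*
 * Illustrative only: the expected pairing for the iova helpers above, assuming
 * a hypothetical caller that owns "obj" and an attached address space
 * "aspace".
 *
 *	uint64_t iova;
 *
 *	if (!msm_gem_get_and_pin_iova(obj, aspace, &iova)) {
 *		// program "iova" into the hardware ...
 *		msm_gem_unpin_iova(obj, aspace);
 *	}
 */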

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
				msm_obj->flags);

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

void msm_gem_aspace_domain_attach_detach_update(
		struct msm_gem_address_space *aspace,
		bool is_detach)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	struct aspace_client *aclient;
	int ret;
	uint64_t iova;

	if (!aspace)
		return;

	mutex_lock(&aspace->list_lock);

	if (is_detach) {
		/* Indicate to clients domain is getting detached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}

		/*
		 * Unmap active buffers,
		 * typically clients should do this when the callback is called,
		 * but this needs to be done for the buffers which are not
		 * attached to any planes.
		 */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			if (obj->import_attach) {
				mutex_lock(&msm_obj->lock);
				put_iova(obj);
				mutex_unlock(&msm_obj->lock);
			}
		}
	} else {
		/* map active buffers */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			ret = msm_gem_get_iova(obj, aspace, &iova);
			if (ret) {
				mutex_unlock(&aspace->list_lock);
				return;
			}
		}

		/* Indicate to clients domain is attached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}
	}

	mutex_unlock(&aspace->list_lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
				msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		if (obj->import_attach) {
			ret = dma_buf_begin_cpu_access(
				obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
			if (ret)
				goto fail;

			msm_obj->vaddr =
				dma_buf_vmap(obj->import_attach->dmabuf);
		} else {
			msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		}

		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
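
/*
 * Illustrative only: get_vaddr/put_vaddr pairing for temporary CPU access,
 * assuming a hypothetical caller with a source buffer "data" of "len" bytes
 * and an existing GEM object "obj".
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *
 *	if (!IS_ERR(vaddr)) {
 *		memcpy(vaddr, data, len);	// CPU access to the mapping
 *		msm_gem_put_vaddr(obj);		// drop vmap_count again
 *	}
 */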

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);
	if (msm_obj->aspace) {
		mutex_lock(&msm_obj->aspace->list_lock);
		msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
				obj);
		mutex_unlock(&msm_obj->aspace->list_lock);
	}

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
		dma_buf_end_cpu_access(obj->import_attach->dmabuf,
				DMA_BIDIRECTIONAL);
	} else {
		vunmap(msm_obj->vaddr);
	}

	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(msm_obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
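
/*
 * Illustrative only: a hypothetical caller waiting for pending device access
 * (via the reservation object) before touching "obj" from the CPU, using an
 * absolute timeout roughly 100ms in the future.
 *
 *	ktime_t timeout = ktime_add_ms(ktime_get(), 100);
 *	int ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE,
 *			&timeout);
 *
 *	if (!ret) {
 *		// ... CPU access ...
 *		msm_gem_cpu_fini(obj);
 *	}
 */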

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = msm_obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %pK\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {
		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
					vma->aspace->name, vma->iova,
					vma->mapped ? "mapped" : "unmapped",
					vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;

		seq_puts(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);
	if (msm_obj->aspace) {
		mutex_lock(&msm_obj->aspace->list_lock);
		msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
				obj);
		mutex_unlock(&msm_obj->aspace->list_lock);
	}

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf,
					msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		dma_resv_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}
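
/*
 * Illustrative only: this is the same path msm_gem_dumb_create() follows; a
 * hypothetical ioctl handler allocating a write-combined scanout buffer would
 * look roughly like:
 *
 *	uint32_t handle;
 *	int ret = msm_gem_new_handle(dev, file, SZ_4K,
 *			MSM_BO_SCANOUT | MSM_BO_WC, &handle, "example");
 */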

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct dma_resv *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		dma_resv_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);
	INIT_LIST_HEAD(&msm_obj->iova_list);
	msm_obj->aspace = NULL;
	msm_obj->in_active_list = false;

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);
		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);

		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

int msm_gem_delayed_import(struct drm_gem_object *obj)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct msm_gem_object *msm_obj;
	int ret = 0;

	if (!obj) {
		DRM_ERROR("NULL drm gem object\n");
		return -EINVAL;
	}

	msm_obj = to_msm_bo(obj);

	if (!obj->import_attach) {
		DRM_ERROR("NULL dma_buf_attachment in drm gem object\n");
		return -EINVAL;
	}

	attach = obj->import_attach;
	attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;

	if (msm_obj->flags & MSM_BO_SKIPSYNC)
		attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	if (msm_obj->flags & MSM_BO_KEEPATTRS)
		attach->dma_map_attrs |=
				DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;

	/*
	 * dma_buf_map_attachment will call dma_map_sg for ion buffer
	 * mapping, and iova will get mapped when the function returns.
	 */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		DRM_ERROR("dma_buf_map_attachment failure, err=%d\n",
				ret);
		goto fail_import;
	}
	msm_obj->sgt = sgt;

	msm_obj->pages = NULL;

fail_import:
	return ret;
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	uint32_t size;
	int ret;
	unsigned long flags = 0;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj,
			false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = NULL;
	/*
	 * 1) If sg table is NULL, user should call msm_gem_delayed_import
	 *    to add back the sg table to the drm gem object.
	 *
	 * 2) Add buffer flag unconditionally for all import cases.
	 *    # Cached buffer will be attached immediately hence sgt will
	 *      be available upon gem obj creation.
	 *    # Un-cached buffer will follow delayed attach hence sgt
	 *      will be NULL upon gem obj creation.
	 */
	msm_obj->flags |= MSM_BO_EXTBUF;

	/*
	 * For all uncached buffers, there is no need to perform cache
	 * maintenance on dma map/unmap time.
	 */
	ret = dma_buf_get_flags(dmabuf, &flags);
	if (ret) {
		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
	} else if ((flags & ION_FLAG_CACHED) == 0) {
		DRM_DEBUG("Buffer is uncached type\n");
		msm_obj->flags |= MSM_BO_SKIPSYNC;
	}

	mutex_unlock(&msm_obj->lock);

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}
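
/*
 * Illustrative only: importing a dma-buf whose sg table is not available yet
 * (the delayed-attach case described in the comment above). "dmabuf" is a
 * hypothetical, already-acquired struct dma_buf.
 *
 *	struct drm_gem_object *obj = msm_gem_import(dev, dmabuf, NULL);
 *
 *	// Since the object is marked MSM_BO_EXTBUF with no sgt, a later
 *	// msm_gem_get_iova()/msm_gem_get_iova_locked() call triggers
 *	// msm_gem_delayed_import() to attach and map the buffer.
 */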

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}
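
/*
 * Illustrative only: allocation/teardown pairing for kernel-internal buffers,
 * assuming a hypothetical caller that does not hold dev->struct_mutex and has
 * an address space "aspace".
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace,
 *			&bo, &iova);
 *
 *	if (!IS_ERR(vaddr)) {
 *		// ... use vaddr for CPU access, iova for the device ...
 *		msm_gem_kernel_put(bo, aspace, false);
 *	}
 */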

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}