msm_gem.c

/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/ion.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "sde_dbg.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	return !msm_obj->vram_node;
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}
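
/*
 * Get (and cache) the backing pages of a natively allocated object:
 * shmem pages via drm_gem_get_pages() unless the object lives in the
 * VRAM carveout. On first use this also builds the sg table and, for
 * WC/uncached objects, maps it on the aspace device so the freshly
 * allocated pages are clean. Imported dma-buf objects simply return
 * whatever page array they already have.
 */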
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct device *aspace_dev;

	if (obj->import_attach)
		return msm_obj->pages;

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		if (msm_obj->vram_node) {
			goto end;
		/*
		 * For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent
		 */
		} else if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
			aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
			if (aspace_dev)
				dma_map_sg(aspace_dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
			else
				DRM_ERROR("failed to get aspace_device\n");
		}
	}

end:
	return msm_obj->pages;
}
static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);

	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
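
/*
 * Flush any CPU-side writes to the object's backing store out to the
 * device by calling dma_sync_sg_for_device() on its sg list. Objects
 * placed in the VRAM carveout are skipped, since they are not mapped
 * through an aspace device.
 */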
void msm_gem_sync(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj;
	struct device *aspace_dev;

	if (!obj)
		return;

	msm_obj = to_msm_bo(obj);

	if (msm_obj->vram_node)
		return;

	/*
	 * dma_sync_sg_for_device synchronises a single contiguous or
	 * scatter/gather mapping for the CPU and device.
	 */
	aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
	if (aspace_dev)
		dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	else
		DRM_ERROR("failed to get aspace_device\n");
}
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
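
/*
 * Page-fault handler for mmap'd GEM objects: look up the backing page
 * for the faulting address and insert its pfn into the VMA as a mixed
 * mapping. Returns VM_FAULT_NOPAGE when the object lock cannot be taken
 * (so the fault is retried) and VM_FAULT_SIGBUS for purged objects.
 */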
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %pK pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);

	return offset;
}
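
/*
 * Return the physical/DMA address of the first sg entry backing the
 * object. For imported buffers without an sg table yet, the dma-buf
 * attachment is mapped here first; 0 is returned if that mapping fails.
 */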
dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct sg_table *sgt;

	if (!msm_obj->sgt) {
		sgt = dma_buf_map_attachment(obj->import_attach,
				DMA_BIDIRECTIONAL);
		if (IS_ERR_OR_NULL(sgt)) {
			DRM_ERROR("dma_buf_map_attachment failure, err=%ld\n",
					PTR_ERR(sgt));
			return 0;
		}
		msm_obj->sgt = sgt;
	}

	return sg_phys(msm_obj->sgt->sgl);
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;
	msm_obj->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}
static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
				msm_obj->flags);
		/*
		 * put_iova removes the domain connected to the obj which makes
		 * the aspace inaccessible. Store the aspace, as it is used to
		 * update the active_list during gem_free_obj and gem_purge.
		 */
		msm_obj->aspace = vma->aspace;
		del_vma(vma);
	}
}
/* get iova, taking a reference. Should have a matching put */
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);
	if (!vma) {
		struct page **pages;
		struct device *dev;
		struct dma_buf *dmabuf;
		bool reattach = false;

		dev = msm_gem_get_aspace_device(aspace);
		if ((dev && obj->import_attach) &&
				((dev != obj->import_attach->dev) ||
				msm_obj->obj_dirty)) {
			dmabuf = obj->import_attach->dmabuf;

			DRM_DEBUG("detach nsec-dev:%pK attach sec-dev:%pK\n",
					obj->import_attach->dev, dev);
			SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt,
					msm_obj->obj_dirty);

			if (msm_obj->sgt)
				dma_buf_unmap_attachment(obj->import_attach,
					msm_obj->sgt, DMA_BIDIRECTIONAL);
			dma_buf_detach(dmabuf, obj->import_attach);

			obj->import_attach = dma_buf_attach(dmabuf, dev);
			if (IS_ERR(obj->import_attach)) {
				DRM_ERROR("dma_buf_attach failure, err=%ld\n",
						PTR_ERR(obj->import_attach));
				ret = PTR_ERR(obj->import_attach);
				return ret;
			}
			msm_obj->obj_dirty = false;
			reattach = true;
		}

		/* perform delayed import for buffers without existing sgt */
		if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))
				|| reattach) {
			ret = msm_gem_delayed_import(obj);
			if (ret) {
				DRM_ERROR("delayed dma-buf import failed %d\n",
						ret);
				return ret;
			}
		}

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			return ret;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT,
				msm_obj->flags);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	if (aspace && !msm_obj->in_active_list) {
		mutex_lock(&aspace->list_lock);
		msm_gem_add_obj_to_aspace_active_list(aspace, obj);
		mutex_unlock(&aspace->list_lock);
	}

	return 0;

fail:
	del_vma(vma);
	return ret;
}
static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
			obj->size >> PAGE_SHIFT, msm_obj->flags);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}
/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
				msm_obj->flags);

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
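
/*
 * Notify an address space's clients that its SMMU domain is being
 * attached or detached, and unmap/map the buffers on its active list
 * accordingly. On detach, imported buffers are unmapped and marked
 * dirty so they are re-attached on the next iova lookup; on attach,
 * active buffers are mapped back before clients are notified.
 */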
void msm_gem_aspace_domain_attach_detach_update(
		struct msm_gem_address_space *aspace,
		bool is_detach)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	struct aspace_client *aclient;
	int ret;
	uint64_t iova;

	if (!aspace)
		return;

	mutex_lock(&aspace->list_lock);
	if (is_detach) {
		/* Indicate to clients domain is getting detached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}

		/*
		 * Unmap active buffers. Typically clients should do this
		 * when the callback is called, but it still needs to be done
		 * here for the buffers which are not attached to any planes.
		 */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			if (obj->import_attach) {
				mutex_lock(&msm_obj->lock);
				put_iova(obj);
				msm_obj->obj_dirty = true;
				mutex_unlock(&msm_obj->lock);
			}
		}
	} else {
		/* map active buffers */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			ret = msm_gem_get_iova(obj, aspace, &iova);
			if (ret) {
				mutex_unlock(&aspace->list_lock);
				return;
			}
		}

		/* Indicate to clients domain is attached */
		list_for_each_entry(aclient, &aspace->clients, list) {
			if (aclient->cb)
				aclient->cb(aclient->cb_data,
						is_detach);
		}
	}
	mutex_unlock(&aspace->list_lock);
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}
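
/*
 * Map the object into kernel address space and return the vaddr,
 * bumping vmap_count. Imported dma-buf objects go through
 * dma_buf_begin_cpu_access()/dma_buf_vmap(); native objects are
 * vmap()'d write-combined. 'madv' is the maximum madvise state the
 * caller is willing to accept (see msm_gem_get_vaddr_active()).
 */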
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		if (obj->import_attach) {
			ret = dma_buf_begin_cpu_access(
				obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
			if (ret)
				goto fail;

			msm_obj->vaddr =
				dma_buf_vmap(obj->import_attach->dmabuf);
		} else {
			msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		}

		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
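
/*
 * Reclaim the backing memory of a purgeable object: drop its iovas,
 * kernel mapping, pages and mmap offset, mark it purged, and tell shmem
 * to truncate the backing file immediately so the pages really go back
 * to the system.
 */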
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);
	if (msm_obj->aspace) {
		mutex_lock(&msm_obj->aspace->list_lock);
		msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
				obj);
		mutex_unlock(&msm_obj->aspace->list_lock);
	}

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}
static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
		dma_buf_end_cpu_access(obj->import_attach->dmabuf,
				DMA_BIDIRECTIONAL);
	} else {
		vunmap(msm_obj->vaddr);
	}

	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(msm_obj->resv, write,
						true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = msm_obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %pK\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {
		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;

		seq_puts(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);
	if (msm_obj->aspace) {
		mutex_lock(&msm_obj->aspace->list_lock);
		msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
				obj);
		mutex_unlock(&msm_obj->aspace->list_lock);
	}

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		dma_resv_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}
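
/*
 * Common allocation path: validate the cache flags, allocate and
 * initialise the msm_gem_object (lock, madvise state, reservation
 * object, lists) and put it on the device's inactive list, taking
 * struct_mutex unless the caller already holds it.
 */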
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct dma_resv *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		dma_resv_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);
	INIT_LIST_HEAD(&msm_obj->iova_list);
	msm_obj->aspace = NULL;
	msm_obj->in_active_list = false;
	msm_obj->obj_dirty = false;

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);
		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);

		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}
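
/*
 * Map the dma-buf attachment of an imported object and stash the
 * resulting sg table. Used for buffers that skipped mapping at import
 * time (or were re-attached to a different device), so the iova is only
 * set up when it is first needed.
 */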
int msm_gem_delayed_import(struct drm_gem_object *obj)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct msm_gem_object *msm_obj;
	int ret = 0;

	if (!obj) {
		DRM_ERROR("NULL drm gem object\n");
		return -EINVAL;
	}

	msm_obj = to_msm_bo(obj);

	if (!obj->import_attach) {
		DRM_ERROR("NULL dma_buf_attachment in drm gem object\n");
		return -EINVAL;
	}

	attach = obj->import_attach;
	attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;

	if (msm_obj->flags & MSM_BO_SKIPSYNC)
		attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	/*
	 * All SMMU mappings are generated with cache hint.
	 * SSPP cache hint will control the LLCC access.
	 */
	if (msm_obj->flags & MSM_BO_KEEPATTRS)
		attach->dma_map_attrs |=
				(DMA_ATTR_IOMMU_USE_UPSTREAM_HINT |
				DMA_ATTR_IOMMU_USE_LLC_NWA);

	/*
	 * dma_buf_map_attachment will call dma_map_sg for ion buffer
	 * mapping, and iova will get mapped when the function returns.
	 */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		DRM_ERROR("dma_buf_map_attachment failure, err=%d\n",
				ret);
		goto fail_import;
	}

	msm_obj->sgt = sgt;
	msm_obj->pages = NULL;

fail_import:
	return ret;
}
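
/*
 * Wrap an imported dma-buf in a GEM object. The sg table may be NULL
 * here, in which case msm_gem_delayed_import() attaches it later; the
 * object is always flagged MSM_BO_EXTBUF, and uncached ION buffers are
 * additionally flagged MSM_BO_SKIPSYNC to skip cache maintenance on
 * map/unmap.
 */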
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	uint32_t size;
	int ret;
	unsigned long flags = 0;

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj,
			false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = NULL;
	/*
	 * 1) If sg table is NULL, user should call msm_gem_delayed_import
	 *    to add back the sg table to the drm gem object.
	 *
	 * 2) Add buffer flag unconditionally for all import cases.
	 *    # Cached buffer will be attached immediately hence sgt will
	 *      be available upon gem obj creation.
	 *    # Un-cached buffer will follow delayed attach hence sgt
	 *      will be NULL upon gem obj creation.
	 */
	msm_obj->flags |= MSM_BO_EXTBUF;

	/*
	 * For all uncached buffers, there is no need to perform cache
	 * maintenance on dma map/unmap time.
	 */
	ret = dma_buf_get_flags(dmabuf, &flags);
	if (ret) {
		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
	} else if ((flags & ION_FLAG_CACHED) == 0) {
		DRM_DEBUG("Buffer is uncached type\n");
		msm_obj->flags |= MSM_BO_SKIPSYNC;
	}

	mutex_unlock(&msm_obj->lock);

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}
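
/*
 * Allocate a kernel-owned buffer object: create the GEM object, map it
 * into the given address space (when an iova pointer is supplied) and
 * return its kernel vaddr. Freed with msm_gem_kernel_put().
 */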
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;

err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}