// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_lru(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & MSM_BO_WC)
			sync_for_device(msm_obj);

		update_lru(obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & MSM_BO_WC)
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
		update_lru(obj);
	}
}
static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_assert_locked(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	if (!IS_ERR(p)) {
		to_msm_bo(obj)->pin_count++;
		update_lru(obj);
	}

	return p;
}

struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
	struct page **p;

	msm_gem_lock(obj);
	p = msm_gem_pin_pages_locked(obj);
	msm_gem_unlock(obj);

	return p;
}

void msm_gem_unpin_pages(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_unpin_locked(obj);
	msm_gem_unlock(obj);
}

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & MSM_BO_WC)
		return pgprot_writecombine(prot);
	return prot;
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_pfn(vma, vmf->address, pfn);

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	msm_gem_assert_locked(obj);

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);

	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping. In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	msm_gem_assert_locked(obj);

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_init_vma(aspace, vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	msm_gem_assert_locked(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	pages = msm_gem_pin_pages_locked(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
	if (ret)
		msm_gem_unpin_locked(obj);

	return ret;
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);

	update_lru(obj);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	msm_gem_assert_locked(obj);

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret)
		*iova = vma->iova;

	return ret;
}

/*
 * get iova and pin it. Should have a matching put
 * limits iova to specified range (in pages)
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}
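
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the expected pin/unpin pairing for the helpers above. The gpu->aspace
 * pointer and the surrounding flow are assumptions for the example only;
 * callers use whichever address space the mapping belongs to.
 *
 *	uint64_t iova;
 *	int ret;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, gpu->aspace, &iova);
 *	if (ret)
 *		return ret;
 *	// ... emit iova into the cmdstream, submit, wait for completion ...
 *	msm_gem_unpin_iova(obj, gpu->aspace);
 */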
/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	msm_gem_purge_vma(vma->aspace, vma);
	msm_gem_close_vma(vma->aspace, vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it. Fails if the requested iova is
 * not available. Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;

		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}
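
/*
 * Illustrative sketch (editor's addition, assumed usage only): reserving the
 * object at a caller-chosen address, then releasing that reservation by
 * passing zero, as described in the comment above. The example iova value
 * is arbitrary.
 *
 *	ret = msm_gem_set_iova(obj, aspace, 0x100000000ULL);  // place at fixed iova
 *	// ...
 *	ret = msm_gem_set_iova(obj, aspace, 0);                // clear the vma again
 */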
/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unpin_vma(vma);
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	msm_gem_assert_locked(obj);

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		update_lru(obj);
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}
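
/*
 * Illustrative sketch (editor's addition, not from this file): kernel code
 * that needs a CPU mapping brackets its access with the get/put pair above,
 * so vmap_count stays balanced for the shrinker's is_vunmapable() check.
 *
 *	u32 *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	ptr[0] = 0xdeadbeef;   // CPU access through the vmap'd address
 *	msm_gem_put_vaddr(obj);
 */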
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	update_lru(obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}
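
/*
 * Illustrative sketch (editor's addition, assumed caller): how a caller such
 * as a GEM_MADVISE ioctl path would interpret the return value above — a
 * request for MSM_MADV_WILLNEED only "retains" the buffer contents if the
 * pages were not already purged.
 *
 *	int retained = msm_gem_madvise(obj, MSM_MADV_WILLNEED);
 *	if (retained < 0)
 *		return retained;   // -errno
 *	if (!retained)
 *		;                  // backing pages were purged; contents are gone
 */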
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(is_unevictable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

static void update_lru(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(&msm_obj->base);

	if (!msm_obj->pages) {
		GEM_WARN_ON(msm_obj->pin_count);
		GEM_WARN_ON(msm_obj->vmap_count);

		drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
	} else if (msm_obj->pin_count || msm_obj->vmap_count) {
		drm_gem_lru_move_tail(&priv->lru.pinned, obj);
	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
		drm_gem_lru_move_tail(&priv->lru.willneed, obj);
	} else {
		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);

		drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
	}
}

bool msm_gem_active(struct drm_gem_object *obj)
{
	msm_gem_assert_locked(obj);

	if (to_msm_bo(obj)->pin_count)
		return true;

	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
				    true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (msm_gem_active(obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;

			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				msm_gem_vma_inuse(vma));
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;

		seq_puts(m, " ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total: %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active: %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly! Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}
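
/*
 * Illustrative sketch (editor's addition, assumed caller): a typical
 * in-kernel user of the helper above keeps only the returned handle; the
 * object reference is already dropped by msm_gem_new_handle() itself. The
 * buffer name and size here are example values.
 *
 *	uint32_t handle;
 *	int ret = msm_gem_new_handle(dev, file, PAGE_ALIGN(len),
 *			MSM_BO_WC, &handle, "example-buffer");
 *	if (ret)
 *		return ret;
 */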
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	drm_gem_lru_move_tail(&priv->lru.pinned, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}
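
/*
 * Illustrative sketch (editor's addition, not from this file): how
 * GPU-internal allocations typically pair the two helpers above, e.g. for a
 * ring or memptrs buffer. The names gpu->aspace and example_bo are
 * assumptions for the example only.
 *
 *	struct drm_gem_object *example_bo;
 *	uint64_t example_iova;
 *	void *ptr;
 *
 *	ptr = msm_gem_kernel_new(dev, PAGE_SIZE, MSM_BO_WC,
 *			gpu->aspace, &example_bo, &example_iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	// ... use ptr for CPU access and example_iova for the GPU ...
 *	msm_gem_kernel_put(example_bo, gpu->aspace);
 */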
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}