// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				  unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}
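
/*
 * Map a physically contiguous range into the context's page tables in
 * SZ_4K steps. If any page fails to map, the part of the mapping that was
 * already established is unrolled again.
 */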
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}
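
/*
 * Map all DMA segments of a scatterlist at consecutive device addresses
 * starting at @iova. Already mapped segments are torn down again if a
 * later segment fails to map.
 */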
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	context->flush_seq++;

	return 0;

fail:
	etnaviv_context_unmap(context, iova, da - iova);
	return ret;
}

static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	context->flush_seq++;
}

static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
					 struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	lockdep_assert_held(&context->lock);

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}
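
/*
 * Tear down an idle mapping so its address range can be reused. Must be
 * called with the context lock held; drops the mapping's reference on the
 * context.
 */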
void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_iommu_context *context = mapping->context;

	lockdep_assert_held(&context->lock);
	WARN_ON(mapping->use);

	etnaviv_iommu_remove_mapping(context, mapping);
	etnaviv_iommu_context_put(mapping->context);
	mapping->context = NULL;
	list_del_init(&mapping->mmu_node);
}
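
/*
 * Find a free IOVA range of @size bytes in the context's address space.
 * If the first attempt fails with -ENOSPC, idle mappings are scanned,
 * reaped to make room, and the allocation is retried in eviction mode.
 */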
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = 0;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_reap_mapping(m);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}
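
/*
 * Insert @node at the exact address @va requested by userspace, reaping
 * idle mappings that still occupy the range.
 */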
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
				      struct drm_mm_node *node, size_t size, u64 va)
{
	struct etnaviv_vram_mapping *m, *n;
	struct drm_mm_node *scan_node;
	LIST_HEAD(scan_list);
	int ret;

	lockdep_assert_held(&context->lock);

	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					  va + size, DRM_MM_INSERT_LOWEST);
	if (ret != -ENOSPC)
		return ret;

	/*
	 * When we can't insert the node, due to an existing mapping blocking
	 * the address space, there are two possible reasons:
	 * 1. Userspace genuinely messed up and tried to reuse address space
	 * before the last job using this VMA has finished executing.
	 * 2. The existing buffer mappings are idle, but the buffers are not
	 * destroyed yet (likely due to being referenced by another context) in
	 * which case the mappings will not be cleaned up and we must reap them
	 * here to make space for the new mapping.
	 */
	drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
		m = container_of(scan_node, struct etnaviv_vram_mapping,
				 vram_node);

		if (m->use)
			return -ENOSPC;

		list_add(&m->scan_node, &scan_list);
	}

	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
		etnaviv_iommu_reap_mapping(m);
		list_del_init(&m->scan_node);
	}

	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}
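
/*
 * Map a GEM object into the context's address space. On MMUv1 a contiguous
 * buffer that fits into the linear window is used directly, without going
 * through the page tables; otherwise an IOVA range is allocated (or the
 * requested @va is claimed) and the object's pages are mapped there.
 */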
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			mapping->context = etnaviv_iommu_context_get(context);
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	mapping->context = etnaviv_iommu_context_get(context);
	list_add_tail(&mapping->mmu_node, &context->mappings);
unlock:
	mutex_unlock(&context->lock);

	return ret;
}
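
/*
 * Drop a GEM object's mapping from the context. If the mapping has already
 * been reaped by another thread there is nothing left to do; otherwise the
 * pages are unmapped (if they were mapped through the page tables) and the
 * context reference is dropped.
 */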
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* Bail if the mapping has been reaped by another thread */
	if (!mapping->context) {
		mutex_unlock(&context->lock);
		return;
	}

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&context->lock);
	etnaviv_iommu_context_put(context);
}

static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}
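
/*
 * Allocate a new IOMMU context for the given global MMU state and map the
 * command buffer suballocator into it. On MMUv1 the command buffer mapping
 * must stay inside the 2 GiB linear window.
 */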
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}
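
/*
 * Make the command buffer suballocator region visible to the GPU. On MMUv1
 * the region is reachable through the linear window, so only the IOVA is
 * computed; otherwise a read-only mapping is created in the page tables.
 * The mapping is reference counted via mapping->use.
 */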
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}

void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}
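
/*
 * Set up the MMU state shared by all GPU cores on this device: detect the
 * MMU version, allocate the bad page used as a fault target and, for MMUv2,
 * the page table array. The state is reference counted across GPU cores.
 */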
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
						  &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}

void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}