  1. // SPDX-License-Identifier: GPL-2.0 OR MIT
  2. /* Copyright 2017-2019 Qiang Yu <[email protected]> */
  3. #include <linux/slab.h>
  4. #include <linux/dma-mapping.h>
  5. #include "lima_device.h"
  6. #include "lima_vm.h"
  7. #include "lima_gem.h"
  8. #include "lima_regs.h"
/*
 * Per-(BO, VM) mapping record: one lima_bo_va exists for each VM a buffer
 * object is currently mapped into, linked on the BO's va list.
 */
struct lima_bo_va {
	struct list_head list;		/* entry in lima_bo::va, protected by bo->lock */
	unsigned int ref_count;		/* lima_vm_bo_add() calls minus lima_vm_bo_del() calls */
	struct drm_mm_node node;	/* VA range reserved in lima_vm::mm for this BO */
	struct lima_vm *vm;		/* the VM this mapping belongs to (lookup key) */
};
  15. #define LIMA_VM_PD_SHIFT 22
  16. #define LIMA_VM_PT_SHIFT 12
  17. #define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
  18. #define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT
  19. #define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
  20. #define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)
  21. #define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
  22. #define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
  23. #define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
  24. #define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
  25. static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
  26. {
  27. u32 addr;
  28. for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
  29. u32 pbe = LIMA_PBE(addr);
  30. u32 bte = LIMA_BTE(addr);
  31. vm->bts[pbe].cpu[bte] = 0;
  32. }
  33. }
/*
 * Install a single page mapping pa -> va, allocating the backing block of
 * page tables on demand.
 *
 * Returns 0 on success, -ENOMEM if the page-table block allocation fails.
 * NOTE(review): callers in this file hold vm->lock, except lima_vm_create()
 * which maps the DLBU page before the VM is shared — confirm that is safe.
 */
static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
{
	u32 pbe = LIMA_PBE(va);
	u32 bte = LIMA_BTE(va);

	if (!vm->bts[pbe].cpu) {
		dma_addr_t pts;
		u32 *pd;
		int j;

		/* Lazily allocate a whole block of page tables in one
		 * zeroed DMA allocation. */
		vm->bts[pbe].cpu = dma_alloc_wc(
			vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
			&vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
		if (!vm->bts[pbe].cpu)
			return -ENOMEM;

		/* Point the corresponding page-directory entries at each
		 * page table inside the new block. */
		pts = vm->bts[pbe].dma;
		pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			pd[j] = pts | LIMA_VM_FLAG_PRESENT;
			pts += LIMA_PAGE_SIZE;
		}
	}

	vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE;

	return 0;
}
  57. static struct lima_bo_va *
  58. lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
  59. {
  60. struct lima_bo_va *bo_va, *ret = NULL;
  61. list_for_each_entry(bo_va, &bo->va, list) {
  62. if (bo_va->vm == vm) {
  63. ret = bo_va;
  64. break;
  65. }
  66. }
  67. return ret;
  68. }
/*
 * Map @bo into @vm, or take an extra reference if it is already mapped.
 *
 * @create: when false, only an existing mapping may be referenced;
 *          -ENOENT is returned if the BO is not yet mapped in this VM.
 *
 * Returns 0 on success, -ENOENT (see above), -ENOMEM on allocation
 * failure, or the error from drm_mm_insert_node()/lima_vm_map_page().
 * Lock order: bo->lock, then vm->lock.
 */
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
	struct lima_bo_va *bo_va;
	struct sg_dma_page_iter sg_iter;
	int offset = 0, err;

	mutex_lock(&bo->lock);

	/* Already mapped in this VM: just bump the refcount. */
	bo_va = lima_vm_bo_find(vm, bo);
	if (bo_va) {
		bo_va->ref_count++;
		mutex_unlock(&bo->lock);
		return 0;
	}

	/* should not create new bo_va if not asked by caller */
	if (!create) {
		mutex_unlock(&bo->lock);
		return -ENOENT;
	}

	bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL);
	if (!bo_va) {
		err = -ENOMEM;
		goto err_out0;
	}

	bo_va->vm = vm;
	bo_va->ref_count = 1;

	mutex_lock(&vm->lock);

	/* Reserve a VA range large enough for the whole BO. */
	err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo));
	if (err)
		goto err_out1;

	/* Map each backing page at consecutive VAs inside the range. */
	for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 0) {
		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
				       bo_va->node.start + offset);
		if (err)
			goto err_out2;

		offset += PAGE_SIZE;
	}

	mutex_unlock(&vm->lock);

	list_add_tail(&bo_va->list, &bo->va);

	mutex_unlock(&bo->lock);
	return 0;

err_out2:
	/* Undo any partially installed PTEs before releasing the VA range. */
	if (offset)
		lima_vm_unmap_range(vm, bo_va->node.start,
				    bo_va->node.start + offset - 1);
	drm_mm_remove_node(&bo_va->node);
err_out1:
	mutex_unlock(&vm->lock);
	kfree(bo_va);
err_out0:
	mutex_unlock(&bo->lock);
	return err;
}
  119. void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
  120. {
  121. struct lima_bo_va *bo_va;
  122. u32 size;
  123. mutex_lock(&bo->lock);
  124. bo_va = lima_vm_bo_find(vm, bo);
  125. if (--bo_va->ref_count > 0) {
  126. mutex_unlock(&bo->lock);
  127. return;
  128. }
  129. mutex_lock(&vm->lock);
  130. size = bo->heap_size ? bo->heap_size : bo_va->node.size;
  131. lima_vm_unmap_range(vm, bo_va->node.start,
  132. bo_va->node.start + size - 1);
  133. drm_mm_remove_node(&bo_va->node);
  134. mutex_unlock(&vm->lock);
  135. list_del(&bo_va->list);
  136. mutex_unlock(&bo->lock);
  137. kfree(bo_va);
  138. }
  139. u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo)
  140. {
  141. struct lima_bo_va *bo_va;
  142. u32 ret;
  143. mutex_lock(&bo->lock);
  144. bo_va = lima_vm_bo_find(vm, bo);
  145. ret = bo_va->node.start;
  146. mutex_unlock(&bo->lock);
  147. return ret;
  148. }
/*
 * Allocate and initialize a new VM: page directory, optional DLBU page
 * mapping, and the VA range allocator.
 *
 * Returns the new VM with refcount 1, or NULL on failure.
 */
struct lima_vm *lima_vm_create(struct lima_device *dev)
{
	struct lima_vm *vm;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return NULL;

	vm->dev = dev;
	mutex_init(&vm->lock);
	kref_init(&vm->refcount);

	/* One zeroed page for the page directory. */
	vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
				  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!vm->pd.cpu)
		goto err_out0;

	/* Devices with a DLBU get its page mapped at a fixed reserved VA. */
	if (dev->dlbu_cpu) {
		int err = lima_vm_map_page(
			vm, dev->dlbu_dma, LIMA_VA_RESERVE_DLBU);
		if (err)
			goto err_out1;
	}

	drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start);

	return vm;

err_out1:
	dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
err_out0:
	kfree(vm);
	return NULL;
}
  176. void lima_vm_release(struct kref *kref)
  177. {
  178. struct lima_vm *vm = container_of(kref, struct lima_vm, refcount);
  179. int i;
  180. drm_mm_takedown(&vm->mm);
  181. for (i = 0; i < LIMA_VM_NUM_BT; i++) {
  182. if (vm->bts[i].cpu)
  183. dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
  184. vm->bts[i].cpu, vm->bts[i].dma);
  185. }
  186. if (vm->pd.cpu)
  187. dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
  188. kfree(vm);
  189. }
/*
 * Debug dump: print every page-directory entry that has a backing page
 * table, and every non-zero PTE inside those tables.
 */
void lima_vm_print(struct lima_vm *vm)
{
	int i, j, k;
	u32 *pd, *pt;

	if (!vm->pd.cpu)
		return;

	pd = vm->pd.cpu;
	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		/* Skip blocks whose page tables were never allocated. */
		if (!vm->bts[i].cpu)
			continue;

		pt = vm->bts[i].cpu;
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			/* idx is the flat PD index of this page table. */
			int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j;

			printk(KERN_INFO "lima vm pd %03x:%08x\n", idx, pd[idx]);

			for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) {
				u32 pte = *pt++;

				if (pte)
					printk(KERN_INFO " pt %03x:%08x\n", k, pte);
			}
		}
	}
}
/*
 * Map @bo's backing pages into its existing VA range in @vm, starting
 * @pageoff pages into both the BO's sg table and the reserved range.
 *
 * Returns 0 on success, -ENOENT if the BO has no mapping in this VM,
 * or the error from lima_vm_map_page().  Lock order: bo->lock, then
 * vm->lock (same as lima_vm_bo_add()).
 */
int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
{
	struct lima_bo_va *bo_va;
	struct sg_dma_page_iter sg_iter;
	int offset = 0, err;
	u32 base;

	mutex_lock(&bo->lock);

	/* The BO must already have a VA range reserved in this VM. */
	bo_va = lima_vm_bo_find(vm, bo);
	if (!bo_va) {
		err = -ENOENT;
		goto err_out0;
	}

	mutex_lock(&vm->lock);

	/* Skip the first @pageoff pages of the VA range and of the BO. */
	base = bo_va->node.start + (pageoff << PAGE_SHIFT);
	for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, pageoff) {
		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
				       base + offset);
		if (err)
			goto err_out1;

		offset += PAGE_SIZE;
	}

	mutex_unlock(&vm->lock);

	mutex_unlock(&bo->lock);
	return 0;

err_out1:
	/* Roll back only the pages this call managed to map. */
	if (offset)
		lima_vm_unmap_range(vm, base, base + offset - 1);
	mutex_unlock(&vm->lock);
err_out0:
	mutex_unlock(&bo->lock);
	return err;
}