coherent.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};
static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
	return mem->device_base;
}
static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset)
{
	struct dma_coherent_mem *dma_mem;
	int pages = size >> PAGE_SHIFT;
	void *mem_base;

	if (!size)
		return ERR_PTR(-EINVAL);

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base)
		return ERR_PTR(-EINVAL);

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out_unmap_membase;

	dma_mem->bitmap = bitmap_zalloc(pages, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out_free_dma_mem;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset;
	spin_lock_init(&dma_mem->spinlock);

	return dma_mem;

out_free_dma_mem:
	kfree(dma_mem);
out_unmap_membase:
	memunmap(mem_base);
	pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zd MiB\n",
	       &phys_addr, size / SZ_1M);
	return ERR_PTR(-ENOMEM);
}
static void _dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	bitmap_free(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;
	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}
/*
 * Declare a region of memory to be handed out by dma_alloc_coherent() when it
 * is asked for coherent memory for this device. This shall only be used
 * from platform code, usually based on the device tree description.
 *
 * phys_addr is the CPU physical address to which the memory is currently
 * assigned (this will be ioremapped so the CPU can access the region).
 *
 * device_addr is the DMA address the device needs to be programmed with to
 * actually address this memory (this will be handed out as the dma_addr_t in
 * dma_alloc_coherent()).
 *
 * size is the size of the area (must be a multiple of PAGE_SIZE).
 *
 * As a simplification for the platforms, only *one* such region of memory may
 * be declared per device.
 */
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem;
	int ret;

	mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		_dma_release_coherent_memory(mem);
	return ret;
}
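
/*
 * Illustrative usage sketch, not part of the original file: a platform
 * driver that owns a dedicated, device-visible memory region (for example
 * an on-chip SRAM) could hand it to this pool during probe. "pdev" and
 * "res" are hypothetical placeholders for the driver's platform device and
 * the reserved resource, and the device is assumed to see the region at
 * the same address as the CPU:
 *
 *	ret = dma_declare_coherent_memory(&pdev->dev, res->start,
 *					  res->start, resource_size(res));
 *	if (ret)
 *		return ret;
 *
 * Subsequent dma_alloc_coherent() calls for &pdev->dev are then satisfied
 * from this region instead of the generic allocators.
 */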
void dma_release_coherent_memory(struct device *dev)
{
	if (dev)
		_dma_release_coherent_memory(dev->dma_mem);
}

static void *__dma_alloc_from_coherent(struct device *dev,
				       struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) +
			((dma_addr_t)pageno << PAGE_SHIFT);
	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}
/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
	return 1;
}
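
/*
 * Illustrative sketch of the intended calling convention, not part of the
 * original file: a coherent allocator is expected to try the per-device
 * pool first and only fall back to its generic path when 0 is returned.
 * "arch_fallback_alloc" is a hypothetical placeholder for that generic
 * path:
 *
 *	void *vaddr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
 *		return vaddr;
 *	return arch_fallback_alloc(dev, size, dma_handle, gfp);
 *
 * Note that a return value of 1 with *ret == NULL means the pool exists
 * but is exhausted; the caller must return NULL rather than fall through
 * to the generic allocators.
 */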
static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}
/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
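
/*
 * Illustrative sketch of the matching free path, not part of the original
 * file: the caller mirrors the allocation and only falls back to its
 * generic free routine when 0 is returned. "arch_fallback_free" is a
 * hypothetical placeholder:
 *
 *	int order = get_order(size);
 *
 *	if (dma_release_from_dev_coherent(dev, order, vaddr))
 *		return;
 *	arch_fallback_free(dev, size, vaddr, dma_handle);
 */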
static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
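
/*
 * Illustrative sketch, not part of the original file: an mmap helper on
 * the dma_mmap_attrs() path would consult the pool before doing its
 * generic mapping. "generic_dma_mmap" is a hypothetical placeholder for
 * that generic path:
 *
 *	int ret;
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *		return ret;
 *	return generic_dma_mmap(dev, vma, cpu_addr, dma_addr, size);
 */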
#ifdef CONFIG_DMA_GLOBAL_POOL
static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
				     dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
					 dma_handle);
}

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
					   vaddr);
}

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
{
	struct dma_coherent_mem *mem;

	mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	dma_coherent_default_memory = mem;
	pr_info("DMA: default coherent area is set\n");
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */
/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct reserved_mem *dma_reserved_default_memory __initdata;
#endif

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	if (!rmem->priv) {
		struct dma_coherent_mem *mem;

		mem = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, true);
		if (IS_ERR(mem))
			return PTR_ERR(mem);
		rmem->priv = mem;
	}
	dma_assign_coherent_memory(dev, rmem->priv);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

#ifdef CONFIG_DMA_GLOBAL_POOL
	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
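
/*
 * Illustrative device tree fragment handled by rmem_dma_setup(), not part
 * of the original file; node names, addresses and sizes are hypothetical:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		multimedia_pool: multimedia@77000000 {
 *			compatible = "shared-dma-pool";
 *			no-map;
 *			reg = <0x77000000 0x4000000>;
 *		};
 *	};
 *
 * A consumer device node then points at the pool with
 * "memory-region = <&multimedia_pool>;" and its driver calls
 * of_reserved_mem_device_init() during probe, which in turn invokes
 * rmem_dma_device_init() to assign the pool to that device.
 */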
#ifdef CONFIG_DMA_GLOBAL_POOL
static int __init dma_init_reserved_memory(void)
{
	if (!dma_reserved_default_memory)
		return -ENOMEM;
	return dma_init_global_coherent(dma_reserved_default_memory->base,
					dma_reserved_default_memory->size);
}
core_initcall(dma_init_reserved_memory);
#endif /* CONFIG_DMA_GLOBAL_POOL */

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif