qe_common.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common CPM code
 *
 * Author: Scott Wood <[email protected]>
 *
 * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
 *
 * Some parts derived from commproc.c/cpm2_common.c, which is:
 * Copyright (c) 1997 Dan Malek ([email protected])
 * Copyright (c) 1999-2001 Dan Malek <[email protected]>
 * Copyright (c) 2000 MontaVista Software, Inc ([email protected])
 * 2006 (c) MontaVista Software, Inc.
 * Vitaly Bordug <[email protected]>
 */

#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of_device.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <soc/fsl/qe/qe.h>

static struct gen_pool *muram_pool;
static DEFINE_SPINLOCK(cpm_muram_lock);
static void __iomem *muram_vbase;
static phys_addr_t muram_pbase;

struct muram_block {
	struct list_head head;
	s32 start;
	int size;
};

static LIST_HEAD(muram_block_list);

/* max address size we deal with */
#define OF_MAX_ADDR_CELLS	4
#define GENPOOL_OFFSET		(4096 * 8)
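
/*
 * Note: addresses handed to the genalloc pool are biased by GENPOOL_OFFSET
 * because gen_pool_alloc() returns 0 on failure while offset 0 is a valid
 * muram address; the bias is subtracted again before offsets are returned
 * to callers.
 */

/*
 * Illustrative sketch (not part of this file): cpm_muram_init() expects a
 * muram data node along these lines in the device tree. The node names,
 * ranges, and sizes below are board specific and shown only as an
 * assumption; the code merely needs a node compatible with
 * "fsl,cpm-muram-data" (or a legacy "data-only" node):
 *
 *	muram@10000 {
 *		ranges = <0 0x00010000 0xc000>;
 *
 *		data@0 {
 *			compatible = "fsl,cpm-muram-data";
 *			reg = <0 0xc000>;
 *		};
 *	};
 */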
int cpm_muram_init(void)
{
	struct device_node *np;
	struct resource r;
	__be32 zero[OF_MAX_ADDR_CELLS] = {};
	resource_size_t max = 0;
	int i = 0;
	int ret = 0;

	if (muram_pbase)
		return 0;

	np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
	if (!np) {
		/* try legacy bindings */
		np = of_find_node_by_name(NULL, "data-only");
		if (!np) {
			pr_err("Cannot find CPM muram data node");
			ret = -ENODEV;
			goto out_muram;
		}
	}

	muram_pool = gen_pool_create(0, -1);
	if (!muram_pool) {
		pr_err("Cannot allocate memory pool for CPM/QE muram");
		ret = -ENOMEM;
		goto out_muram;
	}

	muram_pbase = of_translate_address(np, zero);
	if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
		pr_err("Cannot translate zero through CPM muram node");
		ret = -ENODEV;
		goto out_pool;
	}

	while (of_address_to_resource(np, i++, &r) == 0) {
		if (r.end > max)
			max = r.end;
		ret = gen_pool_add(muram_pool, r.start - muram_pbase +
				   GENPOOL_OFFSET, resource_size(&r), -1);
		if (ret) {
			pr_err("QE: couldn't add muram to pool!\n");
			goto out_pool;
		}
	}

	muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
	if (!muram_vbase) {
		pr_err("Cannot map QE muram");
		ret = -ENOMEM;
		goto out_pool;
	}
	goto out_muram;

out_pool:
	gen_pool_destroy(muram_pool);
out_muram:
	of_node_put(np);
	return ret;
}

/*
 * cpm_muram_alloc_common - cpm_muram_alloc common code
 * @size: number of bytes to allocate
 * @algo: algorithm for alloc.
 * @data: data for genalloc's algorithm.
 *
 * This function returns a non-negative offset into the muram area, or
 * a negative errno on failure.
 */
static s32 cpm_muram_alloc_common(unsigned long size,
				  genpool_algo_t algo, void *data)
{
	struct muram_block *entry;
	s32 start;

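	/*
	 * Callers hold cpm_muram_lock with interrupts disabled, so the
	 * bookkeeping entry must be allocated with GFP_ATOMIC.
	 */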
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;
	start = gen_pool_alloc_algo(muram_pool, size, algo, data);
	if (!start) {
		kfree(entry);
		return -ENOMEM;
	}
	start = start - GENPOOL_OFFSET;
	memset_io(cpm_muram_addr(start), 0, size);
	entry->start = start;
	entry->size = size;
	list_add(&entry->head, &muram_block_list);
	return start;
}

/*
 * cpm_muram_alloc - allocate the requested size worth of multi-user ram
 * @size: number of bytes to allocate
 * @align: requested alignment, in bytes
 *
 * This function returns a non-negative offset into the muram area, or
 * a negative errno on failure.
 * Use cpm_dpram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
s32 cpm_muram_alloc(unsigned long size, unsigned long align)
{
	s32 start;
	unsigned long flags;
	struct genpool_data_align muram_pool_data;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	muram_pool_data.align = align;
	start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
				       &muram_pool_data);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc);
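
/*
 * Illustrative usage sketch (not part of this file; the 64-byte size and
 * alignment are arbitrary assumptions, error handling trimmed):
 *
 *	s32 pram_off = cpm_muram_alloc(64, 64);
 *
 *	if (pram_off < 0)
 *		return pram_off;
 *	pram = cpm_muram_addr(pram_off);
 *	...
 *	cpm_muram_free(pram_off);
 *
 * The returned area is already zeroed by cpm_muram_alloc(), and it can
 * alternatively be released by virtual address via cpm_muram_free_addr().
 */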

/**
 * cpm_muram_free - free a chunk of multi-user ram
 * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
 */
void cpm_muram_free(s32 offset)
{
	unsigned long flags;
	int size;
	struct muram_block *tmp;

	if (offset < 0)
		return;

	size = 0;
	spin_lock_irqsave(&cpm_muram_lock, flags);
	list_for_each_entry(tmp, &muram_block_list, head) {
		if (tmp->start == offset) {
			size = tmp->size;
			list_del(&tmp->head);
			kfree(tmp);
			break;
		}
	}
	gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
}
EXPORT_SYMBOL(cpm_muram_free);

/*
 * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
 * @offset: offset of allocation start address
 * @size: number of bytes to allocate
 *
 * This function returns @offset if the area was available, a negative
 * errno otherwise.
 * Use cpm_dpram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
{
	s32 start;
	unsigned long flags;
	struct genpool_data_fixed muram_pool_data_fixed;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
	start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
				       &muram_pool_data_fixed);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc_fixed);
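
/*
 * Illustrative sketch (not part of this file): fixed allocations are for
 * parameter areas that must live at a hardware-mandated offset. The
 * 0x1f00 offset and 32-byte size below are assumptions for the example:
 *
 *	s32 off = cpm_muram_alloc_fixed(0x1f00, 32);
 *
 *	if (off < 0)
 *		return off;
 */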

/**
 * cpm_muram_addr - turn a muram offset into a virtual address
 * @offset: muram offset to convert
 */
void __iomem *cpm_muram_addr(unsigned long offset)
{
	return muram_vbase + offset;
}
EXPORT_SYMBOL(cpm_muram_addr);
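
/**
 * cpm_muram_offset - turn a muram virtual address back into a muram offset
 * @addr: virtual address obtained from cpm_muram_addr() to convert
 */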
unsigned long cpm_muram_offset(const void __iomem *addr)
{
	return addr - muram_vbase;
}
EXPORT_SYMBOL(cpm_muram_offset);

/**
 * cpm_muram_dma - turn a muram virtual address into a DMA address
 * @addr: virtual address from cpm_muram_addr() to convert
 */
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
	return muram_pbase + (addr - muram_vbase);
}
EXPORT_SYMBOL(cpm_muram_dma);
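
/*
 * Illustrative sketch (not part of this file; "bd_off" and the final
 * register write are assumptions): the DMA address is what gets
 * programmed into a controller register, while the CPU accesses the same
 * memory through the __iomem mapping:
 *
 *	void __iomem *bd = cpm_muram_addr(bd_off);
 *	dma_addr_t bd_dma = cpm_muram_dma(bd);
 *
 *	(write bd_dma to the controller's buffer-descriptor base register)
 */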

/*
 * As cpm_muram_free, but takes the virtual address rather than the
 * muram offset.
 */
void cpm_muram_free_addr(const void __iomem *addr)
{
	if (!addr)
		return;
	cpm_muram_free(cpm_muram_offset(addr));
}
EXPORT_SYMBOL(cpm_muram_free_addr);