/* cnss_prealloc.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2012,2014-2017,2019-2021 The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/module.h>
  7. #include <linux/slab.h>
  8. #include <linux/mempool.h>
  9. #include <linux/mm.h>
  10. #include <linux/err.h>
  11. #include <linux/of.h>
  12. #include <linux/version.h>
  13. #ifdef CONFIG_CNSS_OUT_OF_TREE
  14. #include "cnss_prealloc.h"
  15. #else
  16. #include <net/cnss_prealloc.h>
  17. #endif
  18. MODULE_LICENSE("GPL v2");
  19. MODULE_DESCRIPTION("CNSS prealloc driver");
/* The cnss preallocation scheme is a memory pool that always tries to keep
 * a list of free memory for use in emergencies. It is implemented on top of
 * two kernel features: mempool and kmem cache.
 */
/* One preallocated pool: a slab cache plus a mempool reserve layered on it. */
struct cnss_pool {
	size_t size;              /* size of one allocation unit in bytes */
	int min;                  /* minimum units held in the mempool reserve */
	const char name[50];      /* cache/pool name shown in /proc/slabinfo */
	mempool_t *mp;            /* mempool handle, filled in by cnss_pool_init() */
	struct kmem_cache *cache; /* backing slab cache, filled in by cnss_pool_init() */
};
  31. /**
  32. * Memory pool
  33. * -----------
  34. *
  35. * How to update this table:
  36. *
  37. * 1. Add a new row with following elements
  38. * size : Size of one allocation unit in bytes.
  39. * min : Minimum units to be reserved. Used only if a regular
  40. * allocation fails.
  41. * name : Name of the cache/pool. Will be displayed in /proc/slabinfo
  42. * if not merged with another pool.
  43. * mp : A pointer to memory pool. Updated during init.
  44. * cache : A pointer to cache. Updated during init.
  45. * 2. Always keep the table in increasing order
  46. * 3. Please keep the reserve pool as minimum as possible as it's always
  47. * preallocated.
  48. * 4. Always profile with different use cases after updating this table.
  49. * 5. A dynamic view of this pool can be viewed at /proc/slabinfo.
  50. * 6. Each pool has a sys node at /sys/kernel/slab/<name>
  51. *
  52. */
  53. /* size, min pool reserve, name, memorypool handler, cache handler*/
  54. static struct cnss_pool cnss_pools[] = {
  55. {8 * 1024, 16, "cnss-pool-8k", NULL, NULL},
  56. {16 * 1024, 16, "cnss-pool-16k", NULL, NULL},
  57. {32 * 1024, 22, "cnss-pool-32k", NULL, NULL},
  58. {64 * 1024, 38, "cnss-pool-64k", NULL, NULL},
  59. {128 * 1024, 10, "cnss-pool-128k", NULL, NULL},
  60. };
/**
 * cnss_pool_alloc_threshold() - Allocation threshold
 *
 * Minimum memory size to be part of cnss pool. The table is kept in
 * increasing size order, so the first entry is the smallest unit.
 *
 * Return: Size
 *
 */
static inline size_t cnss_pool_alloc_threshold(void)
{
	return cnss_pools[0].size;
}
  73. /**
  74. * cnss_pool_int() - Initialize memory pools.
  75. *
  76. * Create cnss pools as configured by cnss_pools[]. It is the responsibility of
  77. * the caller to invoke cnss_pool_deinit() routine to clean it up. This
  78. * function needs to be called at early boot to preallocate minimum buffers in
  79. * the pool.
  80. *
  81. * Return: 0 - success, otherwise error code.
  82. *
  83. */
  84. static int cnss_pool_init(void)
  85. {
  86. int i;
  87. for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
  88. /* Create the slab cache */
  89. cnss_pools[i].cache =
  90. kmem_cache_create_usercopy(cnss_pools[i].name,
  91. cnss_pools[i].size, 0,
  92. SLAB_ACCOUNT, 0,
  93. cnss_pools[i].size, NULL);
  94. if (!cnss_pools[i].cache) {
  95. pr_err("cnss_prealloc: cache %s failed\n",
  96. cnss_pools[i].name);
  97. continue;
  98. }
  99. /* Create the pool and associate to slab cache */
  100. cnss_pools[i].mp =
  101. mempool_create(cnss_pools[i].min, mempool_alloc_slab,
  102. mempool_free_slab, cnss_pools[i].cache);
  103. if (!cnss_pools[i].mp) {
  104. pr_err("cnss_prealloc: mempool %s failed\n",
  105. cnss_pools[i].name);
  106. kmem_cache_destroy(cnss_pools[i].cache);
  107. cnss_pools[i].cache = NULL;
  108. continue;
  109. }
  110. pr_info("cnss_prealloc: created mempool %s of min size %d * %zu\n",
  111. cnss_pools[i].name, cnss_pools[i].min,
  112. cnss_pools[i].size);
  113. }
  114. return 0;
  115. }
  116. /**
  117. * cnss_pool_deinit() - Free memory pools.
  118. *
  119. * Free the memory pools and return resources back to the system. It warns
  120. * if there is any pending element in memory pool or cache.
  121. *
  122. */
  123. static void cnss_pool_deinit(void)
  124. {
  125. int i;
  126. for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
  127. pr_info("cnss_prealloc: destroy mempool %s\n",
  128. cnss_pools[i].name);
  129. mempool_destroy(cnss_pools[i].mp);
  130. kmem_cache_destroy(cnss_pools[i].cache);
  131. }
  132. }
  133. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
  134. /* In kernel 5.17, slab_cache is removed from page struct, so
  135. * store cache in the beginning of memory buffer.
  136. */
/* Stash the owning cache pointer in the first sizeof(void *) bytes of the
 * buffer so cnss_pool_get_index() can recover it on the free path.
 *
 * NOTE(review): the buffer is returned to the caller as-is, so any caller
 * write to the start of the buffer clobbers this pointer and the free path
 * then fails to match a pool (-ENOENT) — confirm callers tolerate that.
 */
static inline void cnss_pool_put_cache_in_mem(void *mem, struct kmem_cache *cache)
{
	/* put cache at the beginning of mem */
	(*(struct kmem_cache **)mem) = cache;
}
  142. static inline struct kmem_cache *cnss_pool_get_cache_from_mem(void *mem)
  143. {
  144. struct kmem_cache *cache;
  145. /* read cache from the beginnging of mem */
  146. cache = (struct kmem_cache *)(*(struct kmem_cache **)mem);
  147. return cache;
  148. }
  149. #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
  150. /* for older kernel < 5.17, we use page->slab_cache. In such case
  151. * we do not reserve headroom in memory buffer to store cache.
  152. */
/* No-op on pre-5.17 kernels: the owning cache is recovered later through
 * page->slab_cache, so nothing needs to be stored inside the buffer.
 */
static inline void cnss_pool_put_cache_in_mem(void *mem, struct kmem_cache *cache)
{
}
  156. static inline struct kmem_cache *cnss_pool_get_cache_from_mem(void *mem)
  157. {
  158. struct page *page;
  159. if (!virt_addr_valid(mem))
  160. return NULL;
  161. /* mem -> page -> cache */
  162. page = virt_to_head_page(mem);
  163. if (!page)
  164. return NULL;
  165. return page->slab_cache;
  166. }
  167. #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
  168. /**
  169. * cnss_pool_get_index() - Get the index of memory pool
  170. * @mem: Allocated memory
  171. *
  172. * Returns the index of the memory pool which fits the reqested memory. The
  173. * complexity of this check is O(num of memory pools). Returns a negative
  174. * value with error code in case of failure.
  175. *
  176. */
  177. static int cnss_pool_get_index(void *mem)
  178. {
  179. struct kmem_cache *cache;
  180. int i;
  181. cache = cnss_pool_get_cache_from_mem(mem);
  182. if (!cache)
  183. return -ENOENT;
  184. /* Check if memory belongs to a pool */
  185. for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
  186. if (cnss_pools[i].cache == cache)
  187. return i;
  188. }
  189. return -ENOENT;
  190. }
  191. /**
  192. * wcnss_prealloc_get() - Get preallocated memory from a pool
  193. * @size: Size to allocate
  194. *
  195. * Memory pool is chosen based on the size. If memory is not available in a
  196. * given pool it goes to next higher sized pool until it succeeds.
  197. *
  198. * Return: A void pointer to allocated memory
  199. */
  200. void *wcnss_prealloc_get(size_t size)
  201. {
  202. void *mem = NULL;
  203. gfp_t gfp_mask = __GFP_ZERO;
  204. int i;
  205. if (in_interrupt() || !preemptible() || rcu_preempt_depth())
  206. gfp_mask |= GFP_ATOMIC;
  207. else
  208. gfp_mask |= GFP_KERNEL;
  209. if (size >= cnss_pool_alloc_threshold()) {
  210. for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
  211. if (cnss_pools[i].size >= size && cnss_pools[i].mp) {
  212. mem = mempool_alloc(cnss_pools[i].mp, gfp_mask);
  213. if (mem) {
  214. cnss_pool_put_cache_in_mem(mem, cnss_pools[i].cache);
  215. break;
  216. }
  217. }
  218. }
  219. }
  220. if (!mem && size >= cnss_pool_alloc_threshold()) {
  221. pr_debug("cnss_prealloc: not available for size %zu, flag %x\n",
  222. size, gfp_mask);
  223. }
  224. return mem;
  225. }
  226. EXPORT_SYMBOL(wcnss_prealloc_get);
  227. /**
  228. * wcnss_prealloc_put() - Relase allocated memory
  229. * @mem: Allocated memory
  230. *
  231. * Free the memory got by wcnss_prealloc_get() to slab or pool reserve if memory
  232. * pool doesn't have enough elements.
  233. *
  234. * Return: 1 - success
  235. * 0 - fail
  236. */
  237. int wcnss_prealloc_put(void *mem)
  238. {
  239. int i;
  240. if (!mem)
  241. return 0;
  242. i = cnss_pool_get_index(mem);
  243. if (i >= 0 && i < ARRAY_SIZE(cnss_pools) && cnss_pools[i].mp) {
  244. mempool_free(mem, cnss_pools[i].mp);
  245. return 1;
  246. }
  247. return 0;
  248. }
  249. EXPORT_SYMBOL(wcnss_prealloc_put);
/* Not implemented. Make use of Linux SLAB features (e.g. slab debugging)
 * instead; this no-op is kept so existing callers keep linking.
 */
void wcnss_prealloc_check_memory_leak(void) {}
EXPORT_SYMBOL(wcnss_prealloc_check_memory_leak);
/* Not implemented. Make use of Linux SLAB features instead; kept so
 * existing callers keep linking. Always reports -EOPNOTSUPP.
 */
int wcnss_pre_alloc_reset(void) { return -EOPNOTSUPP; }
EXPORT_SYMBOL(wcnss_pre_alloc_reset);
  256. /**
  257. * cnss_prealloc_is_valid_dt_node_found - Check if valid device tree node
  258. * present
  259. *
  260. * Valid device tree node means a node with "qcom,wlan" property present
  261. * and "status" property not disabled.
  262. *
  263. * Return: true if valid device tree node found, false if not found
  264. */
  265. static bool cnss_prealloc_is_valid_dt_node_found(void)
  266. {
  267. struct device_node *dn = NULL;
  268. for_each_node_with_property(dn, "qcom,wlan") {
  269. if (of_device_is_available(dn))
  270. break;
  271. }
  272. if (dn)
  273. return true;
  274. return false;
  275. }
  276. static int __init cnss_prealloc_init(void)
  277. {
  278. if (!cnss_prealloc_is_valid_dt_node_found())
  279. return -ENODEV;
  280. return cnss_pool_init();
  281. }
/* Module exit: release every pool created by cnss_prealloc_init(). */
static void __exit cnss_prealloc_exit(void)
{
	cnss_pool_deinit();
}

module_init(cnss_prealloc_init);
module_exit(cnss_prealloc_exit);