cnss_prealloc.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012,2014-2017,2019-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/version.h>

#ifdef CONFIG_CNSS_OUT_OF_TREE
#include "cnss_prealloc.h"
#else
#include <net/cnss_prealloc.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
/* Ideally the header should come from the standard include path, so this is
 * not an ideal way of including it. However, using the slab struct to derive
 * the cache from a mem ptr avoids additional tracking and/or adding an 8-byte
 * headroom for the cache at the beginning of the buffer, which would waste
 * memory, particularly when the requested size falls close to a page
 * boundary. There is also a precedent in minidump_memory.c, which includes
 * mm/slab.h in the same style.
 */
#include "../mm/slab.h"
#endif

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CNSS prealloc driver");

/* The cnss preallocation scheme is a memory pool that always tries to keep a
 * list of free memory for use in emergencies. It is implemented on top of two
 * kernel features: mempool and kmem cache.
 */

struct cnss_pool {
        size_t size;
        int min;
        const char name[50];
        mempool_t *mp;
        struct kmem_cache *cache;
};

/**
 * Memory pool
 * -----------
 *
 * How to update this table:
 *
 * 1. Add a new row with the following elements:
 *    size  : Size of one allocation unit in bytes.
 *    min   : Minimum units to be reserved. Used only if a regular
 *            allocation fails.
 *    name  : Name of the cache/pool. Will be displayed in /proc/slabinfo
 *            if not merged with another pool.
 *    mp    : A pointer to the memory pool. Updated during init.
 *    cache : A pointer to the cache. Updated during init.
 *    (An illustrative example row is given in the comment after the table.)
 * 2. Always keep the table sorted in increasing order of size.
 * 3. Please keep the reserve pool as small as possible, as it is always
 *    preallocated.
 * 4. Always profile with different use cases after updating this table.
 * 5. A dynamic view of the pools is available at /proc/slabinfo.
 * 6. Each pool has a sysfs node at /sys/kernel/slab/<name>.
 */

/* size, min pool reserve, name, mempool handle, cache handle */
static struct cnss_pool cnss_pools[] = {
        {8 * 1024, 16, "cnss-pool-8k", NULL, NULL},
        {16 * 1024, 16, "cnss-pool-16k", NULL, NULL},
        {32 * 1024, 22, "cnss-pool-32k", NULL, NULL},
        {64 * 1024, 38, "cnss-pool-64k", NULL, NULL},
        {128 * 1024, 10, "cnss-pool-128k", NULL, NULL},
};
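
/* Illustrative example only (not part of the configured pools): a
 * hypothetical 256 KB pool with 4 reserved units would be added by appending
 * a row such as
 *
 *      {256 * 1024, 4, "cnss-pool-256k", NULL, NULL},
 *
 * keeping the table sorted by size and re-profiling afterwards, as noted in
 * the update guidelines above.
 */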

/**
 * cnss_pool_alloc_threshold() - Allocation threshold
 *
 * Minimum memory size to be part of the cnss pool.
 *
 * Return: Size in bytes of the smallest pool unit.
 */
static inline size_t cnss_pool_alloc_threshold(void)
{
        return cnss_pools[0].size;
}

/**
 * cnss_pool_init() - Initialize memory pools.
 *
 * Create cnss pools as configured by cnss_pools[]. It is the responsibility of
 * the caller to invoke the cnss_pool_deinit() routine to clean it up. This
 * function needs to be called at early boot to preallocate minimum buffers in
 * the pool.
 *
 * Return: 0 - success, otherwise error code.
 */
static int cnss_pool_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
                /* Create the slab cache */
                cnss_pools[i].cache =
                        kmem_cache_create_usercopy(cnss_pools[i].name,
                                                   cnss_pools[i].size, 0,
                                                   SLAB_ACCOUNT, 0,
                                                   cnss_pools[i].size, NULL);
                if (!cnss_pools[i].cache) {
                        pr_err("cnss_prealloc: cache %s failed\n",
                               cnss_pools[i].name);
                        continue;
                }

                /* Create the pool and associate it to the slab cache */
                cnss_pools[i].mp =
                        mempool_create(cnss_pools[i].min, mempool_alloc_slab,
                                       mempool_free_slab, cnss_pools[i].cache);
                if (!cnss_pools[i].mp) {
                        pr_err("cnss_prealloc: mempool %s failed\n",
                               cnss_pools[i].name);
                        kmem_cache_destroy(cnss_pools[i].cache);
                        cnss_pools[i].cache = NULL;
                        continue;
                }

                pr_info("cnss_prealloc: created mempool %s of min size %d * %zu\n",
                        cnss_pools[i].name, cnss_pools[i].min,
                        cnss_pools[i].size);
        }

        return 0;
}
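
/* Note (derived from the cnss_pools[] table above, not a guarantee from the
 * driver): with the current configuration, the minimum reserve created by the
 * mempools here is roughly
 *
 *      16*8K + 16*16K + 22*32K + 38*64K + 10*128K = 4800 KB (~4.7 MB),
 *
 * and this memory stays preallocated for the lifetime of the module, which is
 * why the guidelines ask to keep the reserve counts as small as possible.
 */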

/**
 * cnss_pool_deinit() - Free memory pools.
 *
 * Free the memory pools and return resources back to the system. It warns
 * if there is any pending element in a memory pool or cache.
 */
static void cnss_pool_deinit(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
                pr_info("cnss_prealloc: destroy mempool %s\n",
                        cnss_pools[i].name);
                mempool_destroy(cnss_pools[i].mp);
                kmem_cache_destroy(cnss_pools[i].cache);
        }
}

/**
 * cnss_pool_get_index() - Get the index of the memory pool
 * @mem: Allocated memory
 *
 * Return the index of the memory pool which fits the requested memory. The
 * complexity of this check is O(number of memory pools). Returns a negative
 * error code in case of failure.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
static int cnss_pool_get_index(void *mem)
{
        struct slab *slab;
        struct kmem_cache *cache;
        int i;

        if (!virt_addr_valid(mem))
                return -EINVAL;

        /* mem -> slab -> cache */
        slab = virt_to_slab(mem);
        if (!slab)
                return -ENOENT;

        cache = slab->slab_cache;
        if (!cache)
                return -ENOENT;

        /* Check if the memory belongs to a pool */
        for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
                if (cnss_pools[i].cache == cache)
                        return i;
        }

        return -ENOENT;
}
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
static int cnss_pool_get_index(void *mem)
{
        struct page *page;
        struct kmem_cache *cache;
        int i;

        if (!virt_addr_valid(mem))
                return -EINVAL;

        /* mem -> page -> cache */
        page = virt_to_head_page(mem);
        if (!page)
                return -ENOENT;

        cache = page->slab_cache;
        if (!cache)
                return -ENOENT;

        /* Check if the memory belongs to a pool */
        for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
                if (cnss_pools[i].cache == cache)
                        return i;
        }

        return -ENOENT;
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */

/**
 * wcnss_prealloc_get() - Get preallocated memory from a pool
 * @size: Size to allocate
 *
 * The memory pool is chosen based on the size. If memory is not available in a
 * given pool, the next higher-sized pool is tried until one succeeds.
 *
 * Return: A void pointer to the allocated memory, or NULL on failure.
 */
void *wcnss_prealloc_get(size_t size)
{
        void *mem = NULL;
        gfp_t gfp_mask = __GFP_ZERO;
        int i;

        if (in_interrupt() || !preemptible() || rcu_preempt_depth())
                gfp_mask |= GFP_ATOMIC;
        else
                gfp_mask |= GFP_KERNEL;

        if (size >= cnss_pool_alloc_threshold()) {
                for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
                        if (cnss_pools[i].size >= size && cnss_pools[i].mp) {
                                mem = mempool_alloc(cnss_pools[i].mp, gfp_mask);
                                if (mem)
                                        break;
                        }
                }
        }

        if (!mem && size >= cnss_pool_alloc_threshold()) {
                pr_debug("cnss_prealloc: not available for size %zu, flag %x\n",
                         size, gfp_mask);
        }

        return mem;
}
EXPORT_SYMBOL(wcnss_prealloc_get);

/**
 * wcnss_prealloc_put() - Release allocated memory
 * @mem: Allocated memory
 *
 * Free memory obtained from wcnss_prealloc_get() back to the slab cache, or to
 * the pool reserve if the memory pool doesn't have enough elements.
 *
 * Return: 1 - success
 *         0 - fail
 */
int wcnss_prealloc_put(void *mem)
{
        int i;

        if (!mem)
                return 0;

        i = cnss_pool_get_index(mem);
        if (i >= 0 && i < ARRAY_SIZE(cnss_pools) && cnss_pools[i].mp) {
                mempool_free(mem, cnss_pools[i].mp);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(wcnss_prealloc_put);
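
/* Usage sketch (hypothetical caller, not part of this file): a host WLAN
 * driver would typically pair the two exports as shown below. Sizes below
 * cnss_pool_alloc_threshold() are not served by the pools, so
 * wcnss_prealloc_get() returns NULL for them.
 *
 *      void *buf = wcnss_prealloc_get(32 * 1024);
 *
 *      if (!buf)
 *              return -ENOMEM;         // or fall back to a regular kzalloc()
 *      ...
 *      wcnss_prealloc_put(buf);        // returns 1 if buf came from a pool
 */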

/* Not implemented. Make use of Linux SLAB features. */
void wcnss_prealloc_check_memory_leak(void) {}
EXPORT_SYMBOL(wcnss_prealloc_check_memory_leak);

/* Not implemented. Make use of Linux SLAB features. */
int wcnss_pre_alloc_reset(void) { return -EOPNOTSUPP; }
EXPORT_SYMBOL(wcnss_pre_alloc_reset);

/**
 * cnss_prealloc_is_valid_dt_node_found() - Check if a valid device tree node
 * is present
 *
 * A valid device tree node means a node with the "qcom,wlan" property present
 * and the "status" property not disabled.
 *
 * Return: true if a valid device tree node is found, false otherwise
 */
static bool cnss_prealloc_is_valid_dt_node_found(void)
{
        struct device_node *dn = NULL;

        for_each_node_with_property(dn, "qcom,wlan") {
                if (of_device_is_available(dn))
                        break;
        }

        if (dn)
                return true;

        return false;
}

static int __init cnss_prealloc_init(void)
{
        if (!cnss_prealloc_is_valid_dt_node_found())
                return -ENODEV;

        return cnss_pool_init();
}

static void __exit cnss_prealloc_exit(void)
{
        cnss_pool_deinit();
}

module_init(cnss_prealloc_init);
module_exit(cnss_prealloc_exit);