  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2012,2014-2017,2019-2021 The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/module.h>
  7. #include <linux/slab.h>
  8. #include <linux/mempool.h>
  9. #include <linux/mm.h>
  10. #include <linux/err.h>
  11. #include <linux/of.h>
  12. #include <linux/version.h>
  13. #include "cnss_common.h"
  14. #ifdef CONFIG_CNSS_OUT_OF_TREE
  15. #include "cnss_prealloc.h"
  16. #else
  17. #include <net/cnss_prealloc.h>
  18. #endif
  19. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
  20. /* Ideally header should be from standard include path. So this is not an
  21. * ideal way of header inclusion but use of slab struct to derive cache
  22. * from a mem ptr helps in avoiding additional tracking and/or adding headroom
  23. * of 8 bytes for cache in the beginning of buffer and wasting extra memory,
* particularly in the case when size of memory requested falls around the edge
  25. * of a page boundary. We also have precedence of minidump_memory.c which
  26. * includes mm/slab.h using this style.
  27. */
  28. #include "../mm/slab.h"
  29. #endif
  30. MODULE_LICENSE("GPL v2");
  31. MODULE_DESCRIPTION("CNSS prealloc driver");
/* cnss preallocation scheme is a memory pool that always tries to keep a
 * list of free memory for use in emergencies. It is implemented on kernel
 * features: memorypool and kmem cache.
 */

struct cnss_pool {
	size_t size;              /* size of one allocation unit in bytes */
	int min;                  /* minimum units the mempool keeps reserved */
	const char name[50];      /* cache/pool name shown in /proc/slabinfo */
	mempool_t *mp;            /* mempool handle, set by cnss_pool_init() */
	struct kmem_cache *cache; /* backing slab cache, set by cnss_pool_init() */
};
/**
 * Memory pool
 * -----------
 *
 * How to update this table:
 *
 * 1. Add a new row with following elements
 *    size : Size of one allocation unit in bytes.
 *    min  : Minimum units to be reserved. Used only if a regular
 *           allocation fails.
 *    name : Name of the cache/pool. Will be displayed in /proc/slabinfo
 *           if not merged with another pool.
 *    mp   : A pointer to memory pool. Updated during init.
 *    cache: A pointer to cache. Updated during init.
 * 2. Always keep the table in increasing order
 * 3. Please keep the reserve pool as minimum as possible as it's always
 *    preallocated.
 * 4. Always profile with different use cases after updating this table.
 * 5. A dynamic view of this pool can be viewed at /proc/slabinfo.
 * 6. Each pool has a sys node at /sys/kernel/slab/<name>
 *
 */

/* size, min pool reserve, name, memorypool handler, cache handler */
static struct cnss_pool cnss_pools_default[] = {
	{8 * 1024, 16, "cnss-pool-8k", NULL, NULL},
	{16 * 1024, 16, "cnss-pool-16k", NULL, NULL},
	{32 * 1024, 22, "cnss-pool-32k", NULL, NULL},
	{64 * 1024, 38, "cnss-pool-64k", NULL, NULL},
	{128 * 1024, 10, "cnss-pool-128k", NULL, NULL},
};
/* Smaller reserve counts, selected for ADRASTEA_DEVICE_ID in
 * cnss_assign_prealloc_pool().
 */
static struct cnss_pool cnss_pools_adrastea[] = {
	{8 * 1024, 2, "cnss-pool-8k", NULL, NULL},
	{16 * 1024, 10, "cnss-pool-16k", NULL, NULL},
	{32 * 1024, 8, "cnss-pool-32k", NULL, NULL},
	{64 * 1024, 4, "cnss-pool-64k", NULL, NULL},
	{128 * 1024, 2, "cnss-pool-128k", NULL, NULL},
};

/* Active pool table and its length; chosen by cnss_assign_prealloc_pool(). */
struct cnss_pool *cnss_pools;
unsigned int cnss_prealloc_pool_size = ARRAY_SIZE(cnss_pools_default);
/**
 * cnss_pool_alloc_threshold() - Allocation threshold
 *
 * Minimum memory size to be part of cnss pool.
 *
 * NOTE(review): dereferences the global cnss_pools table — assumes
 * cnss_assign_prealloc_pool() has already run; verify callers.
 *
 * Return: Size
 *
 */
static inline size_t cnss_pool_alloc_threshold(void)
{
	/* The table is kept in increasing size order, so entry 0 is smallest. */
	return cnss_pools[0].size;
}
  94. /**
  95. * cnss_pool_int() - Initialize memory pools.
  96. *
  97. * Create cnss pools as configured by cnss_pools[]. It is the responsibility of
  98. * the caller to invoke cnss_pool_deinit() routine to clean it up. This
  99. * function needs to be called at early boot to preallocate minimum buffers in
  100. * the pool.
  101. *
  102. * Return: 0 - success, otherwise error code.
  103. *
  104. */
  105. static int cnss_pool_init(void)
  106. {
  107. int i;
  108. for (i = 0; i < cnss_prealloc_pool_size; i++) {
  109. /* Create the slab cache */
  110. cnss_pools[i].cache =
  111. kmem_cache_create_usercopy(cnss_pools[i].name,
  112. cnss_pools[i].size, 0,
  113. SLAB_ACCOUNT, 0,
  114. cnss_pools[i].size, NULL);
  115. if (!cnss_pools[i].cache) {
  116. pr_err("cnss_prealloc: cache %s failed\n",
  117. cnss_pools[i].name);
  118. continue;
  119. }
  120. /* Create the pool and associate to slab cache */
  121. cnss_pools[i].mp =
  122. mempool_create(cnss_pools[i].min, mempool_alloc_slab,
  123. mempool_free_slab, cnss_pools[i].cache);
  124. if (!cnss_pools[i].mp) {
  125. pr_err("cnss_prealloc: mempool %s failed\n",
  126. cnss_pools[i].name);
  127. kmem_cache_destroy(cnss_pools[i].cache);
  128. cnss_pools[i].cache = NULL;
  129. continue;
  130. }
  131. pr_info("cnss_prealloc: created mempool %s of min size %d * %zu\n",
  132. cnss_pools[i].name, cnss_pools[i].min,
  133. cnss_pools[i].size);
  134. }
  135. return 0;
  136. }
  137. /**
  138. * cnss_pool_deinit() - Free memory pools.
  139. *
  140. * Free the memory pools and return resources back to the system. It warns
  141. * if there is any pending element in memory pool or cache.
  142. *
  143. */
  144. static void cnss_pool_deinit(void)
  145. {
  146. int i;
  147. for (i = 0; i < cnss_prealloc_pool_size; i++) {
  148. pr_info("cnss_prealloc: destroy mempool %s\n",
  149. cnss_pools[i].name);
  150. mempool_destroy(cnss_pools[i].mp);
  151. kmem_cache_destroy(cnss_pools[i].cache);
  152. cnss_pools[i].mp = NULL;
  153. cnss_pools[i].cache = NULL;
  154. }
  155. }
  156. void cnss_assign_prealloc_pool(unsigned long device_id)
  157. {
  158. pr_info("cnss_prealloc: assign cnss pool for device id 0x%lx", device_id);
  159. switch (device_id) {
  160. case ADRASTEA_DEVICE_ID:
  161. cnss_pools = cnss_pools_adrastea;
  162. cnss_prealloc_pool_size = ARRAY_SIZE(cnss_pools_adrastea);
  163. break;
  164. case WCN6750_DEVICE_ID:
  165. case WCN6450_DEVICE_ID:
  166. case QCA6390_DEVICE_ID:
  167. case QCA6490_DEVICE_ID:
  168. case MANGO_DEVICE_ID:
  169. case PEACH_DEVICE_ID:
  170. case KIWI_DEVICE_ID:
  171. default:
  172. cnss_pools = cnss_pools_default;
  173. cnss_prealloc_pool_size = ARRAY_SIZE(cnss_pools_default);
  174. }
  175. }
/**
 * cnss_initialize_prealloc_pool() - Assign and create the prealloc pools
 * @device_id: Device identifier used to select the pool table.
 *
 * Picks the pool table via cnss_assign_prealloc_pool() and then creates the
 * caches/mempools via cnss_pool_init(). The return value of cnss_pool_init()
 * is ignored here; pool-creation failures are only logged.
 */
void cnss_initialize_prealloc_pool(unsigned long device_id)
{
	cnss_assign_prealloc_pool(device_id);
	cnss_pool_init();
}
EXPORT_SYMBOL(cnss_initialize_prealloc_pool);
/**
 * cnss_deinitialize_prealloc_pool() - Destroy the prealloc pools
 *
 * Thin exported wrapper around cnss_pool_deinit(); counterpart to
 * cnss_initialize_prealloc_pool().
 */
void cnss_deinitialize_prealloc_pool(void)
{
	cnss_pool_deinit();
}
EXPORT_SYMBOL(cnss_deinitialize_prealloc_pool);
  187. /**
  188. * cnss_pool_get_index() - Get the index of memory pool
  189. * @mem: Allocated memory
  190. *
* Returns the index of the memory pool which fits the requested memory. The
  192. * complexity of this check is O(num of memory pools). Returns a negative
  193. * value with error code in case of failure.
  194. *
  195. */
  196. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
  197. static int cnss_pool_get_index(void *mem)
  198. {
  199. struct slab *slab;
  200. struct kmem_cache *cache;
  201. int i;
  202. if (!virt_addr_valid(mem))
  203. return -EINVAL;
  204. /* mem -> slab -> cache */
  205. slab = virt_to_slab(mem);
  206. if (!slab)
  207. return -ENOENT;
  208. cache = slab->slab_cache;
  209. if (!cache)
  210. return -ENOENT;
  211. /* Check if memory belongs to a pool */
  212. for (i = 0; i < cnss_prealloc_pool_size; i++) {
  213. if (cnss_pools[i].cache == cache)
  214. return i;
  215. }
  216. return -ENOENT;
  217. }
  218. #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
  219. static int cnss_pool_get_index(void *mem)
  220. {
  221. struct page *page;
  222. struct kmem_cache *cache;
  223. int i;
  224. if (!virt_addr_valid(mem))
  225. return -EINVAL;
  226. /* mem -> page -> cache */
  227. page = virt_to_head_page(mem);
  228. if (!page)
  229. return -ENOENT;
  230. cache = page->slab_cache;
  231. if (!cache)
  232. return -ENOENT;
  233. /* Check if memory belongs to a pool */
  234. for (i = 0; i < cnss_prealloc_pool_size; i++) {
  235. if (cnss_pools[i].cache == cache)
  236. return i;
  237. }
  238. return -ENOENT;
  239. }
  240. #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
  241. /**
  242. * wcnss_prealloc_get() - Get preallocated memory from a pool
  243. * @size: Size to allocate
  244. *
  245. * Memory pool is chosen based on the size. If memory is not available in a
  246. * given pool it goes to next higher sized pool until it succeeds.
  247. *
  248. * Return: A void pointer to allocated memory
  249. */
  250. void *wcnss_prealloc_get(size_t size)
  251. {
  252. void *mem = NULL;
  253. gfp_t gfp_mask = __GFP_ZERO;
  254. int i;
  255. if (in_interrupt() || !preemptible() || rcu_preempt_depth())
  256. gfp_mask |= GFP_ATOMIC;
  257. else
  258. gfp_mask |= GFP_KERNEL;
  259. if (size >= cnss_pool_alloc_threshold()) {
  260. for (i = 0; i < cnss_prealloc_pool_size; i++) {
  261. if (cnss_pools[i].size >= size && cnss_pools[i].mp) {
  262. mem = mempool_alloc(cnss_pools[i].mp, gfp_mask);
  263. if (mem)
  264. break;
  265. }
  266. }
  267. }
  268. if (!mem && size >= cnss_pool_alloc_threshold()) {
  269. pr_debug("cnss_prealloc: not available for size %zu, flag %x\n",
  270. size, gfp_mask);
  271. }
  272. return mem;
  273. }
  274. EXPORT_SYMBOL(wcnss_prealloc_get);
  275. /**
  276. * wcnss_prealloc_put() - Relase allocated memory
  277. * @mem: Allocated memory
  278. *
  279. * Free the memory got by wcnss_prealloc_get() to slab or pool reserve if memory
  280. * pool doesn't have enough elements.
  281. *
  282. * Return: 1 - success
  283. * 0 - fail
  284. */
  285. int wcnss_prealloc_put(void *mem)
  286. {
  287. int i;
  288. if (!mem)
  289. return 0;
  290. i = cnss_pool_get_index(mem);
  291. if (i >= 0 && i < cnss_prealloc_pool_size && cnss_pools[i].mp) {
  292. mempool_free(mem, cnss_pools[i].mp);
  293. return 1;
  294. }
  295. return 0;
  296. }
  297. EXPORT_SYMBOL(wcnss_prealloc_put);
/* Not implemented. Make use of Linux SLAB features.
 * NOTE(review): presumably kept as an exported no-op so legacy callers
 * still link — confirm before removing.
 */
void wcnss_prealloc_check_memory_leak(void) {}
EXPORT_SYMBOL(wcnss_prealloc_check_memory_leak);

/* Not implemented. Make use of Linux SLAB features. */
int wcnss_pre_alloc_reset(void) { return -EOPNOTSUPP; }
EXPORT_SYMBOL(wcnss_pre_alloc_reset);
  304. /**
  305. * cnss_prealloc_is_valid_dt_node_found - Check if valid device tree node
  306. * present
  307. *
  308. * Valid device tree node means a node with "qcom,wlan" property present
  309. * and "status" property not disabled.
  310. *
  311. * Return: true if valid device tree node found, false if not found
  312. */
  313. static bool cnss_prealloc_is_valid_dt_node_found(void)
  314. {
  315. struct device_node *dn = NULL;
  316. for_each_node_with_property(dn, "qcom,wlan") {
  317. if (of_device_is_available(dn))
  318. break;
  319. }
  320. if (dn)
  321. return true;
  322. return false;
  323. }
  324. static int __init cnss_prealloc_init(void)
  325. {
  326. if (!cnss_prealloc_is_valid_dt_node_found())
  327. return -ENODEV;
  328. return 0;
  329. }
  330. static void __exit cnss_prealloc_exit(void)
  331. {
  332. return;
  333. }
  334. module_init(cnss_prealloc_init);
  335. module_exit(cnss_prealloc_exit);