/* cnss_prealloc.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2012,2014-2017,2019-2021 The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/module.h>
  7. #include <linux/slab.h>
  8. #include <linux/mempool.h>
  9. #include <linux/mm.h>
  10. #include <linux/err.h>
  11. #include <linux/of.h>
  12. #include <linux/version.h>
  13. #include "cnss_common.h"
  14. #ifdef CONFIG_CNSS_OUT_OF_TREE
  15. #include "cnss_prealloc.h"
  16. #else
  17. #include <net/cnss_prealloc.h>
  18. #endif
  19. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
  20. /* Ideally header should be from standard include path. So this is not an
  21. * ideal way of header inclusion but use of slab struct to derive cache
  22. * from a mem ptr helps in avoiding additional tracking and/or adding headroom
  23. * of 8 bytes for cache in the beginning of buffer and wasting extra memory,
  24. * particulary in the case when size of memory requested falls around the edge
  25. * of a page boundary. We also have precedence of minidump_memory.c which
  26. * includes mm/slab.h using this style.
  27. */
  28. #include "../mm/slab.h"
  29. #endif
  30. MODULE_LICENSE("GPL v2");
  31. MODULE_DESCRIPTION("CNSS prealloc driver");
  32. /* cnss preallocation scheme is a memory pool that always tries to keep a
  33. * list of free memory for use in emergencies. It is implemented on kernel
  34. * features: memorypool and kmem cache.
  35. */
/* One preallocated memory pool: a mempool reserve layered on a slab cache. */
struct cnss_pool {
	size_t size;              /* size of one allocation unit in bytes */
	int min;                  /* minimum units held in the mempool reserve */
	const char name[50];      /* cache/pool name shown in /proc/slabinfo */
	mempool_t *mp;            /* mempool handle, populated by cnss_pool_init() */
	struct kmem_cache *cache; /* backing slab cache, populated by cnss_pool_init() */
};
  43. /**
  44. * Memory pool
  45. * -----------
  46. *
  47. * How to update this table:
  48. *
  49. * 1. Add a new row with following elements
  50. * size : Size of one allocation unit in bytes.
  51. * min : Minimum units to be reserved. Used only if a regular
  52. * allocation fails.
  53. * name : Name of the cache/pool. Will be displayed in /proc/slabinfo
  54. * if not merged with another pool.
  55. * mp : A pointer to memory pool. Updated during init.
  56. * cache : A pointer to cache. Updated during init.
  57. * 2. Always keep the table in increasing order
  58. * 3. Please keep the reserve pool as minimum as possible as it's always
  59. * preallocated.
  60. * 4. Always profile with different use cases after updating this table.
  61. * 5. A dynamic view of this pool can be viewed at /proc/slabinfo.
  62. * 6. Each pool has a sys node at /sys/kernel/slab/<name>
  63. *
  64. */
  65. /* size, min pool reserve, name, memorypool handler, cache handler*/
/* Default pool table (used for QCA6390/QCA6490/KIWI/MANGO/PEACH/WCN6450 and
 * any unrecognized device id). Must stay sorted by ascending size.
 */
static struct cnss_pool cnss_pools_default[] = {
	{8 * 1024, 16, "cnss-pool-8k", NULL, NULL},
	{16 * 1024, 16, "cnss-pool-16k", NULL, NULL},
	{32 * 1024, 22, "cnss-pool-32k", NULL, NULL},
	{64 * 1024, 38, "cnss-pool-64k", NULL, NULL},
	{128 * 1024, 10, "cnss-pool-128k", NULL, NULL},
};
/* Smaller reserves for ADRASTEA targets. Must stay sorted by ascending size. */
static struct cnss_pool cnss_pools_adrastea[] = {
	{8 * 1024, 2, "cnss-pool-8k", NULL, NULL},
	{16 * 1024, 10, "cnss-pool-16k", NULL, NULL},
	{32 * 1024, 8, "cnss-pool-32k", NULL, NULL},
	{64 * 1024, 4, "cnss-pool-64k", NULL, NULL},
	{128 * 1024, 2, "cnss-pool-128k", NULL, NULL},
};
/* Pool table tuned for WCN6750. Must stay sorted by ascending size. */
static struct cnss_pool cnss_pools_wcn6750[] = {
	{8 * 1024, 2, "cnss-pool-8k", NULL, NULL},
	{16 * 1024, 8, "cnss-pool-16k", NULL, NULL},
	{32 * 1024, 11, "cnss-pool-32k", NULL, NULL},
	{64 * 1024, 15, "cnss-pool-64k", NULL, NULL},
	{128 * 1024, 4, "cnss-pool-128k", NULL, NULL},
};
/* Active pool table and its length; selected per device id by
 * cnss_assign_prealloc_pool(). Defaults to the generic table size.
 */
struct cnss_pool *cnss_pools;
unsigned int cnss_prealloc_pool_size = ARRAY_SIZE(cnss_pools_default);
  89. /**
  90. * cnss_pool_alloc_threshold() - Allocation threshold
  91. *
  92. * Minimum memory size to be part of cnss pool.
  93. *
  94. * Return: Size
  95. *
  96. */
static inline size_t cnss_pool_alloc_threshold(void)
{
	/* Table is sorted ascending, so index 0 holds the smallest unit size;
	 * requests below this never come from the pools.
	 */
	return cnss_pools[0].size;
}
  101. /**
  102. * cnss_pool_int() - Initialize memory pools.
  103. *
  104. * Create cnss pools as configured by cnss_pools[]. It is the responsibility of
  105. * the caller to invoke cnss_pool_deinit() routine to clean it up. This
  106. * function needs to be called at early boot to preallocate minimum buffers in
  107. * the pool.
  108. *
  109. * Return: 0 - success, otherwise error code.
  110. *
  111. */
  112. static int cnss_pool_init(void)
  113. {
  114. int i;
  115. for (i = 0; i < cnss_prealloc_pool_size; i++) {
  116. /* Create the slab cache */
  117. cnss_pools[i].cache =
  118. kmem_cache_create_usercopy(cnss_pools[i].name,
  119. cnss_pools[i].size, 0,
  120. SLAB_ACCOUNT, 0,
  121. cnss_pools[i].size, NULL);
  122. if (!cnss_pools[i].cache) {
  123. pr_err("cnss_prealloc: cache %s failed\n",
  124. cnss_pools[i].name);
  125. continue;
  126. }
  127. /* Create the pool and associate to slab cache */
  128. cnss_pools[i].mp =
  129. mempool_create(cnss_pools[i].min, mempool_alloc_slab,
  130. mempool_free_slab, cnss_pools[i].cache);
  131. if (!cnss_pools[i].mp) {
  132. pr_err("cnss_prealloc: mempool %s failed\n",
  133. cnss_pools[i].name);
  134. kmem_cache_destroy(cnss_pools[i].cache);
  135. cnss_pools[i].cache = NULL;
  136. continue;
  137. }
  138. pr_info("cnss_prealloc: created mempool %s of min size %d * %zu\n",
  139. cnss_pools[i].name, cnss_pools[i].min,
  140. cnss_pools[i].size);
  141. }
  142. return 0;
  143. }
  144. /**
  145. * cnss_pool_deinit() - Free memory pools.
  146. *
  147. * Free the memory pools and return resources back to the system. It warns
  148. * if there is any pending element in memory pool or cache.
  149. *
  150. */
  151. static void cnss_pool_deinit(void)
  152. {
  153. int i;
  154. if (!cnss_pools)
  155. return;
  156. for (i = 0; i < cnss_prealloc_pool_size; i++) {
  157. pr_info("cnss_prealloc: destroy mempool %s\n",
  158. cnss_pools[i].name);
  159. mempool_destroy(cnss_pools[i].mp);
  160. kmem_cache_destroy(cnss_pools[i].cache);
  161. cnss_pools[i].mp = NULL;
  162. cnss_pools[i].cache = NULL;
  163. }
  164. }
  165. void cnss_assign_prealloc_pool(unsigned long device_id)
  166. {
  167. pr_info("cnss_prealloc: assign cnss pool for device id 0x%lx", device_id);
  168. switch (device_id) {
  169. case ADRASTEA_DEVICE_ID:
  170. cnss_pools = cnss_pools_adrastea;
  171. cnss_prealloc_pool_size = ARRAY_SIZE(cnss_pools_adrastea);
  172. break;
  173. case WCN6750_DEVICE_ID:
  174. cnss_pools = cnss_pools_wcn6750;
  175. cnss_prealloc_pool_size = ARRAY_SIZE(cnss_pools_wcn6750);
  176. break;
  177. case WCN6450_DEVICE_ID:
  178. case QCA6390_DEVICE_ID:
  179. case QCA6490_DEVICE_ID:
  180. case MANGO_DEVICE_ID:
  181. case PEACH_DEVICE_ID:
  182. case KIWI_DEVICE_ID:
  183. default:
  184. cnss_pools = cnss_pools_default;
  185. cnss_prealloc_pool_size = ARRAY_SIZE(cnss_pools_default);
  186. }
  187. }
/**
 * cnss_initialize_prealloc_pool() - Select and create pools for a device
 * @device_id: PCI/platform device id of the WLAN chip
 *
 * Picks the per-device pool table, then creates the caches and mempools.
 * Pair with cnss_deinitialize_prealloc_pool() to release the resources.
 */
void cnss_initialize_prealloc_pool(unsigned long device_id)
{
	cnss_assign_prealloc_pool(device_id);
	cnss_pool_init();
}
EXPORT_SYMBOL(cnss_initialize_prealloc_pool);
/**
 * cnss_deinitialize_prealloc_pool() - Destroy all pools and caches
 *
 * Returns preallocated reserves and slab caches back to the system.
 */
void cnss_deinitialize_prealloc_pool(void)
{
	cnss_pool_deinit();
}
EXPORT_SYMBOL(cnss_deinitialize_prealloc_pool);
  199. /**
  200. * cnss_pool_get_index() - Get the index of memory pool
  201. * @mem: Allocated memory
  202. *
  203. * Returns the index of the memory pool which fits the reqested memory. The
  204. * complexity of this check is O(num of memory pools). Returns a negative
  205. * value with error code in case of failure.
  206. *
  207. */
  208. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
  209. static int cnss_pool_get_index(void *mem)
  210. {
  211. struct slab *slab;
  212. struct kmem_cache *cache;
  213. int i;
  214. if (!virt_addr_valid(mem))
  215. return -EINVAL;
  216. /* mem -> slab -> cache */
  217. slab = virt_to_slab(mem);
  218. if (!slab)
  219. return -ENOENT;
  220. cache = slab->slab_cache;
  221. if (!cache)
  222. return -ENOENT;
  223. /* Check if memory belongs to a pool */
  224. for (i = 0; i < cnss_prealloc_pool_size; i++) {
  225. if (cnss_pools[i].cache == cache)
  226. return i;
  227. }
  228. return -ENOENT;
  229. }
  230. #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
  231. static int cnss_pool_get_index(void *mem)
  232. {
  233. struct page *page;
  234. struct kmem_cache *cache;
  235. int i;
  236. if (!virt_addr_valid(mem))
  237. return -EINVAL;
  238. /* mem -> page -> cache */
  239. page = virt_to_head_page(mem);
  240. if (!page)
  241. return -ENOENT;
  242. cache = page->slab_cache;
  243. if (!cache)
  244. return -ENOENT;
  245. /* Check if memory belongs to a pool */
  246. for (i = 0; i < cnss_prealloc_pool_size; i++) {
  247. if (cnss_pools[i].cache == cache)
  248. return i;
  249. }
  250. return -ENOENT;
  251. }
  252. #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
  253. /**
  254. * wcnss_prealloc_get() - Get preallocated memory from a pool
  255. * @size: Size to allocate
  256. *
  257. * Memory pool is chosen based on the size. If memory is not available in a
  258. * given pool it goes to next higher sized pool until it succeeds.
  259. *
  260. * Return: A void pointer to allocated memory
  261. */
  262. void *wcnss_prealloc_get(size_t size)
  263. {
  264. void *mem = NULL;
  265. gfp_t gfp_mask = __GFP_ZERO;
  266. int i;
  267. if (!cnss_pools)
  268. return mem;
  269. if (in_interrupt() || !preemptible() || rcu_preempt_depth())
  270. gfp_mask |= GFP_ATOMIC;
  271. else
  272. gfp_mask |= GFP_KERNEL;
  273. if (size >= cnss_pool_alloc_threshold()) {
  274. for (i = 0; i < cnss_prealloc_pool_size; i++) {
  275. if (cnss_pools[i].size >= size && cnss_pools[i].mp) {
  276. mem = mempool_alloc(cnss_pools[i].mp, gfp_mask);
  277. if (mem)
  278. break;
  279. }
  280. }
  281. }
  282. if (!mem && size >= cnss_pool_alloc_threshold()) {
  283. pr_debug("cnss_prealloc: not available for size %zu, flag %x\n",
  284. size, gfp_mask);
  285. }
  286. return mem;
  287. }
  288. EXPORT_SYMBOL(wcnss_prealloc_get);
  289. /**
  290. * wcnss_prealloc_put() - Relase allocated memory
  291. * @mem: Allocated memory
  292. *
  293. * Free the memory got by wcnss_prealloc_get() to slab or pool reserve if memory
  294. * pool doesn't have enough elements.
  295. *
  296. * Return: 1 - success
  297. * 0 - fail
  298. */
  299. int wcnss_prealloc_put(void *mem)
  300. {
  301. int i;
  302. if (!mem || !cnss_pools)
  303. return 0;
  304. i = cnss_pool_get_index(mem);
  305. if (i >= 0 && i < cnss_prealloc_pool_size && cnss_pools[i].mp) {
  306. mempool_free(mem, cnss_pools[i].mp);
  307. return 1;
  308. }
  309. return 0;
  310. }
  311. EXPORT_SYMBOL(wcnss_prealloc_put);
/* Not implemented. Make use of Linux SLAB features. */
/* Kept as an exported no-op for legacy callers of the wcnss API. */
void wcnss_prealloc_check_memory_leak(void) {}
EXPORT_SYMBOL(wcnss_prealloc_check_memory_leak);
/* Not implemented. Make use of Linux SLAB features. */
/* Exported stub; always reports the operation as unsupported. */
int wcnss_pre_alloc_reset(void) { return -EOPNOTSUPP; }
EXPORT_SYMBOL(wcnss_pre_alloc_reset);
  318. /**
  319. * cnss_prealloc_is_valid_dt_node_found - Check if valid device tree node
  320. * present
  321. *
  322. * Valid device tree node means a node with "qcom,wlan" property present
  323. * and "status" property not disabled.
  324. *
  325. * Return: true if valid device tree node found, false if not found
  326. */
  327. static bool cnss_prealloc_is_valid_dt_node_found(void)
  328. {
  329. struct device_node *dn = NULL;
  330. for_each_node_with_property(dn, "qcom,wlan") {
  331. if (of_device_is_available(dn))
  332. break;
  333. }
  334. if (dn)
  335. return true;
  336. return false;
  337. }
  338. static int __init cnss_prealloc_init(void)
  339. {
  340. if (!cnss_prealloc_is_valid_dt_node_found())
  341. return -ENODEV;
  342. return 0;
  343. }
  344. static void __exit cnss_prealloc_exit(void)
  345. {
  346. return;
  347. }
  348. module_init(cnss_prealloc_init);
  349. module_exit(cnss_prealloc_exit);