dp_rx_buffer_pool.c

/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_rx_buffer_pool.h"
#include "dp_ipa.h"
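
/*
 * DOC: RX buffer pools (overview added by the editor, inferred from the
 * code below)
 *
 * Two pre-allocated pools back the RX buffer replenish path when
 * WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL is compiled in:
 *
 * - a per-MAC "emergency" pool (rx_buff_pool): a queue of unmapped nbufs
 *   used when runtime nbuf allocation fails or the RXDMA ring runs low;
 * - a SoC-wide refill pool (rx_refill_buff_pool): a circular ring of
 *   nbufs that are already DMA-mapped, so the RX fast path can consume
 *   them without allocating or mapping.
 */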

#ifndef DP_RX_BUFFER_POOL_SIZE
#define DP_RX_BUFFER_POOL_SIZE 128
#endif

#ifndef DP_RX_BUFF_POOL_ALLOC_THRES
#define DP_RX_BUFF_POOL_ALLOC_THRES 1
#endif

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
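/**
 * dp_rx_buffer_pool_refill() - refill the emergency pool from an RX nbuf
 * chain
 * @soc: SoC handle
 * @nbuf: nbuf (or scattered-MSDU nbuf chain) received on the RXDMA ring
 * @mac_id: MAC id of the RX ring the buffers belong to
 *
 * For each buffer in the chain, allocate a fresh nbuf for the emergency
 * pool until the pool reaches DP_RX_BUFFER_POOL_SIZE. If an allocation
 * fails, the whole chain is recycled into the pools via
 * dp_rx_buffer_pool_nbuf_free() and the caller is told it was consumed.
 *
 * Return: true if the nbuf chain was consumed by the pool, false if the
 * caller retains ownership.
 */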
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
	bool consumed = false;

	if (!bufpool->is_initialized || !pdev)
		return consumed;

	/* process only buffers of RXDMA ring */
	if (soc->wlan_cfg_ctx->rxdma1_enable)
		return consumed;

	first_nbuf = nbuf;

	while (nbuf) {
		next_nbuf = qdf_nbuf_next(nbuf);

		if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
		    DP_RX_BUFFER_POOL_SIZE))
			break;

		refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
					     RX_BUFFER_RESERVATION,
					     rx_desc_pool->buf_alignment,
					     FALSE);

		/* Failed to allocate a new nbuf; reset the chain and place
		 * it back into the pool.
		 */
		if (!refill_nbuf) {
			DP_STATS_INC(pdev,
				     rx_buffer_pool.num_bufs_consumed, 1);
			consumed = true;
			break;
		}

		/* Successful allocation!! */
		DP_STATS_INC(pdev,
			     rx_buffer_pool.num_bufs_alloc_success, 1);
		qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
						 refill_nbuf);
		nbuf = next_nbuf;
	}

	nbuf = first_nbuf;
	if (consumed) {
		/* Free the MSDU/scattered MSDU */
		while (nbuf) {
			next_nbuf = qdf_nbuf_next(nbuf);
			dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
			nbuf = next_nbuf;
		}
	}

	return consumed;
}
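
/**
 * dp_rx_buffer_pool_nbuf_free() - return an nbuf to the emergency pool,
 * or free it if the pool is full or uninitialized
 * @soc: SoC handle
 * @nbuf: nbuf to be returned
 * @mac_id: MAC id of the RX ring
 *
 * The nbuf is reset (headroom and alignment restored) before it is
 * queued, so it can later be handed straight back to the RXDMA ring.
 */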
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool;
	struct rx_buff_pool *buff_pool;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	rx_desc_pool = &soc->rx_desc_buf[mac_id];
	buff_pool = &soc->rx_buff_pool[mac_id];

	if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
		       DP_RX_BUFFER_POOL_SIZE) ||
	    !buff_pool->is_initialized)
		return qdf_nbuf_free(nbuf);

	qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
		       rx_desc_pool->buf_alignment);
	qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
}
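
/**
 * dp_rx_refill_buff_pool_enqueue() - top up the refill buffer pool ring
 * @soc: SoC handle
 *
 * Fills the circular buf_elem ring with freshly allocated and DMA-mapped
 * nbufs, in bursts of DP_RX_REFILL_BUFF_POOL_BURST, until only one free
 * slot remains (one slot is kept empty so a full ring can be told apart
 * from an empty one, hence the "- 1" in the refill count). This lets
 * dp_rx_buffer_pool_nbuf_alloc() dequeue pre-mapped buffers without
 * allocating or mapping in the RX path.
 *
 * Note: the head masking assumes max_bufq_len is a power of two.
 */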
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
{
	struct rx_desc_pool *rx_desc_pool;
	struct rx_refill_buff_pool *buff_pool;
	qdf_device_t dev;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int count, i;
	uint16_t num_refill;
	uint16_t total_num_refill;
	uint16_t total_count = 0;
	uint16_t head, tail;

	if (!soc)
		return;

	dev = soc->osdev;
	buff_pool = &soc->rx_refill_buff_pool;
	rx_desc_pool = &soc->rx_desc_buf[0];
	if (!buff_pool->is_initialized)
		return;

	head = buff_pool->head;
	tail = buff_pool->tail;
	if (tail > head)
		total_num_refill = (tail - head - 1);
	else
		total_num_refill = (buff_pool->max_bufq_len - head +
				    tail - 1);

	while (total_num_refill) {
		if (total_num_refill > DP_RX_REFILL_BUFF_POOL_BURST)
			num_refill = DP_RX_REFILL_BUFF_POOL_BURST;
		else
			num_refill = total_num_refill;

		count = 0;
		for (i = 0; i < num_refill; i++) {
			nbuf = qdf_nbuf_alloc(dev, rx_desc_pool->buf_size,
					      RX_BUFFER_RESERVATION,
					      rx_desc_pool->buf_alignment,
					      FALSE);
			if (qdf_unlikely(!nbuf))
				continue;

			ret = qdf_nbuf_map_nbytes_single(dev, nbuf,
							 QDF_DMA_FROM_DEVICE,
							 rx_desc_pool->buf_size);
			if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
				qdf_nbuf_free(nbuf);
				continue;
			}

			dp_audio_smmu_map(dev,
					  qdf_mem_paddr_from_dmaaddr(dev,
					  QDF_NBUF_CB_PADDR(nbuf)),
					  QDF_NBUF_CB_PADDR(nbuf),
					  rx_desc_pool->buf_size);

			buff_pool->buf_elem[head++] = nbuf;
			head &= (buff_pool->max_bufq_len - 1);
			count++;
		}

		if (count) {
			buff_pool->head = head;
			total_num_refill -= count;
			total_count += count;
		}
	}

	DP_STATS_INC(buff_pool->dp_pdev,
		     rx_refill_buff_pool.num_bufs_refilled,
		     total_count);
}
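
/**
 * dp_rx_refill_buff_pool_dequeue_nbuf() - pop one pre-allocated,
 * pre-mapped nbuf from the refill pool ring
 * @soc: SoC handle
 *
 * Return: nbuf on success, NULL if the ring is empty (head == tail).
 */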
static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	qdf_nbuf_t nbuf = NULL;
	uint16_t head, tail;

	head = buff_pool->head;
	tail = buff_pool->tail;

	if (head == tail)
		return NULL;

	nbuf = buff_pool->buf_elem[tail++];
	tail &= (buff_pool->max_bufq_len - 1);
	buff_pool->tail = tail;

	return nbuf;
}
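
/**
 * dp_rx_buffer_pool_nbuf_alloc() - allocate an RX buffer, falling back
 * through three sources
 * @soc: SoC handle
 * @mac_id: MAC id of the RX ring
 * @rx_desc_pool: RX descriptor pool providing buffer size and alignment
 * @num_available_buffers: buffers currently available on the RXDMA ring
 *
 * Order of preference:
 * 1. the pre-mapped refill pool ring,
 * 2. a fresh qdf_nbuf_alloc(),
 * 3. the emergency pool, once allocation has failed
 *    DP_RX_BUFF_POOL_ALLOC_THRES times or the ring is running low
 *    (below 10% of its entries).
 *
 * Return: nbuf on success, NULL on failure.
 */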
qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t num_available_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_buff_pool *buff_pool;
	struct dp_srng *dp_rxdma_srng;
	qdf_nbuf_t nbuf;

	nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
	if (qdf_likely(nbuf)) {
		DP_STATS_INC(dp_pdev,
			     rx_refill_buff_pool.num_bufs_allocated, 1);
		return nbuf;
	}

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	buff_pool = &soc->rx_buff_pool[mac_id];
	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

	nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment,
			      FALSE);

	if (!buff_pool->is_initialized)
		return nbuf;

	if (qdf_likely(nbuf)) {
		buff_pool->nbuf_fail_cnt = 0;
		return nbuf;
	}

	buff_pool->nbuf_fail_cnt++;

	/* Fall back to the emergency buffer pool */
	if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFF_POOL_ALLOC_THRES ||
	    (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
		nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
		if (nbuf)
			DP_STATS_INC(dp_pdev,
				     rx_buffer_pool.num_pool_bufs_replenish, 1);
	}

	return nbuf;
}
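
/**
 * dp_rx_buffer_pool_nbuf_map() - DMA-map an RX nbuf if it is not already
 * mapped
 * @soc: SoC handle
 * @rx_desc_pool: RX descriptor pool providing the buffer size
 * @nbuf_frag_info_t: nbuf frag info carrying the nbuf to map
 *
 * Buffers dequeued from the refill pool already carry a valid physical
 * address, so a non-zero QDF_NBUF_CB_PADDR() is treated as "already
 * mapped" and skipped.
 *
 * Return: QDF_STATUS_SUCCESS if mapped or already mapped, error otherwise.
 */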
QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)) {
		ret = qdf_nbuf_map_nbytes_single(soc->osdev,
						 (nbuf_frag_info_t->virt_addr).nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (QDF_IS_STATUS_SUCCESS(ret))
			dp_audio_smmu_map(soc->osdev,
					  qdf_mem_paddr_from_dmaaddr(soc->osdev,
					  QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)),
					  QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf),
					  rx_desc_pool->buf_size);
	}

	return ret;
}
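
/**
 * dp_rx_refill_buff_pool_init() - allocate and pre-map the refill pool ring
 * @soc: SoC handle
 * @mac_id: MAC id used to pick the RX descriptor pool
 *
 * Allocates the buf_elem pointer array sized from the wlan_cfg refill
 * pool size, then fills all but one slot with mapped nbufs. Allocation
 * or map failures are skipped, so the pool may start partially filled;
 * the actual fill level is logged against the requested one.
 */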
static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	QDF_STATUS ret;
	uint16_t head = 0;
	int i;

	if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX refill buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	buff_pool->max_bufq_len =
		wlan_cfg_get_rx_refill_buf_pool_size(soc->wlan_cfg_ctx);

	buff_pool->buf_elem = qdf_mem_malloc(buff_pool->max_bufq_len *
					     sizeof(qdf_nbuf_t));
	if (!buff_pool->buf_elem) {
		dp_err("Failed to allocate memory for RX refill buf element");
		buff_pool->is_initialized = false;
		return;
	}

	buff_pool->dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);
	buff_pool->tail = 0;

	for (i = 0; i < (buff_pool->max_bufq_len - 1); i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			continue;
		}

		dp_audio_smmu_map(soc->osdev,
				  qdf_mem_paddr_from_dmaaddr(soc->osdev,
				  QDF_NBUF_CB_PADDR(nbuf)),
				  QDF_NBUF_CB_PADDR(nbuf),
				  rx_desc_pool->buf_size);

		buff_pool->buf_elem[head] = nbuf;
		head++;
	}

	buff_pool->head = head;

	dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
		buff_pool->max_bufq_len,
		buff_pool->head);

	buff_pool->is_initialized = true;
}
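
/**
 * dp_rx_buffer_pool_init() - initialize the per-MAC emergency buffer pool
 * @soc: SoC handle
 * @mac_id: MAC id of the RX ring
 *
 * Also kicks off dp_rx_refill_buff_pool_init() for the SoC-wide refill
 * pool. The emergency queue is filled with up to DP_RX_BUFFER_POOL_SIZE
 * unmapped nbufs; allocation failures are tolerated and only reduce the
 * initial fill level.
 */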
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;
	int i;

	dp_rx_refill_buff_pool_init(soc, mac_id);

	if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_info("RX buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	if (buff_pool->is_initialized)
		return;

	qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);

	for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
						 nbuf);
	}

	dp_info("RX buffer pool required allocation: %u actual allocation: %u",
		DP_RX_BUFFER_POOL_SIZE,
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	buff_pool->is_initialized = true;
}
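
/**
 * dp_rx_refill_buff_pool_deinit() - drain and free the refill pool ring
 * @soc: SoC handle
 * @mac_id: MAC id used to pick the RX descriptor pool
 *
 * Dequeues every remaining nbuf, unmaps and frees it, then releases the
 * buf_elem array.
 */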
static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	uint32_t count = 0;

	if (!buff_pool->is_initialized)
		return;

	while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
		dp_audio_smmu_unmap(soc->osdev,
				    QDF_NBUF_CB_PADDR(nbuf),
				    rx_desc_pool->buf_size);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL,
					     rx_desc_pool->buf_size);
		qdf_nbuf_free(nbuf);
		count++;
	}

	dp_info("Rx refill buffers freed during deinit %u head: %u, tail: %u",
		count, buff_pool->head, buff_pool->tail);

	qdf_mem_free(buff_pool->buf_elem);
	buff_pool->is_initialized = false;
}
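
/**
 * dp_rx_buffer_pool_deinit() - drain and free the emergency buffer pool
 * @soc: SoC handle
 * @mac_id: MAC id of the RX ring
 *
 * Tears down the refill pool first, then frees every nbuf still queued
 * in emerg_nbuf_q.
 */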
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;

	dp_rx_refill_buff_pool_deinit(soc, mac_id);

	if (!buff_pool->is_initialized)
		return;

	dp_info("buffers in the RX buffer pool during deinit: %u",
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
		qdf_nbuf_free(nbuf);

	buff_pool->is_initialized = false;
}
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */