dp_rx_buffer_pool.c

/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_rx_buffer_pool.h"
#include "dp_ipa.h"

#ifndef DP_RX_BUFFER_POOL_SIZE
#define DP_RX_BUFFER_POOL_SIZE 128
#endif

#ifndef DP_RX_BUFF_POOL_ALLOC_THRES
#define DP_RX_BUFF_POOL_ALLOC_THRES 1
#endif

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
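
/**
 * dp_rx_buffer_pool_refill() - refill the emergency nbuf pool when a chain
 *				of RX buffers is consumed
 * @soc: DP SoC handle
 * @nbuf: head of the (possibly scattered) MSDU chain being consumed
 * @mac_id: MAC id of the RXDMA ring the buffers belong to
 *
 * Allocates one replacement nbuf per buffer in the chain and parks it in
 * the emergency queue. If any replacement allocation fails, the original
 * chain is recycled into the pool instead and reported as consumed, so
 * the caller must not reuse it.
 *
 * Return: true if the chain was consumed by the pool, false otherwise
 */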
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
	bool consumed = false;

	if (!bufpool->is_initialized || !pdev)
		return consumed;

	/* process only buffers of RXDMA ring */
	if (soc->wlan_cfg_ctx->rxdma1_enable)
		return consumed;

	first_nbuf = nbuf;

	while (nbuf) {
		next_nbuf = qdf_nbuf_next(nbuf);

		if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
			       DP_RX_BUFFER_POOL_SIZE))
			break;

		refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
					     RX_BUFFER_RESERVATION,
					     rx_desc_pool->buf_alignment,
					     FALSE);

		/* Failed to allocate new nbuf, reset and place it back
		 * in to the pool.
		 */
		if (!refill_nbuf) {
			DP_STATS_INC(pdev,
				     rx_buffer_pool.num_bufs_consumed, 1);
			consumed = true;
			break;
		}

		/* Successful allocation!! */
		DP_STATS_INC(pdev,
			     rx_buffer_pool.num_bufs_alloc_success, 1);
		qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
						 refill_nbuf);
		nbuf = next_nbuf;
	}

	nbuf = first_nbuf;
	if (consumed) {
		/* Free the MSDU/scattered MSDU */
		while (nbuf) {
			next_nbuf = qdf_nbuf_next(nbuf);
			dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
			nbuf = next_nbuf;
		}
	}

	return consumed;
}
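
/**
 * dp_rx_buffer_pool_nbuf_free() - free an RX nbuf, recycling it into the
 *				   emergency pool while the pool holds fewer
 *				   than DP_RX_BUFFER_POOL_SIZE buffers
 * @soc: DP SoC handle
 * @nbuf: buffer to free
 * @mac_id: MAC id of the RXDMA ring
 */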
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool;
	struct rx_buff_pool *buff_pool;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	rx_desc_pool = &soc->rx_desc_buf[mac_id];
	buff_pool = &soc->rx_buff_pool[mac_id];

	if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
		       DP_RX_BUFFER_POOL_SIZE) ||
	    !buff_pool->is_initialized)
		return qdf_nbuf_free(nbuf);

	qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
		       rx_desc_pool->buf_alignment);
	qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
}
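
/**
 * dp_rx_refill_buff_pool_enqueue() - replenish the refill buffer pool ring
 * @soc: DP SoC handle
 *
 * Derives the number of free ring slots from the head/tail indices, then
 * allocates and DMA-maps buffers in bursts of DP_RX_REFILL_BUFF_POOL_BURST
 * until the ring is full or allocation stalls.
 */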
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
{
	struct rx_desc_pool *rx_desc_pool;
	struct rx_refill_buff_pool *buff_pool;
	qdf_device_t dev;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int count, i;
	uint16_t num_refill;
	uint16_t total_num_refill;
	uint16_t total_count = 0;
	uint16_t head, tail;

	if (!soc)
		return;

	dev = soc->osdev;
	buff_pool = &soc->rx_refill_buff_pool;
	rx_desc_pool = &soc->rx_desc_buf[0];
	if (!buff_pool->is_initialized)
		return;

	head = buff_pool->head;
	tail = buff_pool->tail;
	if (tail > head)
		total_num_refill = (tail - head - 1);
	else
		total_num_refill = (DP_RX_REFILL_BUFF_POOL_SIZE - head +
				    tail - 1);

	while (total_num_refill) {
		if (total_num_refill > DP_RX_REFILL_BUFF_POOL_BURST)
			num_refill = DP_RX_REFILL_BUFF_POOL_BURST;
		else
			num_refill = total_num_refill;

		count = 0;
		for (i = 0; i < num_refill; i++) {
			nbuf = qdf_nbuf_alloc(dev, rx_desc_pool->buf_size,
					      RX_BUFFER_RESERVATION,
					      rx_desc_pool->buf_alignment,
					      FALSE);
			if (qdf_unlikely(!nbuf))
				continue;

			ret = qdf_nbuf_map_nbytes_single(dev, nbuf,
							 QDF_DMA_FROM_DEVICE,
							 rx_desc_pool->buf_size);
			if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
				qdf_nbuf_free(nbuf);
				continue;
			}

			buff_pool->buf_elem[head++] = nbuf;
			head &= (DP_RX_REFILL_BUFF_POOL_SIZE - 1);
			count++;
		}

		/* Defensive fix: bail out if no buffer could be allocated
		 * or mapped in this burst; otherwise the loop would spin
		 * forever under memory pressure.
		 */
		if (!count)
			break;

		buff_pool->head = head;
		total_num_refill -= count;
		total_count += count;
	}

	DP_STATS_INC(buff_pool->dp_pdev,
		     rx_refill_buff_pool.num_bufs_refilled,
		     total_count);
}
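
/**
 * dp_rx_refill_buff_pool_dequeue_nbuf() - pop one pre-allocated, pre-mapped
 *					   nbuf from the refill pool ring
 * @soc: DP SoC handle
 *
 * Return: nbuf, or NULL if the ring is empty
 */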
static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	qdf_nbuf_t nbuf = NULL;
	uint16_t head, tail;

	head = buff_pool->head;
	tail = buff_pool->tail;

	if (head == tail)
		return NULL;

	nbuf = buff_pool->buf_elem[tail++];
	tail &= (DP_RX_REFILL_BUFF_POOL_SIZE - 1);
	buff_pool->tail = tail;

	return nbuf;
}
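
/**
 * dp_rx_buffer_pool_nbuf_alloc() - allocate an RX buffer
 * @soc: DP SoC handle
 * @mac_id: MAC id of the RXDMA ring
 * @rx_desc_pool: RX descriptor pool the buffer is sized for
 * @num_available_buffers: number of buffers currently available to the ring
 *
 * Tries the refill pool first, then a fresh allocation. After
 * DP_RX_BUFF_POOL_ALLOC_THRES consecutive allocation failures, or when
 * fewer than a tenth of the RXDMA ring entries are available, falls back
 * to the emergency pool.
 *
 * Return: nbuf, or NULL if every source is exhausted
 */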
qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t num_available_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_buff_pool *buff_pool;
	struct dp_srng *dp_rxdma_srng;
	qdf_nbuf_t nbuf;

	nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
	if (qdf_likely(nbuf)) {
		DP_STATS_INC(dp_pdev,
			     rx_refill_buff_pool.num_bufs_allocated, 1);
		return nbuf;
	}

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	buff_pool = &soc->rx_buff_pool[mac_id];
	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

	nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment,
			      FALSE);

	if (!buff_pool->is_initialized)
		return nbuf;

	if (qdf_likely(nbuf)) {
		buff_pool->nbuf_fail_cnt = 0;
		return nbuf;
	}

	buff_pool->nbuf_fail_cnt++;

	/* Allocate buffer from the buffer pool.
	 * Note: the threshold macro is spelled DP_RX_BUFF_POOL_ALLOC_THRES,
	 * matching the #ifndef guard at the top of this file.
	 */
	if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFF_POOL_ALLOC_THRES ||
	    (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
		nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
		if (nbuf)
			DP_STATS_INC(dp_pdev,
				     rx_buffer_pool.num_pool_bufs_replenish, 1);
	}

	return nbuf;
}
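
/**
 * dp_rx_buffer_pool_nbuf_map() - DMA-map an RX buffer, unless it already
 *				  carries a physical address (i.e. it came
 *				  pre-mapped from the refill pool)
 * @soc: DP SoC handle
 * @rx_desc_pool: RX descriptor pool providing the buffer size
 * @nbuf_frag_info_t: frag info holding the nbuf to map
 *
 * Return: QDF_STATUS_SUCCESS, or the mapping error
 */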
QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf))
		ret = qdf_nbuf_map_nbytes_single(soc->osdev,
						 (nbuf_frag_info_t->virt_addr).nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);

	return ret;
}
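
/**
 * dp_rx_refill_buff_pool_init() - pre-allocate and DMA-map buffers for the
 *				   refill pool ring
 * @soc: DP SoC handle
 * @mac_id: MAC id used to pick the RX descriptor pool
 *
 * Fills at most DP_RX_REFILL_BUFF_POOL_SIZE - 1 slots; one slot stays
 * empty so that head == tail unambiguously means the ring is empty.
 */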
static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	QDF_STATUS ret;
	uint16_t head = 0;
	int i;

	if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX refill buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	buff_pool->max_bufq_len = DP_RX_REFILL_BUFF_POOL_SIZE;
	buff_pool->dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);
	buff_pool->tail = 0;

	for (i = 0; i < (buff_pool->max_bufq_len - 1); i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			continue;
		}

		buff_pool->buf_elem[head] = nbuf;
		head++;
	}

	buff_pool->head = head;

	dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
		buff_pool->max_bufq_len,
		buff_pool->head);

	buff_pool->is_initialized = true;
}
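
/**
 * dp_rx_buffer_pool_init() - initialize the refill pool and the per-MAC
 *			      emergency buffer pool
 * @soc: DP SoC handle
 * @mac_id: MAC id of the RXDMA ring
 *
 * Pre-allocates up to DP_RX_BUFFER_POOL_SIZE emergency buffers.
 */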
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;
	int i;

	dp_rx_refill_buff_pool_init(soc, mac_id);

	if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	if (buff_pool->is_initialized)
		return;

	qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);

	for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
						 nbuf);
	}

	dp_info("RX buffer pool required allocation: %u actual allocation: %u",
		DP_RX_BUFFER_POOL_SIZE,
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	buff_pool->is_initialized = true;
}
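
/**
 * dp_rx_refill_buff_pool_deinit() - unmap and free every buffer left in
 *				     the refill pool ring
 * @soc: DP SoC handle
 * @mac_id: MAC id used to pick the RX descriptor pool
 */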
static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	uint32_t count = 0;

	if (!buff_pool->is_initialized)
		return;

	while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL,
					     rx_desc_pool->buf_size);
		qdf_nbuf_free(nbuf);
		count++;
	}

	dp_info("Rx refill buffers freed during deinit %u head: %u, tail: %u",
		count, buff_pool->head, buff_pool->tail);

	buff_pool->is_initialized = false;
}
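
/**
 * dp_rx_buffer_pool_deinit() - tear down the refill pool, then drain and
 *				free the emergency buffer pool
 * @soc: DP SoC handle
 * @mac_id: MAC id of the RXDMA ring
 */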
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;

	dp_rx_refill_buff_pool_deinit(soc, mac_id);

	if (!buff_pool->is_initialized)
		return;

	dp_info("buffers in the RX buffer pool during deinit: %u",
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
		qdf_nbuf_free(nbuf);

	buff_pool->is_initialized = false;
}
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */