/* dp_rx_buffer_pool.c */
  1. /*
  2. * Copyright (c) 2020 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "dp_rx_buffer_pool.h"
  19. #ifndef DP_RX_BUFFER_POOL_SIZE
  20. #define DP_RX_BUFFER_POOL_SIZE 128
  21. #endif
  22. #ifndef DP_RX_BUFFER_POOL_ALLOC_THRES
  23. #define DP_RX_BUFFER_POOL_ALLOC_THRES 1
  24. #endif
  25. #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
  26. bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
  27. {
  28. struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  29. struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
  30. struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
  31. qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
  32. bool consumed = false;
  33. if (!bufpool->is_initialized)
  34. return consumed;
  35. /* process only buffers of RXDMA ring */
  36. if (qdf_unlikely(rx_desc_pool !=
  37. dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id)))
  38. return consumed;
  39. first_nbuf = nbuf;
  40. while (nbuf) {
  41. next_nbuf = qdf_nbuf_next(nbuf);
  42. if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
  43. DP_RX_BUFFER_POOL_SIZE))
  44. break;
  45. refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
  46. RX_BUFFER_RESERVATION,
  47. rx_desc_pool->buf_alignment,
  48. FALSE);
  49. /* Failed to allocate new nbuf, reset and place it back
  50. * in to the pool.
  51. */
  52. if (!refill_nbuf) {
  53. DP_STATS_INC(pdev,
  54. rx_buffer_pool.num_bufs_consumed, 1);
  55. consumed = true;
  56. break;
  57. }
  58. /* Successful allocation!! */
  59. DP_STATS_INC(pdev,
  60. rx_buffer_pool.num_bufs_alloc_success, 1);
  61. qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
  62. refill_nbuf);
  63. nbuf = next_nbuf;
  64. }
  65. nbuf = first_nbuf;
  66. if (consumed) {
  67. /* Free the MSDU/scattered MSDU */
  68. while (nbuf) {
  69. next_nbuf = qdf_nbuf_next(nbuf);
  70. dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
  71. nbuf = next_nbuf;
  72. }
  73. }
  74. return consumed;
  75. }
  76. void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
  77. {
  78. struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  79. struct rx_desc_pool *rx_desc_pool;
  80. struct rx_buff_pool *buff_pool;
  81. if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
  82. mac_id = dp_pdev->lmac_id;
  83. rx_desc_pool = &soc->rx_desc_buf[mac_id];
  84. buff_pool = &soc->rx_buff_pool[mac_id];
  85. if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
  86. DP_RX_BUFFER_POOL_SIZE))
  87. return qdf_nbuf_free(nbuf);
  88. qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
  89. rx_desc_pool->buf_alignment);
  90. qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
  91. }
  92. qdf_nbuf_t
  93. dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
  94. struct rx_desc_pool *rx_desc_pool,
  95. uint32_t num_available_buffers)
  96. {
  97. struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  98. struct rx_buff_pool *buff_pool;
  99. struct dp_srng *dp_rxdma_srng;
  100. qdf_nbuf_t nbuf;
  101. if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
  102. mac_id = dp_pdev->lmac_id;
  103. buff_pool = &soc->rx_buff_pool[mac_id];
  104. dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
  105. nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
  106. RX_BUFFER_RESERVATION,
  107. rx_desc_pool->buf_alignment,
  108. FALSE);
  109. if (!buff_pool->is_initialized)
  110. return nbuf;
  111. if (qdf_likely(nbuf)) {
  112. buff_pool->nbuf_fail_cnt = 0;
  113. return nbuf;
  114. }
  115. buff_pool->nbuf_fail_cnt++;
  116. /* Allocate buffer from the buffer pool */
  117. if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFFER_POOL_ALLOC_THRES ||
  118. (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
  119. nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
  120. if (nbuf)
  121. DP_STATS_INC(dp_pdev,
  122. rx_buffer_pool.num_pool_bufs_replenish, 1);
  123. }
  124. return nbuf;
  125. }
  126. void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
  127. {
  128. struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
  129. struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
  130. qdf_nbuf_t nbuf;
  131. int i;
  132. if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
  133. dp_err("RX buffer pool support is disabled");
  134. buff_pool->is_initialized = false;
  135. return;
  136. }
  137. if (buff_pool->is_initialized)
  138. return;
  139. qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);
  140. for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
  141. nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
  142. RX_BUFFER_RESERVATION,
  143. rx_desc_pool->buf_alignment, FALSE);
  144. if (!nbuf)
  145. continue;
  146. qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
  147. nbuf);
  148. }
  149. dp_info("RX buffer pool required allocation: %u actual allocation: %u",
  150. DP_RX_BUFFER_POOL_SIZE,
  151. qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));
  152. buff_pool->is_initialized = true;
  153. }
  154. void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
  155. {
  156. struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
  157. qdf_nbuf_t nbuf;
  158. if (!buff_pool->is_initialized)
  159. return;
  160. dp_info("buffers in the RX buffer pool during deinit: %u",
  161. qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));
  162. while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
  163. qdf_nbuf_free(nbuf);
  164. buff_pool->is_initialized = false;
  165. }
  166. #endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */