dp_rx_buffer_pool.c

/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_rx_buffer_pool.h"
#include "dp_ipa.h"

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
#include "dp_mon.h"
#include "dp_rx_mon.h"
#endif

#ifndef DP_RX_BUFFER_POOL_SIZE
#define DP_RX_BUFFER_POOL_SIZE 128
#endif

#ifndef DP_RX_BUFF_POOL_ALLOC_THRES
#define DP_RX_BUFF_POOL_ALLOC_THRES 1
#endif

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
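/**
 * dp_rx_buffer_pool_refill() - refill the emergency buffer pool from a
 * received nbuf chain
 * @soc: DP SOC handle
 * @nbuf: received (possibly scattered) MSDU
 * @mac_id: MAC id that owns the RXDMA ring
 *
 * For each buffer in the chain, while the emergency queue is below
 * DP_RX_BUFFER_POOL_SIZE, allocate a fresh nbuf and enqueue it. If an
 * allocation fails, the received chain itself is recycled back into the
 * pool instead.
 *
 * Return: true if the nbuf chain was consumed by the buffer pool
 */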
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
        struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
        struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
        qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
        bool consumed = false;

        if (!bufpool->is_initialized || !pdev)
                return consumed;

        /* Process only buffers of the RXDMA ring */
        if (qdf_unlikely(rx_desc_pool !=
                         dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id)))
                return consumed;

        first_nbuf = nbuf;
        while (nbuf) {
                next_nbuf = qdf_nbuf_next(nbuf);

                if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
                               DP_RX_BUFFER_POOL_SIZE))
                        break;

                refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
                                             RX_BUFFER_RESERVATION,
                                             rx_desc_pool->buf_alignment,
                                             FALSE);

                /* Failed to allocate a replacement nbuf: reset the received
                 * chain and place it back into the pool below.
                 */
                if (!refill_nbuf) {
                        DP_STATS_INC(pdev,
                                     rx_buffer_pool.num_bufs_consumed, 1);
                        consumed = true;
                        break;
                }

                /* Successful allocation */
                DP_STATS_INC(pdev,
                             rx_buffer_pool.num_bufs_alloc_success, 1);
                qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
                                                 refill_nbuf);

                nbuf = next_nbuf;
        }

        nbuf = first_nbuf;
        if (consumed) {
                /* Free or recycle the MSDU/scattered MSDU */
                while (nbuf) {
                        next_nbuf = qdf_nbuf_next(nbuf);
                        dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
                        nbuf = next_nbuf;
                }
        }

        return consumed;
}
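/**
 * dp_rx_buffer_pool_nbuf_free() - free an nbuf or recycle it into the
 * emergency buffer pool
 * @soc: DP SOC handle
 * @nbuf: nbuf to be freed or recycled
 * @mac_id: MAC id that owns the buffer
 *
 * If the emergency queue is already full, or the pool is not initialized,
 * the nbuf is freed outright; otherwise it is reset and enqueued for reuse.
 */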
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
        struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        struct rx_desc_pool *rx_desc_pool;
        struct rx_buff_pool *buff_pool;

        if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
                mac_id = dp_pdev->lmac_id;

        rx_desc_pool = &soc->rx_desc_buf[mac_id];
        buff_pool = &soc->rx_buff_pool[mac_id];

        if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
                       DP_RX_BUFFER_POOL_SIZE) ||
            !buff_pool->is_initialized)
                return qdf_nbuf_free(nbuf);

        qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
                       rx_desc_pool->buf_alignment);
        qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
}
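/**
 * dp_rx_refill_buff_pool_enqueue() - replenish the circular refill buffer
 * @soc: DP SOC handle
 *
 * Fills the free slots of the refill ring with freshly allocated,
 * DMA-mapped nbufs, working in bursts of DP_RX_REFILL_BUFF_POOL_BURST.
 * One slot is always left unused so that head == tail unambiguously
 * means "empty".
 */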
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
{
        struct rx_desc_pool *rx_desc_pool;
        struct rx_refill_buff_pool *buff_pool;
        qdf_device_t dev;
        qdf_nbuf_t nbuf;
        QDF_STATUS ret;
        int count, i;
        uint16_t num_refill;
        uint16_t total_num_refill;
        uint16_t total_count = 0;
        uint16_t head, tail;

        if (!soc)
                return;

        dev = soc->osdev;
        buff_pool = &soc->rx_refill_buff_pool;
        rx_desc_pool = &soc->rx_desc_buf[0];
        if (!buff_pool->is_initialized)
                return;

        head = buff_pool->head;
        tail = buff_pool->tail;

        /* Leave one slot empty so head == tail always means "empty" */
        if (tail > head)
                total_num_refill = (tail - head - 1);
        else
                total_num_refill = (DP_RX_REFILL_BUFF_POOL_SIZE - head +
                                    tail - 1);

        while (total_num_refill) {
                if (total_num_refill > DP_RX_REFILL_BUFF_POOL_BURST)
                        num_refill = DP_RX_REFILL_BUFF_POOL_BURST;
                else
                        num_refill = total_num_refill;

                count = 0;
                for (i = 0; i < num_refill; i++) {
                        nbuf = qdf_nbuf_alloc(dev, rx_desc_pool->buf_size,
                                              RX_BUFFER_RESERVATION,
                                              rx_desc_pool->buf_alignment,
                                              FALSE);
                        if (qdf_unlikely(!nbuf))
                                continue;

                        ret = qdf_nbuf_map_nbytes_single(dev, nbuf,
                                                         QDF_DMA_FROM_DEVICE,
                                                         rx_desc_pool->buf_size);
                        if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
                                qdf_nbuf_free(nbuf);
                                continue;
                        }

                        /* Wrap-around mask assumes the pool size is a
                         * power of two.
                         */
                        buff_pool->buf_elem[head++] = nbuf;
                        head &= (DP_RX_REFILL_BUFF_POOL_SIZE - 1);
                        count++;
                }

                /* Bail out if nothing could be allocated in this burst, to
                 * avoid spinning under complete memory exhaustion.
                 */
                if (!count)
                        break;

                buff_pool->head = head;
                total_num_refill -= count;
                total_count += count;
        }

        DP_STATS_INC(buff_pool->dp_pdev,
                     rx_refill_buff_pool.num_bufs_refilled,
                     total_count);
}
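/**
 * dp_rx_refill_buff_pool_dequeue_nbuf() - pop one pre-mapped nbuf from the
 * refill ring
 * @soc: DP SOC handle
 *
 * Return: an nbuf, or NULL if the ring is empty (head == tail)
 */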
static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
{
        struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
        qdf_nbuf_t nbuf = NULL;
        uint16_t head, tail;

        head = buff_pool->head;
        tail = buff_pool->tail;

        if (head == tail)
                return NULL;

        nbuf = buff_pool->buf_elem[tail++];
        tail &= (DP_RX_REFILL_BUFF_POOL_SIZE - 1);
        buff_pool->tail = tail;

        return nbuf;
}
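/**
 * dp_rx_buffer_pool_nbuf_alloc() - allocate an RX buffer, falling back to
 * the pools under memory pressure
 * @soc: DP SOC handle
 * @mac_id: MAC id that owns the RXDMA ring
 * @rx_desc_pool: RX descriptor pool the buffer is sized from
 * @num_available_buffers: number of buffers currently available in the
 *	RXDMA ring
 *
 * Tries the pre-mapped refill ring first, then a regular allocation.
 * If regular allocations keep failing, or the RXDMA ring is running low,
 * a buffer is drawn from the emergency queue.
 *
 * Return: allocated nbuf, or NULL
 */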
qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
                             struct rx_desc_pool *rx_desc_pool,
                             uint32_t num_available_buffers)
{
        struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        struct rx_buff_pool *buff_pool;
        struct dp_srng *dp_rxdma_srng;
        qdf_nbuf_t nbuf;

        /* Fast path: take a pre-allocated, pre-mapped buffer from the
         * refill ring.
         */
        nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
        if (qdf_likely(nbuf)) {
                DP_STATS_INC(dp_pdev,
                             rx_refill_buff_pool.num_bufs_allocated, 1);
                return nbuf;
        }

        if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
                mac_id = dp_pdev->lmac_id;

        buff_pool = &soc->rx_buff_pool[mac_id];
        dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

        nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
                              RX_BUFFER_RESERVATION,
                              rx_desc_pool->buf_alignment,
                              FALSE);

        if (!buff_pool->is_initialized)
                return nbuf;

        if (qdf_likely(nbuf)) {
                buff_pool->nbuf_fail_cnt = 0;
                return nbuf;
        }

        buff_pool->nbuf_fail_cnt++;

        /* Allocate a buffer from the emergency buffer pool */
        if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFF_POOL_ALLOC_THRES ||
            (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
                nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
                if (nbuf)
                        DP_STATS_INC(dp_pdev,
                                     rx_buffer_pool.num_pool_bufs_replenish, 1);
        }

        return nbuf;
}
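/**
 * dp_rx_buffer_pool_nbuf_map() - DMA-map an RX buffer if not already mapped
 * @soc: DP SOC handle
 * @rx_desc_pool: RX descriptor pool the buffer belongs to
 * @nbuf_frag_info_t: nbuf/frag holder to be mapped
 *
 * Buffers taken from the refill ring are mapped at enqueue time and
 * already carry a physical address, so mapping is skipped for them.
 *
 * Return: QDF_STATUS_SUCCESS, or a mapping error code
 */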
QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
                           struct rx_desc_pool *rx_desc_pool,
                           struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
        QDF_STATUS ret = QDF_STATUS_SUCCESS;

        /* Map only nbufs that do not already carry a DMA address */
        if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf))
                ret = qdf_nbuf_map_nbytes_single(soc->osdev,
                                                 (nbuf_frag_info_t->virt_addr).nbuf,
                                                 QDF_DMA_FROM_DEVICE,
                                                 rx_desc_pool->buf_size);

        return ret;
}
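/**
 * dp_rx_refill_buff_pool_init() - pre-allocate and DMA-map the refill ring
 * @soc: DP SOC handle
 * @mac_id: MAC id used to pick the RX descriptor pool
 */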
static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
{
        struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
        qdf_nbuf_t nbuf;
        struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
        QDF_STATUS ret;
        uint16_t head = 0;
        int i;

        if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
                dp_err("RX refill buffer pool support is disabled");
                buff_pool->is_initialized = false;
                return;
        }

        buff_pool->max_bufq_len = DP_RX_REFILL_BUFF_POOL_SIZE;
        buff_pool->dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);
        buff_pool->tail = 0;

        /* Fill at most size - 1 slots; one slot stays empty so that
         * head == tail means "empty" rather than "full".
         */
        for (i = 0; i < (buff_pool->max_bufq_len - 1); i++) {
                nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
                                      RX_BUFFER_RESERVATION,
                                      rx_desc_pool->buf_alignment, FALSE);
                if (!nbuf)
                        continue;

                ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
                                                 QDF_DMA_FROM_DEVICE,
                                                 rx_desc_pool->buf_size);
                if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
                        qdf_nbuf_free(nbuf);
                        continue;
                }

                buff_pool->buf_elem[head] = nbuf;
                head++;
        }

        buff_pool->head = head;

        dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
                buff_pool->max_bufq_len,
                buff_pool->head);

        buff_pool->is_initialized = true;
}
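/**
 * dp_rx_buffer_pool_init() - initialize the refill ring and the emergency
 * nbuf queue for a MAC
 * @soc: DP SOC handle
 * @mac_id: MAC id
 */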
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
        struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
        struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
        qdf_nbuf_t nbuf;
        int i;

        dp_rx_refill_buff_pool_init(soc, mac_id);

        if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
                dp_err("RX buffer pool support is disabled");
                buff_pool->is_initialized = false;
                return;
        }

        if (buff_pool->is_initialized)
                return;

        qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);

        for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
                nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
                                      RX_BUFFER_RESERVATION,
                                      rx_desc_pool->buf_alignment, FALSE);
                if (!nbuf)
                        continue;

                qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
                                                 nbuf);
        }

        dp_info("RX buffer pool required allocation: %u actual allocation: %u",
                DP_RX_BUFFER_POOL_SIZE,
                qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

        buff_pool->is_initialized = true;
}
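/**
 * dp_rx_refill_buff_pool_deinit() - unmap and free all buffers left in
 * the refill ring
 * @soc: DP SOC handle
 * @mac_id: MAC id used to pick the RX descriptor pool
 */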
static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
        struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
        struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
        qdf_nbuf_t nbuf;
        uint32_t count = 0;

        if (!buff_pool->is_initialized)
                return;

        while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
                qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
                                             QDF_DMA_BIDIRECTIONAL,
                                             rx_desc_pool->buf_size);
                qdf_nbuf_free(nbuf);
                count++;
        }

        dp_info("Rx refill buffers freed during deinit %u head: %u, tail: %u",
                count, buff_pool->head, buff_pool->tail);

        buff_pool->is_initialized = false;
}
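/**
 * dp_rx_buffer_pool_deinit() - tear down the refill ring and drain the
 * emergency nbuf queue
 * @soc: DP SOC handle
 * @mac_id: MAC id
 */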
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
        struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
        qdf_nbuf_t nbuf;

        dp_rx_refill_buff_pool_deinit(soc, mac_id);

        if (!buff_pool->is_initialized)
                return;

        dp_info("buffers in the RX buffer pool during deinit: %u",
                qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

        while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
                qdf_nbuf_free(nbuf);

        buff_pool->is_initialized = false;
}
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */