/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_rx_buffer_pool.h"
#include "dp_ipa.h"

#ifndef DP_RX_BUFFER_POOL_SIZE
#define DP_RX_BUFFER_POOL_SIZE 128
#endif

#ifndef DP_RX_REFILL_BUFF_POOL_SIZE
#define DP_RX_REFILL_BUFF_POOL_SIZE 2048
#endif

#ifndef DP_RX_REFILL_BUFF_POOL_BURST
#define DP_RX_REFILL_BUFF_POOL_BURST 64
#endif

#ifndef DP_RX_BUFF_POOL_ALLOC_THRES
#define DP_RX_BUFF_POOL_ALLOC_THRES 1
#endif

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
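/**
 * dp_rx_buffer_pool_refill() - refill the emergency RX buffer pool
 * @soc: SoC handle
 * @nbuf: received buffer, or head of a scattered-MSDU nbuf chain
 * @mac_id: MAC id of the RX ring the buffer belongs to
 *
 * For each received buffer, try to allocate a replacement and park it in
 * the emergency pool. If an allocation fails, the received buffers are
 * recycled back into the pool instead of being delivered up the stack.
 *
 * Return: true if the nbuf chain was consumed by the buffer pool
 */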
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
	bool consumed = false;

	if (!bufpool->is_initialized || !pdev)
		return consumed;

	/* process only buffers of RXDMA ring */
	if (qdf_unlikely(rx_desc_pool !=
			 dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id)))
		return consumed;

	first_nbuf = nbuf;

	while (nbuf) {
		next_nbuf = qdf_nbuf_next(nbuf);

		/* Stop once the emergency pool is back at full strength */
		if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
			       DP_RX_BUFFER_POOL_SIZE))
			break;

		refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
					     RX_BUFFER_RESERVATION,
					     rx_desc_pool->buf_alignment,
					     FALSE);

		/* Failed to allocate a new nbuf; reset the received buffer
		 * and place it back into the pool instead.
		 */
		if (!refill_nbuf) {
			DP_STATS_INC(pdev,
				     rx_buffer_pool.num_bufs_consumed, 1);
			consumed = true;
			break;
		}

		/* Successful allocation!! */
		DP_STATS_INC(pdev,
			     rx_buffer_pool.num_bufs_alloc_success, 1);
		qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
						 refill_nbuf);
		nbuf = next_nbuf;
	}

	nbuf = first_nbuf;
	if (consumed) {
		/* Free the MSDU/scattered MSDU */
		while (nbuf) {
			next_nbuf = qdf_nbuf_next(nbuf);
			dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
			nbuf = next_nbuf;
		}
	}

	return consumed;
}
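
/**
 * dp_rx_buffer_pool_nbuf_free() - recycle an nbuf into the emergency pool,
 *	or free it when the pool is full or not initialized
 * @soc: SoC handle
 * @nbuf: buffer to be recycled or freed
 * @mac_id: MAC id of the RX ring
 *
 * Return: None
 */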
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool;
	struct rx_buff_pool *buff_pool;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	rx_desc_pool = &soc->rx_desc_buf[mac_id];
	buff_pool = &soc->rx_buff_pool[mac_id];

	/* Free outright when the pool is full or was never initialized */
	if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
		       DP_RX_BUFFER_POOL_SIZE) ||
	    !buff_pool->is_initialized)
		return qdf_nbuf_free(nbuf);

	/* Otherwise reset the buffer and park it in the emergency pool */
	qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
		       rx_desc_pool->buf_alignment);
	qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
}
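
/**
 * dp_rx_refill_buff_pool_enqueue() - top up the RX refill buffer pool
 * @soc: SoC handle
 *
 * Allocate and DMA-map buffers in bursts of DP_RX_REFILL_BUFF_POOL_BURST
 * until the refill pool reaches its maximum queue length.
 *
 * Return: None
 */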
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
{
	struct rx_desc_pool *rx_desc_pool;
	struct rx_refill_buff_pool *buff_pool;
	struct dp_pdev *dp_pdev;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int count, i;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	uint32_t num_req_refill;

	if (!soc)
		return;

	buff_pool = &soc->rx_refill_buff_pool;
	if (!buff_pool->is_initialized)
		return;

	rx_desc_pool = &soc->rx_desc_buf[0];
	dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);

	num_req_refill = buff_pool->max_bufq_len - buff_pool->bufq_len;

	while (num_req_refill) {
		if (num_req_refill > DP_RX_REFILL_BUFF_POOL_BURST)
			num_req_refill = DP_RX_REFILL_BUFF_POOL_BURST;

		count = 0;
		nbuf_head = NULL;
		nbuf_tail = NULL;
		for (i = 0; i < num_req_refill; i++) {
			nbuf = qdf_nbuf_alloc(soc->osdev,
					      rx_desc_pool->buf_size,
					      RX_BUFFER_RESERVATION,
					      rx_desc_pool->buf_alignment,
					      FALSE);
			if (!nbuf)
				continue;

			ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
							 QDF_DMA_FROM_DEVICE,
							 rx_desc_pool->buf_size);
			if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
				qdf_nbuf_free(nbuf);
				continue;
			}

			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
			count++;
		}

		/* Nothing could be allocated or mapped this pass; bail out
		 * rather than spin on a failing allocator.
		 */
		if (!count)
			break;

		qdf_spin_lock_bh(&buff_pool->bufq_lock);
		DP_RX_MERGE_TWO_LIST(buff_pool->buf_head,
				     buff_pool->buf_tail,
				     nbuf_head, nbuf_tail);
		buff_pool->bufq_len += count;
		num_req_refill = buff_pool->max_bufq_len -
				 buff_pool->bufq_len;
		qdf_spin_unlock_bh(&buff_pool->bufq_lock);

		DP_STATS_INC(dp_pdev,
			     rx_refill_buff_pool.num_bufs_refilled,
			     count);
	}
}
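
/**
 * dp_rx_refill_buff_pool_dequeue_nbuf() - dequeue one pre-mapped buffer
 *	from the RX refill buffer pool
 * @soc: SoC handle
 *
 * Must be called with buff_pool->in_rx_refill_lock set by the caller;
 * otherwise the pool is left untouched.
 *
 * Return: nbuf on success, NULL when the flag is clear or the pool is empty
 */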
static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
{
	qdf_nbuf_t nbuf = NULL;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;

	if (!buff_pool->in_rx_refill_lock || !buff_pool->bufq_len)
		return nbuf;

	nbuf = buff_pool->buf_head;
	buff_pool->buf_head = qdf_nbuf_next(buff_pool->buf_head);
	qdf_nbuf_set_next(nbuf, NULL);
	buff_pool->bufq_len--;

	return nbuf;
}
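
/**
 * dp_rx_buffer_pool_nbuf_alloc() - allocate an RX buffer
 * @soc: SoC handle
 * @mac_id: MAC id of the RX ring
 * @rx_desc_pool: RX descriptor pool the buffer is allocated for
 * @num_available_buffers: number of buffers currently available in the ring
 *
 * Try the pre-mapped refill pool first, then a fresh allocation, and fall
 * back to the emergency pool when allocations fail repeatedly or the ring
 * is running low.
 *
 * Return: nbuf on success, NULL on failure
 */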
qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t num_available_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_buff_pool *buff_pool;
	struct dp_srng *dp_rxdma_srng;
	qdf_nbuf_t nbuf;

	/* Pre-mapped buffers from the refill pool are the cheapest source */
	nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
	if (qdf_likely(nbuf)) {
		DP_STATS_INC(dp_pdev,
			     rx_refill_buff_pool.num_bufs_allocated, 1);
		return nbuf;
	}

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	buff_pool = &soc->rx_buff_pool[mac_id];
	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

	nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment,
			      FALSE);

	if (!buff_pool->is_initialized)
		return nbuf;

	if (qdf_likely(nbuf)) {
		buff_pool->nbuf_fail_cnt = 0;
		return nbuf;
	}

	buff_pool->nbuf_fail_cnt++;

	/* Fall back to the emergency buffer pool once allocations keep
	 * failing or the RXDMA ring drops below 10% of its entries.
	 */
	if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFFER_POOL_ALLOC_THRES ||
	    (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
		nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
		if (nbuf)
			DP_STATS_INC(dp_pdev,
				     rx_buffer_pool.num_pool_bufs_replenish, 1);
	}

	return nbuf;
}
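
/**
 * dp_rx_buffer_pool_nbuf_map() - DMA-map an RX buffer if it is not already
 *	mapped (buffers taken from the refill pool are pre-mapped)
 * @soc: SoC handle
 * @rx_desc_pool: RX descriptor pool
 * @nbuf_frag_info_t: nbuf frag info holding the buffer to map
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */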
QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf))
		ret = qdf_nbuf_map_nbytes_single(soc->osdev,
						 (nbuf_frag_info_t->virt_addr).nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);

	return ret;
}
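
/**
 * dp_rx_refill_buff_pool_init() - pre-allocate and DMA-map the buffers of
 *	the RX refill buffer pool
 * @soc: SoC handle
 * @mac_id: MAC id of the RX ring
 *
 * Return: None
 */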
static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	QDF_STATUS ret;
	int i;

	if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX refill buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	buff_pool->bufq_len = 0;
	buff_pool->buf_head = NULL;
	buff_pool->buf_tail = NULL;
	buff_pool->max_bufq_len = DP_RX_REFILL_BUFF_POOL_SIZE;
	qdf_spinlock_create(&buff_pool->bufq_lock);

	for (i = 0; i < buff_pool->max_bufq_len; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			continue;
		}

		DP_RX_LIST_APPEND(buff_pool->buf_head,
				  buff_pool->buf_tail, nbuf);
		buff_pool->bufq_len++;
	}

	dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
		buff_pool->max_bufq_len,
		buff_pool->bufq_len);

	buff_pool->is_initialized = true;
}
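
/**
 * dp_rx_buffer_pool_init() - initialize the emergency RX buffer pool and
 *	the RX refill buffer pool
 * @soc: SoC handle
 * @mac_id: MAC id of the RX ring
 *
 * Return: None
 */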
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;
	int i;

	dp_rx_refill_buff_pool_init(soc, mac_id);

	if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	if (buff_pool->is_initialized)
		return;

	qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);

	for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
						 nbuf);
	}

	dp_info("RX buffer pool required allocation: %u actual allocation: %u",
		DP_RX_BUFFER_POOL_SIZE,
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	buff_pool->is_initialized = true;
}
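
/**
 * dp_rx_refill_buff_pool_deinit() - unmap and free all buffers held in
 *	the RX refill buffer pool
 * @soc: SoC handle
 * @mac_id: MAC id of the RX ring
 *
 * Return: None
 */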
static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	uint32_t count = 0;

	if (!buff_pool->is_initialized)
		return;

	buff_pool->in_rx_refill_lock = true;
	while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL,
					     rx_desc_pool->buf_size);
		qdf_nbuf_free(nbuf);
		count++;
	}
	buff_pool->in_rx_refill_lock = false;

	dp_info("Rx refill buffers freed during deinit %u qlen: %u",
		count, buff_pool->bufq_len);

	qdf_spinlock_destroy(&buff_pool->bufq_lock);
	buff_pool->is_initialized = false;
}
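
/**
 * dp_rx_buffer_pool_deinit() - free all buffers held in the emergency RX
 *	buffer pool and deinitialize the refill pool
 * @soc: SoC handle
 * @mac_id: MAC id of the RX ring
 *
 * Return: None
 */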
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;

	dp_rx_refill_buff_pool_deinit(soc, mac_id);

	if (!buff_pool->is_initialized)
		return;

	dp_info("buffers in the RX buffer pool during deinit: %u",
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
		qdf_nbuf_free(nbuf);

	buff_pool->is_initialized = false;
}
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */