/* dp_rx_buffer_pool.c */
/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_rx_buffer_pool.h"
#include "dp_ipa.h"

#ifndef DP_RX_BUFFER_POOL_SIZE
#define DP_RX_BUFFER_POOL_SIZE 128
#endif

#ifndef DP_RX_REFILL_BUFF_POOL_SIZE
#define DP_RX_REFILL_BUFF_POOL_SIZE 2048
#endif

#ifndef DP_RX_REFILL_BUFF_POOL_BURST
#define DP_RX_REFILL_BUFF_POOL_BURST 64
#endif

#ifndef DP_RX_BUFF_POOL_ALLOC_THRES
#define DP_RX_BUFF_POOL_ALLOC_THRES 1
#endif

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
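
/**
 * dp_rx_buffer_pool_refill() - refill the emergency RX buffer pool while
 * processing a received MSDU
 * @soc: DP SOC handle
 * @nbuf: received buffer, possibly the head of a scattered-MSDU chain
 * @mac_id: MAC id of the RX ring the buffer came from
 *
 * For each buffer in the chain, allocate a fresh replacement and park it in
 * the emergency queue until the queue holds DP_RX_BUFFER_POOL_SIZE buffers.
 * If an allocation fails, the received chain itself is recycled into the
 * pool via dp_rx_buffer_pool_nbuf_free() instead.
 *
 * Return: true if the caller's nbuf chain was consumed by the pool and must
 * not be used further, false otherwise.
 */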
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
	bool consumed = false;

	if (!bufpool->is_initialized || !pdev)
		return consumed;

	/* process only buffers of RXDMA ring */
	if (qdf_unlikely(rx_desc_pool !=
			 dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id)))
		return consumed;

	first_nbuf = nbuf;

	while (nbuf) {
		next_nbuf = qdf_nbuf_next(nbuf);

		if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
			       DP_RX_BUFFER_POOL_SIZE))
			break;

		refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
					     RX_BUFFER_RESERVATION,
					     rx_desc_pool->buf_alignment,
					     FALSE);

		/* Failed to allocate a new nbuf; reset the received chain
		 * and place it back into the pool.
		 */
		if (!refill_nbuf) {
			DP_STATS_INC(pdev,
				     rx_buffer_pool.num_bufs_consumed, 1);
			consumed = true;
			break;
		}

		/* Successful allocation!! */
		DP_STATS_INC(pdev,
			     rx_buffer_pool.num_bufs_alloc_success, 1);
		qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
						 refill_nbuf);
		nbuf = next_nbuf;
	}

	nbuf = first_nbuf;
	if (consumed) {
		/* Free the MSDU/scattered MSDU */
		while (nbuf) {
			next_nbuf = qdf_nbuf_next(nbuf);
			dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
			nbuf = next_nbuf;
		}
	}

	return consumed;
}
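
/**
 * dp_rx_buffer_pool_nbuf_free() - free an RX buffer, recycling it into the
 * emergency pool when the pool is below its watermark
 * @soc: DP SOC handle
 * @nbuf: buffer to free
 * @mac_id: MAC id of the RX ring
 */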
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool;
	struct rx_buff_pool *buff_pool;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	rx_desc_pool = &soc->rx_desc_buf[mac_id];
	buff_pool = &soc->rx_buff_pool[mac_id];

	/* Free directly when the pool is already full or was never
	 * initialized; enqueuing to an uninitialized queue would leak
	 * the buffer.
	 */
	if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
		       DP_RX_BUFFER_POOL_SIZE) ||
	    !buff_pool->is_initialized)
		return qdf_nbuf_free(nbuf);

	qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
		       rx_desc_pool->buf_alignment);
	qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
}
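
/**
 * dp_rx_refill_buff_pool_enqueue() - top up the refill buffer pool to its
 * maximum queue length
 * @soc: DP SOC handle
 *
 * Buffers are allocated and DMA-mapped in bursts of
 * DP_RX_REFILL_BUFF_POOL_BURST and appended to the pool under bufq_lock.
 */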
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
{
	struct rx_desc_pool *rx_desc_pool;
	struct rx_refill_buff_pool *buff_pool;
	struct dp_pdev *dp_pdev;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int count, i;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	uint32_t num_req_refill;

	if (!soc)
		return;

	buff_pool = &soc->rx_refill_buff_pool;
	if (!buff_pool->is_initialized)
		return;

	rx_desc_pool = &soc->rx_desc_buf[0];
	dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);

	num_req_refill = buff_pool->max_bufq_len - buff_pool->bufq_len;

	while (num_req_refill) {
		if (num_req_refill > DP_RX_REFILL_BUFF_POOL_BURST)
			num_req_refill = DP_RX_REFILL_BUFF_POOL_BURST;

		count = 0;
		nbuf_head = NULL;
		nbuf_tail = NULL;
		for (i = 0; i < num_req_refill; i++) {
			nbuf = qdf_nbuf_alloc(soc->osdev,
					      rx_desc_pool->buf_size,
					      RX_BUFFER_RESERVATION,
					      rx_desc_pool->buf_alignment,
					      FALSE);
			if (!nbuf)
				continue;

			ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
							 QDF_DMA_FROM_DEVICE,
							 rx_desc_pool->buf_size);
			if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
				qdf_nbuf_free(nbuf);
				continue;
			}

			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
			count++;
		}
		if (count) {
			qdf_spin_lock_bh(&buff_pool->bufq_lock);
			DP_RX_MERGE_TWO_LIST(buff_pool->buf_head,
					     buff_pool->buf_tail,
					     nbuf_head, nbuf_tail);
			buff_pool->bufq_len += count;
			num_req_refill = buff_pool->max_bufq_len -
					 buff_pool->bufq_len;
			qdf_spin_unlock_bh(&buff_pool->bufq_lock);

			DP_STATS_INC(dp_pdev,
				     rx_refill_buff_pool.num_bufs_refilled,
				     count);
		} else {
			/* Nothing was allocated in this burst; stop retrying
			 * so the loop cannot spin under memory pressure.
			 */
			break;
		}
	}
}
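
/**
 * dp_rx_refill_buff_pool_dequeue_nbuf() - pop one pre-mapped buffer from the
 * refill buffer pool
 * @soc: DP SOC handle
 *
 * Return: nbuf on success; NULL when the pool is empty or when the caller
 * has not taken the RX refill lock (in_rx_refill_lock not set).
 */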
static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
{
	qdf_nbuf_t nbuf = NULL;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;

	if (!buff_pool->in_rx_refill_lock || !buff_pool->bufq_len)
		return nbuf;

	nbuf = buff_pool->buf_head;
	buff_pool->buf_head = qdf_nbuf_next(buff_pool->buf_head);
	qdf_nbuf_set_next(nbuf, NULL);
	buff_pool->bufq_len--;

	return nbuf;
}
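
/**
 * dp_rx_buffer_pool_nbuf_alloc() - allocate an RX buffer, falling back to
 * the emergency pool when normal allocation fails
 * @soc: DP SOC handle
 * @mac_id: MAC id of the RX ring
 * @rx_desc_pool: RX descriptor pool providing buffer size and alignment
 * @num_available_buffers: number of buffers currently available in the ring
 *
 * Order of attempts: refill pool dequeue, then regular qdf_nbuf_alloc(), and
 * finally the emergency queue once the failure threshold is hit or the ring
 * runs low (below a tenth of its entries).
 *
 * Return: nbuf on success, NULL on failure.
 */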
qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t num_available_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_buff_pool *buff_pool;
	struct dp_srng *dp_rxdma_srng;
	qdf_nbuf_t nbuf;

	nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
	if (qdf_likely(nbuf)) {
		DP_STATS_INC(dp_pdev,
			     rx_refill_buff_pool.num_bufs_allocated, 1);
		return nbuf;
	}

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	buff_pool = &soc->rx_buff_pool[mac_id];
	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

	nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment,
			      FALSE);

	if (!buff_pool->is_initialized)
		return nbuf;

	if (qdf_likely(nbuf)) {
		buff_pool->nbuf_fail_cnt = 0;
		return nbuf;
	}

	buff_pool->nbuf_fail_cnt++;

	/* Allocate buffer from the buffer pool */
	if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFF_POOL_ALLOC_THRES ||
	    (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
		nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
		if (nbuf)
			DP_STATS_INC(dp_pdev,
				     rx_buffer_pool.num_pool_bufs_replenish, 1);
	}

	return nbuf;
}
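
/**
 * dp_rx_buffer_pool_nbuf_map() - DMA-map an RX buffer if it is not yet mapped
 * @soc: DP SOC handle
 * @rx_desc_pool: RX descriptor pool providing the buffer size
 * @nbuf_frag_info_t: nbuf/frag info holding the buffer to map
 *
 * Buffers dequeued from the refill pool already carry a DMA address
 * (paddr set), so the map step is skipped for them; the IPA SMMU mapping
 * is handled in either case.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on map failure.
 */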
QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)) {
		ret = qdf_nbuf_map_nbytes_single(soc->osdev,
						 (nbuf_frag_info_t->virt_addr).nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
			return ret;
	}

	dp_ipa_handle_rx_buf_smmu_mapping(soc,
					  (qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
					  rx_desc_pool->buf_size,
					  true);

	return ret;
}
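
/**
 * dp_rx_refill_buff_pool_init() - pre-allocate and DMA-map the refill
 * buffer pool
 * @soc: DP SOC handle
 * @mac_id: MAC id whose descriptor pool supplies buffer size and alignment
 *
 * Allocation failures are tolerated; the pool simply starts with fewer
 * buffers than DP_RX_REFILL_BUFF_POOL_SIZE.
 */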
static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	QDF_STATUS ret;
	int i;

	if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX refill buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	buff_pool->bufq_len = 0;
	buff_pool->buf_head = NULL;
	buff_pool->buf_tail = NULL;
	buff_pool->max_bufq_len = DP_RX_REFILL_BUFF_POOL_SIZE;
	qdf_spinlock_create(&buff_pool->bufq_lock);

	for (i = 0; i < buff_pool->max_bufq_len; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			continue;
		}

		DP_RX_LIST_APPEND(buff_pool->buf_head,
				  buff_pool->buf_tail, nbuf);
		buff_pool->bufq_len++;
	}

	dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
		buff_pool->max_bufq_len,
		buff_pool->bufq_len);

	buff_pool->is_initialized = true;
}
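
/**
 * dp_rx_buffer_pool_init() - initialize the emergency RX buffer pool and
 * the refill buffer pool
 * @soc: DP SOC handle
 * @mac_id: MAC id of the RX ring
 */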
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;
	int i;

	dp_rx_refill_buff_pool_init(soc, mac_id);

	if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	if (buff_pool->is_initialized)
		return;

	qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);

	for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;
		qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
						 nbuf);
	}

	dp_info("RX buffer pool required allocation: %u actual allocation: %u",
		DP_RX_BUFFER_POOL_SIZE,
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	buff_pool->is_initialized = true;
}
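
/**
 * dp_rx_refill_buff_pool_deinit() - unmap and free all buffers held by the
 * refill buffer pool
 * @soc: DP SOC handle
 * @mac_id: MAC id whose descriptor pool supplies the buffer size
 */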
static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	uint32_t count = 0;

	if (!buff_pool->is_initialized)
		return;

	buff_pool->in_rx_refill_lock = true;
	while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL,
					     rx_desc_pool->buf_size);
		qdf_nbuf_free(nbuf);
		count++;
	}
	buff_pool->in_rx_refill_lock = false;

	dp_info("Rx refill buffers freed during deinit %u qlen: %u",
		count, buff_pool->bufq_len);

	qdf_spinlock_destroy(&buff_pool->bufq_lock);
	buff_pool->is_initialized = false;
}
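
/**
 * dp_rx_buffer_pool_deinit() - free all buffers held by the emergency RX
 * buffer pool and the refill buffer pool
 * @soc: DP SOC handle
 * @mac_id: MAC id of the RX ring
 */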
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;

	dp_rx_refill_buff_pool_deinit(soc, mac_id);

	if (!buff_pool->is_initialized)
		return;

	dp_info("buffers in the RX buffer pool during deinit: %u",
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
		qdf_nbuf_free(nbuf);

	buff_pool->is_initialized = false;
}

#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */