dp_rx_buffer_pool.c

/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_rx_buffer_pool.h"
#include "dp_ipa.h"

#ifndef DP_RX_BUFFER_POOL_SIZE
#define DP_RX_BUFFER_POOL_SIZE 128
#endif

#ifndef DP_RX_REFILL_BUFF_POOL_SIZE
#define DP_RX_REFILL_BUFF_POOL_SIZE 2048
#endif

#ifndef DP_RX_REFILL_BUFF_POOL_BURST
#define DP_RX_REFILL_BUFF_POOL_BURST 64
#endif

#ifndef DP_RX_BUFF_POOL_ALLOC_THRES
#define DP_RX_BUFF_POOL_ALLOC_THRES 1
#endif

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
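/**
 * dp_rx_buffer_pool_refill() - refill the emergency buffer pool from an
 *	MSDU (or scattered MSDU chain) received on the RXDMA monitor ring
 * @soc: DP SOC handle
 * @nbuf: head of the nbuf chain offered to the pool
 * @mac_id: MAC/LMAC id the buffers belong to
 *
 * For each nbuf in the chain, a replacement buffer is allocated and queued
 * into the emergency pool until it holds DP_RX_BUFFER_POOL_SIZE buffers.
 * If a replacement allocation fails, the caller's buffers themselves are
 * recycled into the pool instead.
 *
 * Return: true if the caller's nbuf chain was consumed by the pool,
 *	false if the caller still owns it.
 */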
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
	bool consumed = false;

	if (!bufpool->is_initialized || !pdev)
		return consumed;

	/* process only buffers of RXDMA ring */
	if (qdf_unlikely(rx_desc_pool !=
			 dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id)))
		return consumed;

	first_nbuf = nbuf;

	while (nbuf) {
		next_nbuf = qdf_nbuf_next(nbuf);

		if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
		    DP_RX_BUFFER_POOL_SIZE))
			break;

		refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
					     RX_BUFFER_RESERVATION,
					     rx_desc_pool->buf_alignment,
					     FALSE);

		/* Failed to allocate new nbuf, reset and place it back
		 * in to the pool.
		 */
		if (!refill_nbuf) {
			DP_STATS_INC(pdev,
				     rx_buffer_pool.num_bufs_consumed, 1);
			consumed = true;
			break;
		}

		/* Successful allocation!! */
		DP_STATS_INC(pdev,
			     rx_buffer_pool.num_bufs_alloc_success, 1);
		qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
						 refill_nbuf);
		nbuf = next_nbuf;
	}

	nbuf = first_nbuf;
	if (consumed) {
		/* Free the MSDU/scattered MSDU */
		while (nbuf) {
			next_nbuf = qdf_nbuf_next(nbuf);
			dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
			nbuf = next_nbuf;
		}
	}

	return consumed;
}
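/**
 * dp_rx_buffer_pool_nbuf_free() - release an RX buffer, recycling it into
 *	the emergency pool when the pool is running low
 * @soc: DP SOC handle
 * @nbuf: buffer being released
 * @mac_id: MAC/LMAC id the buffer belongs to
 *
 * If the emergency pool already holds DP_RX_BUFFER_POOL_SIZE buffers the
 * nbuf is simply freed; otherwise it is reset and queued back into the pool.
 */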
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool;
	struct rx_buff_pool *buff_pool;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	rx_desc_pool = &soc->rx_desc_buf[mac_id];
	buff_pool = &soc->rx_buff_pool[mac_id];

	if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
		       DP_RX_BUFFER_POOL_SIZE))
		return qdf_nbuf_free(nbuf);

	qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
		       rx_desc_pool->buf_alignment);
	qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
}
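/**
 * dp_rx_refill_buff_pool_enqueue() - top up the SOC-wide refill buffer pool
 * @soc: DP SOC handle
 *
 * Allocates and DMA-maps buffers in bursts of up to
 * DP_RX_REFILL_BUFF_POOL_BURST until the pool reaches its maximum length,
 * appending each burst to the pool list under the bufq lock.
 */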
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
{
	struct rx_desc_pool *rx_desc_pool;
	struct rx_refill_buff_pool *buff_pool;
	struct dp_pdev *dp_pdev;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int count, i;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	uint32_t num_req_refill;

	if (!soc)
		return;

	buff_pool = &soc->rx_refill_buff_pool;
	if (!buff_pool->is_initialized)
		return;

	rx_desc_pool = &soc->rx_desc_buf[0];
	dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);

	num_req_refill = buff_pool->max_bufq_len - buff_pool->bufq_len;

	while (num_req_refill) {
		if (num_req_refill > DP_RX_REFILL_BUFF_POOL_BURST)
			num_req_refill = DP_RX_REFILL_BUFF_POOL_BURST;

		count = 0;
		nbuf_head = NULL;
		nbuf_tail = NULL;
		for (i = 0; i < num_req_refill; i++) {
			nbuf = qdf_nbuf_alloc(soc->osdev,
					      rx_desc_pool->buf_size,
					      RX_BUFFER_RESERVATION,
					      rx_desc_pool->buf_alignment,
					      FALSE);
			if (!nbuf)
				continue;

			ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
							 QDF_DMA_FROM_DEVICE,
							 rx_desc_pool->buf_size);
			if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
				qdf_nbuf_free(nbuf);
				continue;
			}

			dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
							  rx_desc_pool->buf_size,
							  true);
			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
			count++;
		}
		if (count) {
			qdf_spin_lock_bh(&buff_pool->bufq_lock);
			DP_RX_MERGE_TWO_LIST(buff_pool->buf_head,
					     buff_pool->buf_tail,
					     nbuf_head, nbuf_tail);
			buff_pool->bufq_len += count;
			num_req_refill = buff_pool->max_bufq_len -
				buff_pool->bufq_len;
			qdf_spin_unlock_bh(&buff_pool->bufq_lock);

			DP_STATS_INC(dp_pdev,
				     rx_refill_buff_pool.num_bufs_refilled,
				     count);
		}
	}
}
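/**
 * dp_rx_refill_buff_pool_dequeue_nbuf() - pop one pre-mapped buffer from the
 *	refill buffer pool
 * @soc: DP SOC handle
 *
 * Return: an nbuf from the refill pool, or NULL if the pool is
 *	uninitialized or empty.
 */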
static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
{
	qdf_nbuf_t nbuf = NULL;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;

	if (!buff_pool->is_initialized || !buff_pool->bufq_len)
		return nbuf;

	qdf_spin_lock_bh(&buff_pool->bufq_lock);
	nbuf = buff_pool->buf_head;
	buff_pool->buf_head = qdf_nbuf_next(buff_pool->buf_head);
	qdf_nbuf_set_next(nbuf, NULL);
	buff_pool->bufq_len--;
	qdf_spin_unlock_bh(&buff_pool->bufq_lock);

	return nbuf;
}
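/**
 * dp_rx_buffer_pool_nbuf_alloc() - allocate an RX buffer, preferring the
 *	refill pool and falling back to the emergency pool under pressure
 * @soc: DP SOC handle
 * @mac_id: MAC/LMAC id requesting the buffer
 * @rx_desc_pool: RX descriptor pool describing buffer size and alignment
 * @num_available_buffers: buffers currently available to the RXDMA refill ring
 *
 * Tries the refill pool first, then a regular qdf_nbuf_alloc(). The
 * emergency pool is tapped only once allocations have failed
 * DP_RX_BUFF_POOL_ALLOC_THRES times in a row, or when
 * @num_available_buffers drops below one tenth of the RXDMA refill ring size.
 *
 * Return: allocated nbuf, or NULL on failure.
 */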
qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t num_available_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_buff_pool *buff_pool;
	struct dp_srng *dp_rxdma_srng;
	qdf_nbuf_t nbuf;

	nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
	if (nbuf) {
		DP_STATS_INC(dp_pdev,
			     rx_refill_buff_pool.num_bufs_allocated, 1);
		return nbuf;
	}

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	buff_pool = &soc->rx_buff_pool[mac_id];
	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

	nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment,
			      FALSE);

	if (!buff_pool->is_initialized)
		return nbuf;

	if (qdf_likely(nbuf)) {
		buff_pool->nbuf_fail_cnt = 0;
		return nbuf;
	}

	buff_pool->nbuf_fail_cnt++;

	/* Allocate buffer from the buffer pool */
	if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFF_POOL_ALLOC_THRES ||
	    (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
		nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
		if (nbuf)
			DP_STATS_INC(dp_pdev,
				     rx_buffer_pool.num_pool_bufs_replenish, 1);
	}

	return nbuf;
}
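/**
 * dp_rx_buffer_pool_nbuf_map() - DMA-map an RX buffer if it is not mapped yet
 * @soc: DP SOC handle
 * @rx_desc_pool: RX descriptor pool describing the buffer size
 * @nbuf_frag_info_t: nbuf/frag wrapper holding the buffer to map
 *
 * Buffers taken from the refill pool are already mapped (their recorded
 * physical address is non-zero), so the DMA map and IPA SMMU mapping are
 * only performed when the physical address is still zero.
 *
 * Return: QDF_STATUS_SUCCESS, or the mapping error.
 */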
QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)) {
		ret = qdf_nbuf_map_nbytes_single(soc->osdev,
						 (nbuf_frag_info_t->virt_addr).nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
			return ret;

		dp_ipa_handle_rx_buf_smmu_mapping(soc,
						  (qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
						  rx_desc_pool->buf_size,
						  true);
	}

	return ret;
}
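/**
 * dp_rx_refill_buff_pool_init() - pre-allocate and pre-map the refill pool
 * @soc: DP SOC handle
 * @mac_id: MAC/LMAC id whose descriptor pool sizes the buffers
 *
 * Skipped when the refill pool is disabled in the wlan_cfg context.
 * Otherwise allocates up to DP_RX_REFILL_BUFF_POOL_SIZE buffers, DMA-maps
 * them, and links them into the pool list.
 */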
static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	QDF_STATUS ret;
	int i;

	if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX refill buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	buff_pool->bufq_len = 0;
	buff_pool->buf_head = NULL;
	buff_pool->buf_tail = NULL;
	buff_pool->max_bufq_len = DP_RX_REFILL_BUFF_POOL_SIZE;
	qdf_spinlock_create(&buff_pool->bufq_lock);

	for (i = 0; i < buff_pool->max_bufq_len; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			continue;
		}

		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
						  rx_desc_pool->buf_size,
						  true);
		DP_RX_LIST_APPEND(buff_pool->buf_head,
				  buff_pool->buf_tail, nbuf);
		buff_pool->bufq_len++;
	}

	dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
		buff_pool->max_bufq_len,
		buff_pool->bufq_len);

	buff_pool->is_initialized = true;
}
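/**
 * dp_rx_buffer_pool_init() - initialize the per-MAC emergency buffer pool
 * @soc: DP SOC handle
 * @mac_id: MAC/LMAC id the pool belongs to
 *
 * Also initializes the SOC-wide refill pool. The emergency pool itself is
 * filled with up to DP_RX_BUFFER_POOL_SIZE unmapped buffers, or left
 * uninitialized when disabled in the wlan_cfg context.
 */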
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;
	int i;

	dp_rx_refill_buff_pool_init(soc, mac_id);

	if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	if (buff_pool->is_initialized)
		return;

	qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);

	for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
						 nbuf);
	}

	dp_info("RX buffer pool required allocation: %u actual allocation: %u",
		DP_RX_BUFFER_POOL_SIZE,
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	buff_pool->is_initialized = true;
}
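/**
 * dp_rx_refill_buff_pool_deinit() - drain and tear down the refill pool
 * @soc: DP SOC handle
 * @mac_id: MAC/LMAC id whose descriptor pool sizes the buffers
 *
 * Dequeues every remaining buffer, removes its IPA SMMU mapping, unmaps it,
 * and frees it.
 */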
static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;

	if (!buff_pool->is_initialized)
		return;

	while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
						  rx_desc_pool->buf_size,
						  false);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL,
					     rx_desc_pool->buf_size);
		qdf_nbuf_free(nbuf);
	}

	buff_pool->is_initialized = false;
}
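/**
 * dp_rx_buffer_pool_deinit() - tear down the per-MAC emergency buffer pool
 * @soc: DP SOC handle
 * @mac_id: MAC/LMAC id the pool belongs to
 *
 * Also tears down the SOC-wide refill pool, then frees every buffer still
 * queued in the emergency pool.
 */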
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;

	dp_rx_refill_buff_pool_deinit(soc, mac_id);

	if (!buff_pool->is_initialized)
		return;

	dp_info("buffers in the RX buffer pool during deinit: %u",
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
		qdf_nbuf_free(nbuf);

	buff_pool->is_initialized = false;
}
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */