dp_rx_buffer_pool.c

/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_rx_buffer_pool.h"
#include "dp_ipa.h"

#ifndef DP_RX_BUFFER_POOL_SIZE
#define DP_RX_BUFFER_POOL_SIZE 128
#endif

#ifndef DP_RX_REFILL_BUFF_POOL_SIZE
#define DP_RX_REFILL_BUFF_POOL_SIZE 2048
#endif

#ifndef DP_RX_REFILL_BUFF_POOL_BURST
#define DP_RX_REFILL_BUFF_POOL_BURST 64
#endif

#ifndef DP_RX_BUFF_POOL_ALLOC_THRES
#define DP_RX_BUFF_POOL_ALLOC_THRES 1
#endif

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
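/**
 * dp_rx_buffer_pool_refill() - refill the emergency RX buffer pool
 * @soc: DP SoC handle
 * @nbuf: received nbuf (possibly a scattered MSDU chain)
 * @mac_id: mac id which owns the RXDMA ring
 *
 * For every buffer in the chain, try to allocate a replacement nbuf and
 * stash it in the emergency pool until the pool is full. If an allocation
 * fails, the received chain itself is recycled into the pool and reported
 * as consumed, so the caller must not reuse or free it.
 *
 * Return: true if @nbuf was consumed by the buffer pool
 */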
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
	bool consumed = false;

	if (!bufpool->is_initialized || !pdev)
		return consumed;

	/* process only buffers of RXDMA ring */
	if (qdf_unlikely(rx_desc_pool !=
			 dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id)))
		return consumed;

	first_nbuf = nbuf;

	while (nbuf) {
		next_nbuf = qdf_nbuf_next(nbuf);

		if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
			       DP_RX_BUFFER_POOL_SIZE))
			break;

		refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
					     RX_BUFFER_RESERVATION,
					     rx_desc_pool->buf_alignment,
					     FALSE);

		/* Failed to allocate new nbuf, reset and place it back
		 * into the pool.
		 */
		if (!refill_nbuf) {
			DP_STATS_INC(pdev,
				     rx_buffer_pool.num_bufs_consumed, 1);
			consumed = true;
			break;
		}

		/* Successful allocation!! */
		DP_STATS_INC(pdev,
			     rx_buffer_pool.num_bufs_alloc_success, 1);
		qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
						 refill_nbuf);
		nbuf = next_nbuf;
	}

	nbuf = first_nbuf;
	if (consumed) {
		/* Free the MSDU/scattered MSDU */
		while (nbuf) {
			next_nbuf = qdf_nbuf_next(nbuf);
			dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
			nbuf = next_nbuf;
		}
	}

	return consumed;
}
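/**
 * dp_rx_buffer_pool_nbuf_free() - free or recycle an RX nbuf
 * @soc: DP SoC handle
 * @nbuf: nbuf to be freed
 * @mac_id: mac id which owns the buffer
 *
 * If the emergency pool for this mac is not yet full, the nbuf is reset
 * and parked in the pool for later reuse; otherwise it is freed.
 *
 * Return: none
 */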
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool;
	struct rx_buff_pool *buff_pool;

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	rx_desc_pool = &soc->rx_desc_buf[mac_id];
	buff_pool = &soc->rx_buff_pool[mac_id];

	if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
		       DP_RX_BUFFER_POOL_SIZE))
		return qdf_nbuf_free(nbuf);

	qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
		       rx_desc_pool->buf_alignment);
	qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
}
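/**
 * dp_rx_refill_buff_pool_enqueue() - replenish the RX refill buffer pool
 * @soc: DP SoC handle
 *
 * Allocates and DMA-maps buffers in bursts of DP_RX_REFILL_BUFF_POOL_BURST
 * and appends them to the refill pool under bufq_lock until the pool
 * reaches its maximum length.
 *
 * Return: none
 */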
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
{
	struct rx_desc_pool *rx_desc_pool;
	struct rx_refill_buff_pool *buff_pool;
	struct dp_pdev *dp_pdev;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int count, i;
	qdf_nbuf_t nbuf_head;
	qdf_nbuf_t nbuf_tail;
	uint32_t num_req_refill;

	if (!soc)
		return;

	buff_pool = &soc->rx_refill_buff_pool;
	if (!buff_pool->is_initialized)
		return;

	rx_desc_pool = &soc->rx_desc_buf[0];
	dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);

	num_req_refill = buff_pool->max_bufq_len - buff_pool->bufq_len;

	while (num_req_refill) {
		if (num_req_refill > DP_RX_REFILL_BUFF_POOL_BURST)
			num_req_refill = DP_RX_REFILL_BUFF_POOL_BURST;

		count = 0;
		nbuf_head = NULL;
		nbuf_tail = NULL;
		for (i = 0; i < num_req_refill; i++) {
			nbuf = qdf_nbuf_alloc(soc->osdev,
					      rx_desc_pool->buf_size,
					      RX_BUFFER_RESERVATION,
					      rx_desc_pool->buf_alignment,
					      FALSE);
			if (!nbuf)
				continue;

			ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
							 QDF_DMA_FROM_DEVICE,
							 rx_desc_pool->buf_size);
			if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
				qdf_nbuf_free(nbuf);
				continue;
			}

			dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
							  rx_desc_pool->buf_size,
							  true);

			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
			count++;
		}
		if (count) {
			qdf_spin_lock_bh(&buff_pool->bufq_lock);
			DP_RX_MERGE_TWO_LIST(buff_pool->buf_head,
					     buff_pool->buf_tail,
					     nbuf_head, nbuf_tail);
			buff_pool->bufq_len += count;

			num_req_refill = buff_pool->max_bufq_len -
				buff_pool->bufq_len;
			qdf_spin_unlock_bh(&buff_pool->bufq_lock);

			DP_STATS_INC(dp_pdev,
				     rx_refill_buff_pool.num_bufs_refilled,
				     count);
		}
	}
}
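/**
 * dp_rx_refill_buff_pool_dequeue_nbuf() - dequeue one nbuf from the refill pool
 * @soc: DP SoC handle
 *
 * The caller is expected to have set in_rx_refill_lock; buffers returned
 * from this pool are already DMA-mapped.
 *
 * Return: pre-allocated nbuf, or NULL if the pool is empty or not locked
 */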
static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
{
	qdf_nbuf_t nbuf = NULL;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;

	if (!buff_pool->in_rx_refill_lock || !buff_pool->bufq_len)
		return nbuf;

	nbuf = buff_pool->buf_head;
	buff_pool->buf_head = qdf_nbuf_next(buff_pool->buf_head);
	qdf_nbuf_set_next(nbuf, NULL);
	buff_pool->bufq_len--;

	return nbuf;
}
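/**
 * dp_rx_buffer_pool_nbuf_alloc() - allocate an RX buffer nbuf
 * @soc: DP SoC handle
 * @mac_id: mac id which owns the RXDMA ring
 * @rx_desc_pool: RX descriptor pool
 * @num_available_buffers: number of buffers currently available in the
 *                         RXDMA refill ring
 *
 * Allocation order: first the pre-mapped refill pool, then a fresh
 * qdf_nbuf_alloc(); if that keeps failing, or the ring is running low
 * (below 10% of its entries), fall back to the emergency pool.
 *
 * Return: allocated nbuf, or NULL on failure
 */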
qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
			     struct rx_desc_pool *rx_desc_pool,
			     uint32_t num_available_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_buff_pool *buff_pool;
	struct dp_srng *dp_rxdma_srng;
	qdf_nbuf_t nbuf;

	nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
	if (qdf_likely(nbuf)) {
		DP_STATS_INC(dp_pdev,
			     rx_refill_buff_pool.num_bufs_allocated, 1);
		return nbuf;
	}

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	buff_pool = &soc->rx_buff_pool[mac_id];
	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

	nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment,
			      FALSE);

	if (!buff_pool->is_initialized)
		return nbuf;

	if (qdf_likely(nbuf)) {
		buff_pool->nbuf_fail_cnt = 0;
		return nbuf;
	}

	buff_pool->nbuf_fail_cnt++;

	/* Allocate buffer from the buffer pool */
	if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFF_POOL_ALLOC_THRES ||
	    (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
		nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
		if (nbuf)
			DP_STATS_INC(dp_pdev,
				     rx_buffer_pool.num_pool_bufs_replenish, 1);
	}

	return nbuf;
}
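/**
 * dp_rx_buffer_pool_nbuf_map() - DMA-map an RX nbuf if not already mapped
 * @soc: DP SoC handle
 * @rx_desc_pool: RX descriptor pool
 * @nbuf_frag_info_t: nbuf/frag wrapper holding the buffer to be mapped
 *
 * Buffers taken from the refill pool are pre-mapped and already carry a
 * valid physical address, so only unmapped buffers are mapped here,
 * followed by the IPA SMMU mapping.
 *
 * Return: QDF_STATUS_SUCCESS, or a mapping error status
 */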
QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)) {
		ret = qdf_nbuf_map_nbytes_single(soc->osdev,
						 (nbuf_frag_info_t->virt_addr).nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
			return ret;

		dp_ipa_handle_rx_buf_smmu_mapping(soc,
						  (qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
						  rx_desc_pool->buf_size,
						  true);
	}

	return ret;
}
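/**
 * dp_rx_refill_buff_pool_init() - initialize the RX refill buffer pool
 * @soc: DP SoC handle
 * @mac_id: mac id whose descriptor pool sizes the buffers
 *
 * Pre-allocates and DMA-maps up to DP_RX_REFILL_BUFF_POOL_SIZE buffers
 * when the feature is enabled in the wlan config.
 *
 * Return: none
 */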
static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	QDF_STATUS ret;
	int i;

	if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX refill buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	buff_pool->bufq_len = 0;
	buff_pool->buf_head = NULL;
	buff_pool->buf_tail = NULL;
	buff_pool->max_bufq_len = DP_RX_REFILL_BUFF_POOL_SIZE;
	qdf_spinlock_create(&buff_pool->bufq_lock);

	for (i = 0; i < buff_pool->max_bufq_len; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			continue;
		}

		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
						  rx_desc_pool->buf_size,
						  true);

		DP_RX_LIST_APPEND(buff_pool->buf_head,
				  buff_pool->buf_tail, nbuf);
		buff_pool->bufq_len++;
	}

	dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
		buff_pool->max_bufq_len,
		buff_pool->bufq_len);

	buff_pool->is_initialized = true;
}
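/**
 * dp_rx_buffer_pool_init() - initialize the emergency RX buffer pool
 * @soc: DP SoC handle
 * @mac_id: mac id which owns the pool
 *
 * Also initializes the SoC-wide refill buffer pool, then pre-allocates
 * DP_RX_BUFFER_POOL_SIZE emergency buffers for this mac if the feature
 * is enabled in the wlan config.
 *
 * Return: none
 */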
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;
	int i;

	dp_rx_refill_buff_pool_init(soc, mac_id);

	if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX buffer pool support is disabled");
		buff_pool->is_initialized = false;
		return;
	}

	if (buff_pool->is_initialized)
		return;

	qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);

	for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      rx_desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;
		qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
						 nbuf);
	}

	dp_info("RX buffer pool required allocation: %u actual allocation: %u",
		DP_RX_BUFFER_POOL_SIZE,
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	buff_pool->is_initialized = true;
}
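/**
 * dp_rx_refill_buff_pool_deinit() - deinitialize the RX refill buffer pool
 * @soc: DP SoC handle
 * @mac_id: mac id whose descriptor pool sizes the buffers
 *
 * Unmaps and frees every buffer left in the refill pool and destroys
 * the pool lock.
 *
 * Return: none
 */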
static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	uint32_t count = 0;

	if (!buff_pool->is_initialized)
		return;

	buff_pool->in_rx_refill_lock = true;
	while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
						  rx_desc_pool->buf_size,
						  false);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL,
					     rx_desc_pool->buf_size);
		qdf_nbuf_free(nbuf);
		count++;
	}
	buff_pool->in_rx_refill_lock = false;

	dp_info("Rx refill buffers freed during deinit %u qlen: %u",
		count, buff_pool->bufq_len);

	qdf_spinlock_destroy(&buff_pool->bufq_lock);
	buff_pool->is_initialized = false;
}
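/**
 * dp_rx_buffer_pool_deinit() - deinitialize the emergency RX buffer pool
 * @soc: DP SoC handle
 * @mac_id: mac id which owns the pool
 *
 * Deinitializes the refill buffer pool first, then frees every nbuf
 * remaining in this mac's emergency queue.
 *
 * Return: none
 */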
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;

	dp_rx_refill_buff_pool_deinit(soc, mac_id);

	if (!buff_pool->is_initialized)
		return;

	dp_info("buffers in the RX buffer pool during deinit: %u",
		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

	while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
		qdf_nbuf_free(nbuf);

	buff_pool->is_initialized = false;
}
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */