dp_rx_buffer_pool.c

/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_rx_buffer_pool.h"
#include "dp_ipa.h"

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
#include "dp_rx_mon.h"
#endif

#ifndef DP_RX_BUFFER_POOL_SIZE
#define DP_RX_BUFFER_POOL_SIZE 128
#endif

#ifndef DP_RX_BUFF_POOL_ALLOC_THRES
#define DP_RX_BUFF_POOL_ALLOC_THRES 1
#endif

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
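/**
 * dp_rx_buffer_pool_refill() - replenish the emergency buffer pool while
 *                              processing an MSDU
 * @soc: data path SoC handle
 * @nbuf: head of the (possibly scattered) MSDU being processed
 * @mac_id: MAC id of the RXDMA ring the buffer came from
 *
 * Allocates one fresh nbuf into the emergency queue per fragment until
 * the pool is full. If an allocation fails, the original MSDU chain is
 * recycled into the pool instead and the caller must not reuse it.
 *
 * Return: true if the MSDU was consumed by the pool, false otherwise
 */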
bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
        struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
        struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
        qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
        bool consumed = false;

        if (!bufpool->is_initialized || !pdev)
                return consumed;

        /* process only buffers of the RXDMA ring */
        if (qdf_unlikely(rx_desc_pool !=
                         dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id)))
                return consumed;

        first_nbuf = nbuf;

        while (nbuf) {
                next_nbuf = qdf_nbuf_next(nbuf);

                /* stop once the emergency pool is full */
                if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
                               DP_RX_BUFFER_POOL_SIZE))
                        break;

                refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
                                             RX_BUFFER_RESERVATION,
                                             rx_desc_pool->buf_alignment,
                                             FALSE);

                /* Failed to allocate a new nbuf; mark the MSDU as
                 * consumed so it is reset and recycled into the pool
                 * below.
                 */
                if (!refill_nbuf) {
                        DP_STATS_INC(pdev,
                                     rx_buffer_pool.num_bufs_consumed, 1);
                        consumed = true;
                        break;
                }

                /* Successful allocation!! */
                DP_STATS_INC(pdev,
                             rx_buffer_pool.num_bufs_alloc_success, 1);
                qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
                                                 refill_nbuf);
                nbuf = next_nbuf;
        }

        nbuf = first_nbuf;
        if (consumed) {
                /* Free the MSDU/scattered MSDU into the pool */
                while (nbuf) {
                        next_nbuf = qdf_nbuf_next(nbuf);
                        dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
                        nbuf = next_nbuf;
                }
        }

        return consumed;
}
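
/**
 * dp_rx_buffer_pool_nbuf_free() - return an nbuf to the emergency pool,
 *                                 or free it if the pool is full or not
 *                                 initialized
 * @soc: data path SoC handle
 * @nbuf: buffer being released
 * @mac_id: MAC id of the RXDMA ring
 */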
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
        struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        struct rx_desc_pool *rx_desc_pool;
        struct rx_buff_pool *buff_pool;

        if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
                mac_id = dp_pdev->lmac_id;

        rx_desc_pool = &soc->rx_desc_buf[mac_id];
        buff_pool = &soc->rx_buff_pool[mac_id];

        /* free the nbuf outright when the pool is full or not in use */
        if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
                       DP_RX_BUFFER_POOL_SIZE) ||
            !buff_pool->is_initialized)
                return qdf_nbuf_free(nbuf);

        /* otherwise reset it and park it in the emergency queue */
        qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
                       rx_desc_pool->buf_alignment);
        qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
}
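
/**
 * dp_rx_refill_buff_pool_enqueue() - top up the refill ring with freshly
 *                                    allocated, DMA-mapped nbufs
 * @soc: data path SoC handle
 */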
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
{
        struct rx_desc_pool *rx_desc_pool;
        struct rx_refill_buff_pool *buff_pool;
        qdf_device_t dev;
        qdf_nbuf_t nbuf;
        QDF_STATUS ret;
        int count, i;
        uint16_t num_refill;
        uint16_t total_num_refill;
        uint16_t total_count = 0;
        uint16_t head, tail;

        if (!soc)
                return;

        dev = soc->osdev;
        buff_pool = &soc->rx_refill_buff_pool;
        rx_desc_pool = &soc->rx_desc_buf[0];
        if (!buff_pool->is_initialized)
                return;

        head = buff_pool->head;
        tail = buff_pool->tail;

        /* Number of free slots in the ring; one slot always stays
         * empty so that a full ring (head just behind tail) can be
         * told apart from an empty one (head == tail).
         */
        if (tail > head)
                total_num_refill = (tail - head - 1);
        else
                total_num_refill = (DP_RX_REFILL_BUFF_POOL_SIZE - head +
                                    tail - 1);

        while (total_num_refill) {
                if (total_num_refill > DP_RX_REFILL_BUFF_POOL_BURST)
                        num_refill = DP_RX_REFILL_BUFF_POOL_BURST;
                else
                        num_refill = total_num_refill;

                count = 0;
                for (i = 0; i < num_refill; i++) {
                        nbuf = qdf_nbuf_alloc(dev, rx_desc_pool->buf_size,
                                              RX_BUFFER_RESERVATION,
                                              rx_desc_pool->buf_alignment,
                                              FALSE);
                        if (qdf_unlikely(!nbuf))
                                continue;

                        ret = qdf_nbuf_map_nbytes_single(dev, nbuf,
                                                         QDF_DMA_FROM_DEVICE,
                                                         rx_desc_pool->buf_size);
                        if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
                                qdf_nbuf_free(nbuf);
                                continue;
                        }

                        buff_pool->buf_elem[head++] = nbuf;
                        head &= (DP_RX_REFILL_BUFF_POOL_SIZE - 1);
                        count++;
                }

                if (count) {
                        buff_pool->head = head;
                        total_num_refill -= count;
                        total_count += count;
                } else {
                        /* Nothing could be allocated this burst; stop
                         * instead of spinning under memory pressure.
                         */
                        break;
                }
        }

        DP_STATS_INC(buff_pool->dp_pdev,
                     rx_refill_buff_pool.num_bufs_refilled,
                     total_count);
}
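
/**
 * dp_rx_refill_buff_pool_dequeue_nbuf() - pop one pre-mapped nbuf from
 *                                         the refill ring
 * @soc: data path SoC handle
 *
 * Return: nbuf on success, NULL if the ring is empty
 */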
static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
{
        struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
        qdf_nbuf_t nbuf = NULL;
        uint16_t head, tail;

        head = buff_pool->head;
        tail = buff_pool->tail;

        if (head == tail)
                return NULL;

        nbuf = buff_pool->buf_elem[tail++];
        tail &= (DP_RX_REFILL_BUFF_POOL_SIZE - 1);
        buff_pool->tail = tail;

        return nbuf;
}
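
/**
 * dp_rx_buffer_pool_nbuf_alloc() - allocate an RX buffer, trying the
 *                                  refill ring first, then the OS
 *                                  allocator, then the emergency pool
 * @soc: data path SoC handle
 * @mac_id: MAC id of the RXDMA ring
 * @rx_desc_pool: RX descriptor pool that sizes and aligns the buffer
 * @num_available_buffers: number of buffers currently available on the ring
 *
 * Return: nbuf on success, NULL if every source is exhausted
 */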
qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
                             struct rx_desc_pool *rx_desc_pool,
                             uint32_t num_available_buffers)
{
        struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
        struct rx_buff_pool *buff_pool;
        struct dp_srng *dp_rxdma_srng;
        qdf_nbuf_t nbuf;

        /* fast path: take a pre-mapped buffer from the refill ring */
        nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
        if (qdf_likely(nbuf)) {
                DP_STATS_INC(dp_pdev,
                             rx_refill_buff_pool.num_bufs_allocated, 1);
                return nbuf;
        }

        if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
                mac_id = dp_pdev->lmac_id;

        buff_pool = &soc->rx_buff_pool[mac_id];
        dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];

        nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
                              RX_BUFFER_RESERVATION,
                              rx_desc_pool->buf_alignment,
                              FALSE);

        if (!buff_pool->is_initialized)
                return nbuf;

        if (qdf_likely(nbuf)) {
                buff_pool->nbuf_fail_cnt = 0;
                return nbuf;
        }

        buff_pool->nbuf_fail_cnt++;

        /* Allocation failed; fall back to the emergency buffer pool */
        if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFF_POOL_ALLOC_THRES ||
            (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
                nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
                if (nbuf)
                        DP_STATS_INC(dp_pdev,
                                     rx_buffer_pool.num_pool_bufs_replenish, 1);
        }

        return nbuf;
}
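
/**
 * dp_rx_buffer_pool_nbuf_map() - DMA-map an RX buffer unless it already
 *                                carries a mapping from the refill pool
 * @soc: data path SoC handle
 * @rx_desc_pool: RX descriptor pool providing the buffer size
 * @nbuf_frag_info_t: nbuf/frag info holding the buffer to map
 *
 * Return: QDF_STATUS_SUCCESS, or the mapping error
 */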
QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
                           struct rx_desc_pool *rx_desc_pool,
                           struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
        QDF_STATUS ret = QDF_STATUS_SUCCESS;

        /* buffers from the refill ring are already DMA-mapped; only
         * map the ones that still have a NULL physical address
         */
        if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf))
                ret = qdf_nbuf_map_nbytes_single(soc->osdev,
                                                 (nbuf_frag_info_t->virt_addr).nbuf,
                                                 QDF_DMA_FROM_DEVICE,
                                                 rx_desc_pool->buf_size);

        return ret;
}
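
/**
 * dp_rx_refill_buff_pool_init() - pre-allocate and DMA-map the buffers
 *                                 of the refill ring
 * @soc: data path SoC handle
 * @mac_id: MAC id used to select the RX descriptor pool sizing
 */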
static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
{
        struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
        qdf_nbuf_t nbuf;
        struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
        QDF_STATUS ret;
        uint16_t head = 0;
        int i;

        if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
                dp_err("RX refill buffer pool support is disabled");
                buff_pool->is_initialized = false;
                return;
        }

        buff_pool->max_bufq_len = DP_RX_REFILL_BUFF_POOL_SIZE;
        buff_pool->dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);
        buff_pool->tail = 0;

        /* fill all slots but one: a completely full ring would be
         * indistinguishable from an empty one (head == tail)
         */
        for (i = 0; i < (buff_pool->max_bufq_len - 1); i++) {
                nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
                                      RX_BUFFER_RESERVATION,
                                      rx_desc_pool->buf_alignment, FALSE);
                if (!nbuf)
                        continue;

                ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
                                                 QDF_DMA_FROM_DEVICE,
                                                 rx_desc_pool->buf_size);
                if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
                        qdf_nbuf_free(nbuf);
                        continue;
                }

                buff_pool->buf_elem[head] = nbuf;
                head++;
        }

        buff_pool->head = head;

        dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
                buff_pool->max_bufq_len,
                buff_pool->head);

        buff_pool->is_initialized = true;
}
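
/**
 * dp_rx_buffer_pool_init() - initialize the refill ring and the
 *                            emergency buffer pool for a MAC
 * @soc: data path SoC handle
 * @mac_id: MAC id of the RXDMA ring
 */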
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
        struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
        struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
        qdf_nbuf_t nbuf;
        int i;

        dp_rx_refill_buff_pool_init(soc, mac_id);

        if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
                dp_err("RX buffer pool support is disabled");
                buff_pool->is_initialized = false;
                return;
        }

        if (buff_pool->is_initialized)
                return;

        qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);

        for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
                nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
                                      RX_BUFFER_RESERVATION,
                                      rx_desc_pool->buf_alignment, FALSE);
                if (!nbuf)
                        continue;

                qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
                                                 nbuf);
        }

        dp_info("RX buffer pool required allocation: %u actual allocation: %u",
                DP_RX_BUFFER_POOL_SIZE,
                qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

        buff_pool->is_initialized = true;
}
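
/**
 * dp_rx_refill_buff_pool_deinit() - unmap and free every buffer left in
 *                                   the refill ring
 * @soc: data path SoC handle
 * @mac_id: MAC id used to select the RX descriptor pool sizing
 */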
static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
        struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
        struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
        qdf_nbuf_t nbuf;
        uint32_t count = 0;

        if (!buff_pool->is_initialized)
                return;

        while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
                qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
                                             QDF_DMA_BIDIRECTIONAL,
                                             rx_desc_pool->buf_size);
                qdf_nbuf_free(nbuf);
                count++;
        }

        dp_info("Rx refill buffers freed during deinit %u head: %u, tail: %u",
                count, buff_pool->head, buff_pool->tail);

        buff_pool->is_initialized = false;
}
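
/**
 * dp_rx_buffer_pool_deinit() - drain the refill ring and the emergency
 *                              buffer pool for a MAC and free the buffers
 * @soc: data path SoC handle
 * @mac_id: MAC id of the RXDMA ring
 */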
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
        struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
        qdf_nbuf_t nbuf;

        dp_rx_refill_buff_pool_deinit(soc, mac_id);

        if (!buff_pool->is_initialized)
                return;

        dp_info("buffers in the RX buffer pool during deinit: %u",
                qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));

        while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
                qdf_nbuf_free(nbuf);

        buff_pool->is_initialized = false;
}

#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */