dp_rx_desc.c
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"
#include <qdf_module.h>

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
                      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
                      1 << DP_RX_DESC_PAGE_ID_SHIFT);
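/*
 * Why this assert: the per-page offset occupies the cookie bits below
 * DP_RX_DESC_PAGE_ID_SHIFT, so every element on a page must be addressable
 * in that many bits. Illustrative numbers (assumed, not from this file):
 * with PAGE_SIZE = 4096 and a 16-byte element, 256 elements fit on a page,
 * which requires DP_RX_DESC_PAGE_ID_SHIFT >= 8 (256 <= 1 << 8).
 */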
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *                                  rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS - if memory is allocated
 *         QDF_STATUS_E_NOMEM - if memory is not allocated
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
        if (!rx_desc_pool->desc_pages.num_pages) {
                dp_err("Multi page alloc fail, size=%d, elem=%d",
                       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
                return QDF_STATUS_E_NOMEM;
        }
        return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);
/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *                           descriptors
 *
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS - on success
 *         QDF_STATUS_E_NOMEM - if page allocation fails
 *         QDF_STATUS_E_FAULT - if linking the pages overflows
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
                                 uint32_t num_elem,
                                 struct rx_desc_pool *rx_desc_pool)
{
        uint32_t desc_size;
        union dp_rx_desc_list_elem_t *rx_desc_elem;

        desc_size = sizeof(*rx_desc_elem);
        rx_desc_pool->elem_size = desc_size;

        dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
                                      &rx_desc_pool->desc_pages,
                                      desc_size, num_elem, 0, true);
        if (!rx_desc_pool->desc_pages.num_pages) {
                qdf_err("Multi page alloc fail, size=%d, elem=%d",
                        desc_size, num_elem);
                return QDF_STATUS_E_NOMEM;
        }

        if (qdf_mem_multi_page_link(soc->osdev,
                                    &rx_desc_pool->desc_pages,
                                    desc_size, num_elem, true)) {
                qdf_err("overflow num link, size=%d, elem=%d",
                        desc_size, num_elem);
                goto free_rx_desc_pool;
        }
        return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
        dp_rx_desc_pool_free(soc, rx_desc_pool);

        return QDF_STATUS_E_FAULT;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);
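/*
 * Typical pool lifecycle (illustrative sketch, not a verbatim call site;
 * soc->rx_desc_buf[] is an assumed storage location for illustration):
 *
 *      struct rx_desc_pool *pool = &soc->rx_desc_buf[pool_id];
 *
 *      if (dp_rx_desc_pool_alloc(soc, num_elem, pool) != QDF_STATUS_SUCCESS)
 *              return QDF_STATUS_E_NOMEM;
 *      dp_rx_desc_pool_init(soc, pool_id, num_elem, pool);
 *      ...
 *      dp_rx_desc_nbuf_free(soc, pool);
 *      dp_rx_desc_pool_deinit(soc, pool, pool_id);
 *      dp_rx_desc_pool_free(soc, pool);
 */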
/*
 * dp_rx_desc_pool_init_generic() - walk the linked freelist and stamp
 *                                  each descriptor with its cookie and
 *                                  pool ID
 */
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
                                        struct rx_desc_pool *rx_desc_pool,
                                        uint32_t pool_id)
{
        uint32_t id, page_id, offset, num_desc_per_page;
        uint32_t count = 0;
        union dp_rx_desc_list_elem_t *rx_desc_elem;

        num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;

        rx_desc_elem = rx_desc_pool->freelist;
        while (rx_desc_elem) {
                page_id = count / num_desc_per_page;
                offset = count % num_desc_per_page;
                /*
                 * The cookie width comes from the REO destination ring:
                 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
                 * is 21 bits wide; the encoding below uses 20 of them:
                 * 8 bits - offset within the page
                 * 8 bits - page ID
                 * 4 bits - pool ID
                 */
                id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
                      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
                      offset);
                rx_desc_elem->rx_desc.cookie = id;
                rx_desc_elem->rx_desc.pool_id = pool_id;
                rx_desc_elem->rx_desc.in_use = 0;
                rx_desc_elem = rx_desc_elem->next;
                count++;
        }
        return QDF_STATUS_SUCCESS;
}
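/*
 * Worked example for the encoding above (assuming
 * DP_RX_DESC_POOL_ID_SHIFT == 16 and DP_RX_DESC_PAGE_ID_SHIFT == 8,
 * matching the 8/8/4 bit layout in the comment; the actual macro values
 * live in the headers): pool_id = 2, page_id = 3, offset = 5 yields
 * cookie = (2 << 16) | (3 << 8) | 5 = 0x20305.
 */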
/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *                          convert the pool of memory into a list of
 *                          rx descriptors and create locks to access this
 *                          list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
                          uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
        QDF_STATUS status;

        /* Initialize the lock */
        qdf_spinlock_create(&rx_desc_pool->lock);

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        rx_desc_pool->pool_size = pool_size;

        /* The freelist head is the first element of the first page */
        rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
                                  *rx_desc_pool->desc_pages.cacheable_pages;

        status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
                                                    pool_id);
        if (!QDF_IS_STATUS_SUCCESS(status))
                dp_err("RX desc pool initialization failed");

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);
/*
 * dp_rx_desc_find() - locate a descriptor element from its page ID and
 *                     offset within the multi-page descriptor pool
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
                                              struct rx_desc_pool *rx_desc_pool)
{
        return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
                rx_desc_pool->elem_size * offset;
}
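/*
 * Illustrative decode, mirroring the encode in
 * dp_rx_desc_pool_init_generic(); the 0xFF masks below follow the 8/8/4
 * layout comment and are written out here only for illustration:
 *
 *      page_id = (cookie >> DP_RX_DESC_PAGE_ID_SHIFT) & 0xFF;
 *      offset  = cookie & 0xFF;
 *      rx_desc = &dp_rx_desc_find(page_id, offset, rx_desc_pool)->rx_desc;
 */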
/*
 * dp_rx_desc_nbuf_collect() - walk every descriptor in the pool and sort
 *                             the in-use nbufs into two lists: buffers
 *                             still DMA-mapped (to be unmapped and freed)
 *                             and buffers already unmapped (to be freed
 *                             only)
 */
static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
                                          struct rx_desc_pool *rx_desc_pool,
                                          qdf_nbuf_t *nbuf_unmap_list,
                                          qdf_nbuf_t *nbuf_free_list)
{
        uint32_t i, num_desc, page_id, offset, num_desc_per_page;
        union dp_rx_desc_list_elem_t *rx_desc_elem;
        struct dp_rx_desc *rx_desc;

        if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
                qdf_err("No pages found on this desc pool");
                return QDF_STATUS_E_INVAL;
        }

        num_desc = rx_desc_pool->pool_size;
        num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
        for (i = 0; i < num_desc; i++) {
                page_id = i / num_desc_per_page;
                offset = i % num_desc_per_page;
                rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
                rx_desc = &rx_desc_elem->rx_desc;
                dp_rx_desc_free_dbg_info(rx_desc);
                if (rx_desc->in_use) {
                        if (!rx_desc->unmapped) {
                                DP_RX_HEAD_APPEND(*nbuf_unmap_list,
                                                  rx_desc->nbuf);
                                rx_desc->unmapped = 1;
                        } else {
                                DP_RX_HEAD_APPEND(*nbuf_free_list,
                                                  rx_desc->nbuf);
                        }
                }
        }
        return QDF_STATUS_SUCCESS;
}
static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
                                    qdf_nbuf_t nbuf_unmap_list,
                                    qdf_nbuf_t nbuf_free_list,
                                    uint16_t buf_size)
{
        qdf_nbuf_t nbuf = nbuf_unmap_list;
        qdf_nbuf_t next;

        while (nbuf) {
                next = nbuf->next;

                /* Release any IPA SMMU mapping first; a failure here is
                 * only logged (rate-limited), not treated as fatal.
                 */
                if (dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size,
                                                      false))
                        dp_info_rl("Unable to unmap nbuf: %pK", nbuf);

                qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
                                             QDF_DMA_BIDIRECTIONAL, buf_size);
                qdf_nbuf_free(nbuf);
                nbuf = next;
        }

        nbuf = nbuf_free_list;
        while (nbuf) {
                next = nbuf->next;
                qdf_nbuf_free(nbuf);
                nbuf = next;
        }
}
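/*
 * Note the split between the two helpers above: dp_rx_desc_nbuf_collect()
 * runs under the pool lock and only builds the two lists, while the actual
 * unmap/free work in dp_rx_desc_nbuf_cleanup() runs outside the lock (see
 * the callers below), keeping the BH-disabled critical section short.
 */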
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
                                   struct rx_desc_pool *rx_desc_pool)
{
        qdf_nbuf_t nbuf_unmap_list = NULL;
        qdf_nbuf_t nbuf_free_list = NULL;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
                                &nbuf_unmap_list, &nbuf_free_list);
        qdf_spin_unlock_bh(&rx_desc_pool->lock);

        dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
                                rx_desc_pool->buf_size);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        qdf_nbuf_t nbuf_unmap_list = NULL;
        qdf_nbuf_t nbuf_free_list = NULL;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
                                &nbuf_unmap_list, &nbuf_free_list);
        qdf_spin_unlock_bh(&rx_desc_pool->lock);

        dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
                                rx_desc_pool->buf_size);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);
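/*
 * dp_rx_desc_nbuf_free() differs from dp_rx_desc_nbuf_and_pool_free()
 * only in that it leaves the pool lock intact, presumably so the caller
 * can still invoke dp_rx_desc_pool_deinit(), which takes the lock and
 * then destroys it.
 */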
void dp_rx_desc_pool_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
                return;

        dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
                                     &rx_desc_pool->desc_pages, 0, true);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
                            struct rx_desc_pool *rx_desc_pool,
                            uint32_t pool_id)
{
        qdf_spin_lock_bh(&rx_desc_pool->lock);

        rx_desc_pool->freelist = NULL;
        rx_desc_pool->pool_size = 0;

        /* Deinitialize rx mon dest frag flag */
        rx_desc_pool->rx_mon_dest_frag_enable = false;

        soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);
#else
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *                                  rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS - if memory is allocated
 *         QDF_STATUS_E_NOMEM - if memory is not allocated
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
        if (!rx_desc_pool->array) {
                dp_err("nss-wifi<4> skip Rx refill");
                return QDF_STATUS_E_NOMEM;
        }
        return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);
/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *                           descriptors
 *
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS - on success
 *         QDF_STATUS_E_NOMEM - if allocation fails
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
                                 uint32_t pool_size,
                                 struct rx_desc_pool *rx_desc_pool)
{
        rx_desc_pool->array = qdf_mem_malloc(pool_size *
                                sizeof(union dp_rx_desc_list_elem_t));
        if (!(rx_desc_pool->array)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
                          "RX Desc Pool allocation failed");
                return QDF_STATUS_E_NOMEM;
        }

        return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
                                        struct rx_desc_pool *rx_desc_pool,
                                        uint32_t pool_id)
{
        int i;

        /* Chain the array into a freelist and stamp each cookie: the low
         * bits carry the array index, the pool ID sits at bit 18.
         */
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (i == rx_desc_pool->pool_size - 1)
                        rx_desc_pool->array[i].next = NULL;
                else
                        rx_desc_pool->array[i].next =
                                &rx_desc_pool->array[i + 1];
                rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
                rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
                rx_desc_pool->array[i].rx_desc.in_use = 0;
        }
        return QDF_STATUS_SUCCESS;
}
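/*
 * Example of the single-array cookie above: pool_id = 1, i = 5 gives
 * cookie = 5 | (1 << 18) = 0x40005, i.e. an 18-bit index field with the
 * pool ID in the bits above it.
 */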
/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *                          convert the pool of memory into a list of
 *                          rx descriptors and create locks to access this
 *                          list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
                          uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
        QDF_STATUS status;

        /* Initialize the lock */
        qdf_spinlock_create(&rx_desc_pool->lock);

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        rx_desc_pool->pool_size = pool_size;

        /* link SW rx descs into a freelist; zero the whole element array,
         * not just pool_size bytes
         */
        rx_desc_pool->freelist = &rx_desc_pool->array[0];
        qdf_mem_zero(rx_desc_pool->freelist,
                     rx_desc_pool->pool_size *
                     sizeof(union dp_rx_desc_list_elem_t));

        status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
                                                    pool_id);
        if (!QDF_IS_STATUS_SUCCESS(status))
                dp_err("RX desc pool initialization failed");

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
                                   struct rx_desc_pool *rx_desc_pool)
{
        qdf_nbuf_t nbuf;
        int i;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (rx_desc_pool->array[i].rx_desc.in_use) {
                        nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

                        if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
                                dp_ipa_handle_rx_buf_smmu_mapping(
                                                soc, nbuf,
                                                rx_desc_pool->buf_size,
                                                false);
                                qdf_nbuf_unmap_nbytes_single(
                                                soc->osdev, nbuf,
                                                QDF_DMA_FROM_DEVICE,
                                                rx_desc_pool->buf_size);
                                rx_desc_pool->array[i].rx_desc.unmapped = 1;
                        }
                        qdf_nbuf_free(nbuf);
                }
        }
        qdf_mem_free(rx_desc_pool->array);
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        qdf_nbuf_t nbuf;
        int i;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (rx_desc_pool->array[i].rx_desc.in_use) {
                        nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

                        if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
                                dp_ipa_handle_rx_buf_smmu_mapping(
                                                soc, nbuf,
                                                rx_desc_pool->buf_size,
                                                false);
                                qdf_nbuf_unmap_nbytes_single(
                                                soc->osdev, nbuf,
                                                QDF_DMA_FROM_DEVICE,
                                                rx_desc_pool->buf_size);
                                rx_desc_pool->array[i].rx_desc.unmapped = 1;
                        }
                        qdf_nbuf_free(nbuf);
                }
        }
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);
/**
 * dp_rx_desc_frag_free() - Free desc frag buffer
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        qdf_dma_addr_t paddr;
        qdf_frag_t vaddr;
        int i;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (rx_desc_pool->array[i].rx_desc.in_use) {
                        paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
                        vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;

                        if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
                                qdf_mem_unmap_page(soc->osdev, paddr,
                                                   rx_desc_pool->buf_size,
                                                   QDF_DMA_FROM_DEVICE);
                                rx_desc_pool->array[i].rx_desc.unmapped = 1;
                        }
                        qdf_frag_free(vaddr);
                }
        }
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_frag_free);
#endif
void dp_rx_desc_pool_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        qdf_mem_free(rx_desc_pool->array);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
                            struct rx_desc_pool *rx_desc_pool,
                            uint32_t pool_id)
{
        qdf_spin_lock_bh(&rx_desc_pool->lock);

        rx_desc_pool->freelist = NULL;
        rx_desc_pool->pool_size = 0;

        /* Deinitialize rx mon dest frag flag */
        rx_desc_pool->rx_mon_dest_frag_enable = false;

        soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);
#endif /* RX_DESC_MULTI_PAGE_ALLOC */
void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
                                    struct rx_desc_pool *rx_desc_pool,
                                    uint32_t pool_id)
{
        /* Intentionally empty: the generic arch has no per-arch deinit
         * work beyond what dp_rx_desc_pool_deinit() already does.
         */
}
/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *                              the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: pointer to the last desc attached to the list (output parameter)
 *
 * Return: number of descs allocated from free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
                                  struct rx_desc_pool *rx_desc_pool,
                                  uint16_t num_descs,
                                  union dp_rx_desc_list_elem_t **desc_list,
                                  union dp_rx_desc_list_elem_t **tail)
{
        uint16_t count;

        qdf_spin_lock_bh(&rx_desc_pool->lock);

        *desc_list = *tail = rx_desc_pool->freelist;

        for (count = 0; count < num_descs; count++) {
                if (qdf_unlikely(!rx_desc_pool->freelist)) {
                        qdf_spin_unlock_bh(&rx_desc_pool->lock);
                        return count;
                }
                *tail = rx_desc_pool->freelist;
                rx_desc_pool->freelist = rx_desc_pool->freelist->next;
        }
        (*tail)->next = NULL;
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        return count;
}

qdf_export_symbol(dp_rx_get_free_desc_list);
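/*
 * Illustrative replenish pattern pairing the two freelist helpers (sketch;
 * variable names are not from a real call site):
 *
 *      union dp_rx_desc_list_elem_t *desc_list = NULL;
 *      union dp_rx_desc_list_elem_t *tail = NULL;
 *      uint16_t got;
 *
 *      got = dp_rx_get_free_desc_list(soc, pool_id, rx_desc_pool, num_req,
 *                                     &desc_list, &tail);
 *      // ... hand 'got' buffers to hardware, consuming desc_list ...
 *      if (desc_list)  // return whatever was not consumed
 *              dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
 *                                               pool_id, rx_desc_pool);
 */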
/*
 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
 *                                      freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last desc of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
                                      union dp_rx_desc_list_elem_t **local_desc_list,
                                      union dp_rx_desc_list_elem_t **tail,
                                      uint16_t pool_id,
                                      struct rx_desc_pool *rx_desc_pool)
{
        union dp_rx_desc_list_elem_t *temp_list = NULL;

        qdf_spin_lock_bh(&rx_desc_pool->lock);

        temp_list = rx_desc_pool->freelist;
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
                  temp_list, *local_desc_list, *tail, (*tail)->next);

        /* Splice the caller's list onto the head of the pool freelist */
        rx_desc_pool->freelist = *local_desc_list;
        (*tail)->next = temp_list;
        *tail = NULL;
        *local_desc_list = NULL;

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_add_desc_list_to_free_list);