/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
		      1 << DP_RX_DESC_PAGE_ID_SHIFT);
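
/*
 * Editor's note on the assert above: the per-page element offset is
 * packed into the low DP_RX_DESC_PAGE_ID_SHIFT bits of the descriptor
 * cookie (see dp_rx_desc_pool_init_generic() below), so the number of
 * elements that fit in one page must not exceed what those bits can
 * represent; the assert enforces that at build time.
 */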

/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 * rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 * descriptors
 *
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;

	dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
				      &rx_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail, size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link, size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}

	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * Below cookie size is from REO destination ring
		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
		 * cookie size = 21 bits
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}

	return QDF_STATUS_SUCCESS;
}
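
/*
 * Worked example of the cookie packing above (assuming
 * DP_RX_DESC_POOL_ID_SHIFT = 16 and DP_RX_DESC_PAGE_ID_SHIFT = 8,
 * matching the 4/8/8-bit split described in the comment):
 * pool_id = 2, page_id = 5, offset = 3 gives
 * cookie = (2 << 16) | (5 << 8) | 3 = 0x20503.
 */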

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 * convert the pool of memory into a list of
 * rx descriptors and create locks to access this
 * list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				 *rx_desc_pool->desc_pages.cacheable_pages;

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
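
/*
 * dp_rx_desc_find() - look a descriptor up by its (page_id, offset)
 * pair: select the cacheable page, then step 'offset' elements of
 * 'elem_size' bytes into it. These are the same page_id/offset values
 * that dp_rx_desc_pool_init_generic() packs into the cookie.
 */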
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}
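
/*
 * dp_rx_desc_nbuf_collect() - walk every descriptor in the pool and
 * sort the nbufs of in-use descriptors into two lists: still-mapped
 * nbufs (to be unmapped, then freed) and already-unmapped nbufs (to
 * be freed only). Callers hold the pool lock; the actual unmap/free
 * is deferred to dp_rx_desc_nbuf_cleanup(), outside the lock.
 */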
static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
					  struct rx_desc_pool *rx_desc_pool,
					  qdf_nbuf_t *nbuf_unmap_list,
					  qdf_nbuf_t *nbuf_free_list)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}

	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			if (!rx_desc->unmapped) {
				DP_RX_HEAD_APPEND(*nbuf_unmap_list,
						  rx_desc->nbuf);
				rx_desc->unmapped = 1;
			} else {
				DP_RX_HEAD_APPEND(*nbuf_free_list,
						  rx_desc->nbuf);
			}
		}
	}

	return QDF_STATUS_SUCCESS;
}
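
/*
 * dp_rx_desc_nbuf_cleanup() - unmap (SMMU and DMA) and free the nbufs
 * gathered by dp_rx_desc_nbuf_collect(). Runs without the pool lock
 * held, so the potentially slow unmap/free work does not block other
 * users of the pool.
 */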
static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
				    qdf_nbuf_t nbuf_unmap_list,
				    qdf_nbuf_t nbuf_free_list,
				    uint16_t buf_size)
{
	qdf_nbuf_t nbuf = nbuf_unmap_list;
	qdf_nbuf_t next;

	while (nbuf) {
		next = nbuf->next;
		if (dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size,
						      false))
			dp_info_rl("Unable to unmap nbuf: %pK", nbuf);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL, buf_size);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}

	nbuf = nbuf_free_list;
	while (nbuf) {
		next = nbuf->next;
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
}
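
/*
 * The two wrappers below share the collect/cleanup pair; they differ
 * in lifetime: dp_rx_desc_nbuf_and_pool_free() also destroys the pool
 * lock (full teardown), while dp_rx_desc_nbuf_free() leaves the lock
 * intact so the pool can be refilled and reused.
 */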
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size);
}

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;

	dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
				     &rx_desc_pool->desc_pages, 0, true);
}

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

#else
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 * rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refill");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 * descriptors
 *
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_malloc(pool_size *
				sizeof(union dp_rx_desc_list_elem_t));
	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id)
{
	int i;

	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}

	return QDF_STATUS_SUCCESS;
}
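
/*
 * In this single-allocation variant the cookie is simply the array
 * index with the pool id packed at bit 18: e.g. pool_id = 1, i = 7
 * gives cookie = 7 | (1 << 18) = 0x40007.
 */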

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 * convert the pool of memory into a list of
 * rx descriptors and create locks to access this
 * list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	/* zero the whole descriptor array, not just pool_size bytes */
	qdf_mem_zero(rx_desc_pool->freelist,
		     rx_desc_pool->pool_size *
		     sizeof(union dp_rx_desc_list_elem_t));

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(
							soc, nbuf,
							rx_desc_pool->buf_size,
							false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev, nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_mem_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(
							soc, nbuf,
							rx_desc_pool->buf_size,
							false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev, nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

/**
 * dp_rx_desc_frag_free() - Free desc frag buffer
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_dma_addr_t paddr;
	qdf_frag_t vaddr;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   rx_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
#endif

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */
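
/*
 * dp_rx_desc_pool_deinit_generic() - an intentional no-op stub, as far
 * as this file shows: it can be plugged into
 * soc->arch_ops.dp_rx_desc_pool_deinit by architectures that need no
 * per-descriptor teardown beyond what dp_rx_desc_pool_deinit() does.
 */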
void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t pool_id)
{
}

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 * the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: attach the point to last desc of free list (output parameter)
 *
 * Return: number of descs allocated from free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	return count;
}
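
/*
 * Usage sketch (hypothetical caller; in this driver the real callers
 * live in dp_rx.c, e.g. the buffer-replenish path):
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *	uint16_t got;
 *
 *	got = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
 *				       num_req_buffers, &desc_list, &tail);
 *	// ... attach nbufs to the 'got' descriptors ...
 *	// hand any unused descriptors back to the pool:
 *	if (desc_list)
 *		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
 *						 mac_id, rx_desc_pool);
 */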

/*
 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
 * the freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: attach the point to last desc of local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				      union dp_rx_desc_list_elem_t **local_desc_list,
				      union dp_rx_desc_list_elem_t **tail,
				      uint16_t pool_id,
				      struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	/* splice the caller's list onto the head of the freelist */
	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}