dp_rx_desc.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"
#include <qdf_module.h>

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      (DP_BLOCKMEM_SIZE /
		       sizeof(union dp_rx_desc_list_elem_t))
		      <= (1 << DP_RX_DESC_PAGE_ID_SHIFT));

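/*
 * The assert above guarantees that every element within one descriptor
 * page is addressable by the cookie's offset field (the bits below
 * DP_RX_DESC_PAGE_ID_SHIFT). As an illustration with assumed values,
 * if DP_BLOCKMEM_SIZE = 4096, sizeof(union dp_rx_desc_list_elem_t) = 64
 * and DP_RX_DESC_PAGE_ID_SHIFT = 8, this checks 4096 / 64 = 64 <= 256.
 */
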
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;
	rx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
				      &rx_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail, size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link, size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}

	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * Below cookie size is from REO destination ring
		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
		 * cookie size = 21 bits (20 bits are used here):
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	return QDF_STATUS_SUCCESS;
}

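/*
 * Cookie layout sketch (illustrative, assuming the shift values named
 * above): a cookie built by this function can be decoded back into its
 * three components as follows.
 *
 *	pool_id = cookie >> DP_RX_DESC_POOL_ID_SHIFT;
 *	page_id = (cookie & ((1 << DP_RX_DESC_POOL_ID_SHIFT) - 1)) >>
 *		  DP_RX_DESC_PAGE_ID_SHIFT;
 *	offset  = cookie & ((1 << DP_RX_DESC_PAGE_ID_SHIFT) - 1);
 *
 * E.g. pool_id = 2, page_id = 3, offset = 5 with POOL_ID_SHIFT = 16 and
 * PAGE_ID_SHIFT = 8 encodes to 0x20305 and decodes back to (2, 3, 5).
 */
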
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

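/*
 * dp_rx_desc_find() - translate a (page_id, offset) pair, typically
 * extracted from a cookie, into the address of the corresponding
 * descriptor element inside the multi-page descriptor memory.
 */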
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}

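/*
 * dp_rx_desc_nbuf_collect() - walk every descriptor in the pool and sort
 * the network buffers still held by in-use descriptors into two lists:
 * buffers whose DMA mapping is still live go onto nbuf_unmap_list (and
 * are marked unmapped so they are not unmapped twice), while buffers that
 * were already unmapped go straight onto nbuf_free_list.
 */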
static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
					  struct rx_desc_pool *rx_desc_pool,
					  qdf_nbuf_t *nbuf_unmap_list,
					  qdf_nbuf_t *nbuf_free_list)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}

	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			if (!rx_desc->unmapped) {
				DP_RX_HEAD_APPEND(*nbuf_unmap_list,
						  rx_desc->nbuf);
				rx_desc->unmapped = 1;
			} else {
				DP_RX_HEAD_APPEND(*nbuf_free_list,
						  rx_desc->nbuf);
			}
		}
	}
	return QDF_STATUS_SUCCESS;
}

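/*
 * dp_rx_desc_nbuf_cleanup() - release the buffers gathered by
 * dp_rx_desc_nbuf_collect(). Entries on the unmap list are taken out of
 * the audio SMMU (non-monitor pools only) and IPA SMMU domains, DMA
 * unmapped, and then freed; entries on the free list are freed directly.
 */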
static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
				    qdf_nbuf_t nbuf_unmap_list,
				    qdf_nbuf_t nbuf_free_list,
				    uint16_t buf_size,
				    bool is_mon_pool)
{
	qdf_nbuf_t nbuf = nbuf_unmap_list;
	qdf_nbuf_t next;

	while (nbuf) {
		next = nbuf->next;

		if (!is_mon_pool)
			dp_audio_smmu_unmap(soc->osdev,
					    QDF_NBUF_CB_PADDR(nbuf),
					    buf_size);

		if (dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size,
						      false, __func__,
						      __LINE__))
			dp_info_rl("Unable to unmap nbuf: %pK", nbuf);

		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL, buf_size);
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}

	nbuf = nbuf_free_list;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}
}

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size, false);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size, is_mon_pool);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;

	dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
				     &rx_desc_pool->desc_pages, 0, true);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon dest frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;
	qdf_frag_cache_drain(&rx_desc_pool->pf_cache);

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);

#else

QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refill");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_common_alloc(pool_size *
				sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id)
{
	int i;

	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}
	return QDF_STATUS_SUCCESS;
}

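/*
 * In this single-array scheme the cookie is simply the array index with
 * the pool ID packed into the upper bits: cookie = i | (pool_id << 18).
 * E.g. pool_id = 1, i = 42 yields 0x4002A, and the index is recovered
 * with cookie & ((1 << 18) - 1).
 */
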
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	qdf_mem_zero(rx_desc_pool->freelist,
		     rx_desc_pool->pool_size *
		     sizeof(union dp_rx_desc_list_elem_t));

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

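/*
 * dp_rx_desc_get_nbuf() - return the buffer currently owned by descriptor
 * i. When PPE-DS support is compiled in, a descriptor may instead hold a
 * buffer kept for reuse (reuse_nbuf); that one takes precedence so the
 * teardown paths below free whichever buffer the descriptor actually
 * references.
 */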
#ifdef WLAN_SUPPORT_PPEDS
static inline
qdf_nbuf_t dp_rx_desc_get_nbuf(struct rx_desc_pool *rx_desc_pool, int i)
{
	if (rx_desc_pool->array[i].rx_desc.has_reuse_nbuf)
		return rx_desc_pool->array[i].rx_desc.reuse_nbuf;
	else
		return rx_desc_pool->array[i].rx_desc.nbuf;
}
#else
static inline
qdf_nbuf_t dp_rx_desc_get_nbuf(struct rx_desc_pool *rx_desc_pool, int i)
{
	return rx_desc_pool->array[i].rx_desc.nbuf;
}
#endif

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = dp_rx_desc_get_nbuf(rx_desc_pool, i);

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_mem_common_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = dp_rx_desc_get_nbuf(rx_desc_pool, i);

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_dma_addr_t paddr;
	qdf_frag_t vaddr;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;

			dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   rx_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_frag_free);
#endif

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_common_free(rx_desc_pool->array);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	if (rx_desc_pool->pool_size) {
		qdf_spin_lock_bh(&rx_desc_pool->lock);

		rx_desc_pool->freelist = NULL;
		rx_desc_pool->pool_size = 0;

		/* Deinitialize rx mon dest frag flag */
		rx_desc_pool->rx_mon_dest_frag_enable = false;
		qdf_frag_cache_drain(&rx_desc_pool->pf_cache);

		soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool,
						     pool_id);

		qdf_spin_unlock_bh(&rx_desc_pool->lock);
		qdf_spinlock_destroy(&rx_desc_pool->lock);
	}
}

qdf_export_symbol(dp_rx_desc_pool_deinit);
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

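/*
 * dp_rx_desc_pool_deinit_generic() - intentionally empty; it appears to
 * serve as the default arch_ops.dp_rx_desc_pool_deinit hook for targets
 * that need no extra per-pool teardown beyond the common path above.
 */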
void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t pool_id)
{
}

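/*
 * dp_rx_get_free_desc_list() - detach up to num_descs descriptors from
 * the pool freelist. Returns the number actually detached; *desc_list
 * points at the head and *tail at the last element of the detached chain.
 * num_descs is expected to be non-zero: with num_descs == 0 the loop body
 * never runs and (*tail)->next would be written through the untouched
 * freelist head (or dereference NULL on an empty pool).
 */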
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}

qdf_export_symbol(dp_rx_get_free_desc_list);

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				      union dp_rx_desc_list_elem_t **local_desc_list,
				      union dp_rx_desc_list_elem_t **tail,
				      uint16_t pool_id,
				      struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK, (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_add_desc_list_to_free_list);
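
/*
 * Usage sketch (hypothetical caller, for illustration only): a buffer
 * replenish path would typically detach a batch of descriptors, consume
 * as many as it can attach buffers to, and return the unused remainder.
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *	uint16_t got;
 *
 *	got = dp_rx_get_free_desc_list(soc, pool_id, rx_desc_pool,
 *				       num_req, &desc_list, &tail);
 *	// ... for each allocated nbuf, pop the head of desc_list
 *	// (advancing desc_list = desc_list->next) and post the buffer
 *	// to the refill ring ...
 *	if (desc_list)
 *		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
 *						 pool_id, rx_desc_pool);
 *
 * dp_rx_add_desc_list_to_free_list() dereferences *tail, so it must only
 * be called with a non-empty remainder whose tail is known.
 */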