dp_rx_desc.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"
#include <qdf_module.h>

#ifdef RX_DESC_MULTI_PAGE_ALLOC
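/*
 * The number of descriptor elements that fit in one DP_BLOCKMEM_SIZE
 * block must not exceed 1 << DP_RX_DESC_PAGE_ID_SHIFT; otherwise the
 * per-page offset would overflow into the page-ID bits of the cookie.
 */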
A_COMPILE_TIME_ASSERT(cookie_size_check,
                      (DP_BLOCKMEM_SIZE /
                       sizeof(union dp_rx_desc_list_elem_t))
                      <= (1 << DP_RX_DESC_PAGE_ID_SHIFT));

/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *                                  rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
        if (!rx_desc_pool->desc_pages.num_pages) {
                dp_err("Multi page alloc fail, size=%d, elem=%d",
                       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
                return QDF_STATUS_E_NOMEM;
        }
        return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *                           descriptors
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 *         QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
                                 uint32_t num_elem,
                                 struct rx_desc_pool *rx_desc_pool)
{
        uint32_t desc_size;
        union dp_rx_desc_list_elem_t *rx_desc_elem;

        desc_size = sizeof(*rx_desc_elem);
        rx_desc_pool->elem_size = desc_size;
        rx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
        dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
                                      &rx_desc_pool->desc_pages,
                                      desc_size, num_elem, 0, true);
        if (!rx_desc_pool->desc_pages.num_pages) {
                qdf_err("Multi page alloc fail, size=%d, elem=%d",
                        desc_size, num_elem);
                return QDF_STATUS_E_NOMEM;
        }

        if (qdf_mem_multi_page_link(soc->osdev,
                                    &rx_desc_pool->desc_pages,
                                    desc_size, num_elem, true)) {
                qdf_err("overflow num link, size=%d, elem=%d",
                        desc_size, num_elem);
                goto free_rx_desc_pool;
        }
        return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
        dp_rx_desc_pool_free(soc, rx_desc_pool);

        return QDF_STATUS_E_FAULT;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

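/*
 * Typical call sequence for this pool API (an illustrative sketch only;
 * the actual callers live elsewhere in the DP layer and may interleave
 * these steps differently):
 *
 *      dp_rx_desc_pool_alloc(soc, num_elem, pool);
 *      dp_rx_desc_pool_init(soc, pool_id, num_elem, pool);
 *      ...pool in service: buffers attached, replenished, reaped...
 *      dp_rx_desc_nbuf_free(soc, pool, false);
 *      dp_rx_desc_pool_deinit(soc, pool, pool_id);
 *      dp_rx_desc_pool_free(soc, pool);
 */
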
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
                                        struct rx_desc_pool *rx_desc_pool,
                                        uint32_t pool_id)
{
        uint32_t id, page_id, offset, num_desc_per_page;
        uint32_t count = 0;
        union dp_rx_desc_list_elem_t *rx_desc_elem;

        num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;

        rx_desc_elem = rx_desc_pool->freelist;
        while (rx_desc_elem) {
                page_id = count / num_desc_per_page;
                offset = count % num_desc_per_page;
                /*
                 * The cookie width comes from the REO destination ring:
                 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
                 * cookie size = 21 bits, of which 20 are used here:
                 * 8 bits - offset
                 * 8 bits - page ID
                 * 4 bits - pool ID
                 */
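                /*
                 * Worked example (assuming DP_RX_DESC_POOL_ID_SHIFT == 16
                 * and DP_RX_DESC_PAGE_ID_SHIFT == 8, matching the 8/8/4
                 * layout above): pool_id 1, page_id 2, offset 3 encode as
                 * (1 << 16) | (2 << 8) | 3 == 0x10203.
                 */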
                id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
                      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
                      offset);
                rx_desc_elem->rx_desc.cookie = id;
                rx_desc_elem->rx_desc.pool_id = pool_id;
                rx_desc_elem->rx_desc.in_use = 0;
                rx_desc_elem = rx_desc_elem->next;
                count++;
        }
        return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *                          convert the pool of memory into a list of
 *                          rx descriptors and create locks to access this
 *                          list of rx descriptors.
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
                          uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
        QDF_STATUS status;

        /* Initialize the lock */
        qdf_spinlock_create(&rx_desc_pool->lock);

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        rx_desc_pool->pool_size = pool_size;

        rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
                                  *rx_desc_pool->desc_pages.cacheable_pages;

        status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
                                                    pool_id);
        if (!QDF_IS_STATUS_SUCCESS(status))
                dp_err("RX desc pool initialization failed");

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

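/*
 * dp_rx_desc_find() - resolve a (page ID, offset) pair to the descriptor
 *                     element stored at that position in the pool
 * @page_id: index into the pool's cacheable pages
 * @offset: element index within that page
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: pointer to the element at page base + offset * elem_size
 */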
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
                                              struct rx_desc_pool *rx_desc_pool)
{
        return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
                rx_desc_pool->elem_size * offset;
}

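/*
 * dp_rx_desc_nbuf_collect() - walk every descriptor in the pool and
 *                             gather the nbufs still in use: buffers not
 *                             yet unmapped go to @nbuf_unmap_list, already
 *                             unmapped ones to @nbuf_free_list
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 * @nbuf_unmap_list: output list of nbufs that still need DMA unmapping
 * @nbuf_free_list: output list of nbufs that only need freeing
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL if the pool has no pages
 */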
static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
                                          struct rx_desc_pool *rx_desc_pool,
                                          qdf_nbuf_t *nbuf_unmap_list,
                                          qdf_nbuf_t *nbuf_free_list)
{
        uint32_t i, num_desc, page_id, offset, num_desc_per_page;
        union dp_rx_desc_list_elem_t *rx_desc_elem;
        struct dp_rx_desc *rx_desc;

        if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
                qdf_err("No pages found on this desc pool");
                return QDF_STATUS_E_INVAL;
        }

        num_desc = rx_desc_pool->pool_size;
        num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
        for (i = 0; i < num_desc; i++) {
                page_id = i / num_desc_per_page;
                offset = i % num_desc_per_page;
                rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
                rx_desc = &rx_desc_elem->rx_desc;
                dp_rx_desc_free_dbg_info(rx_desc);
                if (rx_desc->in_use) {
                        if (!rx_desc->unmapped) {
                                DP_RX_HEAD_APPEND(*nbuf_unmap_list,
                                                  rx_desc->nbuf);
                                rx_desc->unmapped = 1;
                        } else {
                                DP_RX_HEAD_APPEND(*nbuf_free_list,
                                                  rx_desc->nbuf);
                        }
                }
        }
        return QDF_STATUS_SUCCESS;
}

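/*
 * dp_rx_desc_nbuf_cleanup() - unmap (SMMU/IPA and DMA) and free every nbuf
 *                             on @nbuf_unmap_list, then free every nbuf on
 *                             @nbuf_free_list
 * @soc: core txrx main context
 * @nbuf_unmap_list: nbufs that still need unmapping before being freed
 * @nbuf_free_list: nbufs that only need freeing
 * @buf_size: rx buffer size used for the unmap calls
 * @is_mon_pool: true for monitor pools, which skip the audio SMMU unmap
 */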
static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
                                    qdf_nbuf_t nbuf_unmap_list,
                                    qdf_nbuf_t nbuf_free_list,
                                    uint16_t buf_size,
                                    bool is_mon_pool)
{
        qdf_nbuf_t nbuf = nbuf_unmap_list;
        qdf_nbuf_t next;

        while (nbuf) {
                next = nbuf->next;

                if (!is_mon_pool)
                        dp_audio_smmu_unmap(soc->osdev,
                                            QDF_NBUF_CB_PADDR(nbuf),
                                            buf_size);

                if (dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size,
                                                      false, __func__,
                                                      __LINE__))
                        dp_info_rl("Unable to unmap nbuf: %pK", nbuf);

                qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
                                             QDF_DMA_BIDIRECTIONAL, buf_size);
                dp_rx_nbuf_free(nbuf);
                nbuf = next;
        }

        nbuf = nbuf_free_list;
        while (nbuf) {
                next = nbuf->next;
                dp_rx_nbuf_free(nbuf);
                nbuf = next;
        }
}

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
                                   struct rx_desc_pool *rx_desc_pool)
{
        qdf_nbuf_t nbuf_unmap_list = NULL;
        qdf_nbuf_t nbuf_free_list = NULL;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
                                &nbuf_unmap_list, &nbuf_free_list);
        qdf_spin_unlock_bh(&rx_desc_pool->lock);

        dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
                                rx_desc_pool->buf_size, false);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool,
                          bool is_mon_pool)
{
        qdf_nbuf_t nbuf_unmap_list = NULL;
        qdf_nbuf_t nbuf_free_list = NULL;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
                                &nbuf_unmap_list, &nbuf_free_list);
        qdf_spin_unlock_bh(&rx_desc_pool->lock);

        dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
                                rx_desc_pool->buf_size, is_mon_pool);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

void dp_rx_desc_pool_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
                return;

        dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
                                     &rx_desc_pool->desc_pages, 0, true);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
                            struct rx_desc_pool *rx_desc_pool,
                            uint32_t pool_id)
{
        qdf_spin_lock_bh(&rx_desc_pool->lock);

        rx_desc_pool->freelist = NULL;
        rx_desc_pool->pool_size = 0;

        /* Deinitialize rx mon desc frag flag */
        rx_desc_pool->rx_mon_dest_frag_enable = false;

        soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);

#else
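/*
 * Fallback for builds without RX_DESC_MULTI_PAGE_ALLOC: the descriptors
 * live in a single contiguous array rather than linked multi-page blocks,
 * but the functions below mirror the API of the multi-page variants above.
 */
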
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *                                  rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
        if (!rx_desc_pool->array) {
                dp_err("nss-wifi<4> skip Rx refill");
                return QDF_STATUS_E_NOMEM;
        }
        return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *                           descriptors
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
                                 uint32_t pool_size,
                                 struct rx_desc_pool *rx_desc_pool)
{
        rx_desc_pool->array = qdf_mem_malloc(pool_size *
                                sizeof(union dp_rx_desc_list_elem_t));
        if (!(rx_desc_pool->array)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
                          "RX Desc Pool allocation failed");
                return QDF_STATUS_E_NOMEM;
        }
        return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
                                        struct rx_desc_pool *rx_desc_pool,
                                        uint32_t pool_id)
{
        int i;

        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (i == rx_desc_pool->pool_size - 1)
                        rx_desc_pool->array[i].next = NULL;
                else
                        rx_desc_pool->array[i].next =
                                &rx_desc_pool->array[i + 1];
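                /*
                 * Cookie layout in this single-array variant: pool ID in
                 * the bits at and above bit 18, descriptor index in the
                 * low 18 bits (no page/offset split here).
                 */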
                rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
                rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
                rx_desc_pool->array[i].rx_desc.in_use = 0;
        }
        return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *                          convert the pool of memory into a list of
 *                          rx descriptors and create locks to access this
 *                          list of rx descriptors.
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
                          uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
        QDF_STATUS status;

        /* Initialize the lock */
        qdf_spinlock_create(&rx_desc_pool->lock);

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        rx_desc_pool->pool_size = pool_size;

        /* link SW rx descs into a freelist */
        rx_desc_pool->freelist = &rx_desc_pool->array[0];
        qdf_mem_zero(rx_desc_pool->freelist, rx_desc_pool->pool_size);

        status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
                                                    pool_id);
        if (!QDF_IS_STATUS_SUCCESS(status))
                dp_err("RX desc pool initialization failed");

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
                                   struct rx_desc_pool *rx_desc_pool)
{
        qdf_nbuf_t nbuf;
        int i;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (rx_desc_pool->array[i].rx_desc.in_use) {
                        nbuf = rx_desc_pool->array[i].rx_desc.nbuf;
                        if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
                                dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
                                rx_desc_pool->array[i].rx_desc.unmapped = 1;
                        }
                        dp_rx_nbuf_free(nbuf);
                }
        }
        qdf_mem_free(rx_desc_pool->array);
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool,
                          bool is_mon_pool)
{
        qdf_nbuf_t nbuf;
        int i;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
                if (rx_desc_pool->array[i].rx_desc.in_use) {
                        nbuf = rx_desc_pool->array[i].rx_desc.nbuf;
                        if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
                                dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
                                rx_desc_pool->array[i].rx_desc.unmapped = 1;
                        }
                        dp_rx_nbuf_free(nbuf);
                }
        }
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

/**
 * dp_rx_desc_frag_free() - Free desc frag buffer
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        qdf_dma_addr_t paddr;
        qdf_frag_t vaddr;
        int i;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (rx_desc_pool->array[i].rx_desc.in_use) {
                        paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
                        vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;

                        dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
                        if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
                                qdf_mem_unmap_page(soc->osdev, paddr,
                                                   rx_desc_pool->buf_size,
                                                   QDF_DMA_FROM_DEVICE);
                                rx_desc_pool->array[i].rx_desc.unmapped = 1;
                        }
                        qdf_frag_free(vaddr);
                }
        }
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_frag_free);
#endif

void dp_rx_desc_pool_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        qdf_mem_free(rx_desc_pool->array);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
                            struct rx_desc_pool *rx_desc_pool,
                            uint32_t pool_id)
{
        qdf_spin_lock_bh(&rx_desc_pool->lock);

        rx_desc_pool->freelist = NULL;
        rx_desc_pool->pool_size = 0;

        /* Deinitialize rx mon desc frag flag */
        rx_desc_pool->rx_mon_dest_frag_enable = false;

        soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);

#endif /* RX_DESC_MULTI_PAGE_ALLOC */

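/*
 * dp_rx_desc_pool_deinit_generic() - generic per-arch hook for pool deinit;
 *                                    intentionally empty, for targets that
 *                                    need no arch-specific teardown beyond
 *                                    what dp_rx_desc_pool_deinit() does
 */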
void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
                                    struct rx_desc_pool *rx_desc_pool,
                                    uint32_t pool_id)
{
}

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *                              the free rx desc pool.
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: pointer to the last desc of the returned list (output parameter)
 *
 * Return: number of descs allocated from the free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
                                  struct rx_desc_pool *rx_desc_pool,
                                  uint16_t num_descs,
                                  union dp_rx_desc_list_elem_t **desc_list,
                                  union dp_rx_desc_list_elem_t **tail)
{
        uint16_t count;

        qdf_spin_lock_bh(&rx_desc_pool->lock);

        *desc_list = *tail = rx_desc_pool->freelist;

        for (count = 0; count < num_descs; count++) {
                if (qdf_unlikely(!rx_desc_pool->freelist)) {
                        qdf_spin_unlock_bh(&rx_desc_pool->lock);
                        return count;
                }
                *tail = rx_desc_pool->freelist;
                rx_desc_pool->freelist = rx_desc_pool->freelist->next;
        }
        (*tail)->next = NULL;
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        return count;
}

qdf_export_symbol(dp_rx_get_free_desc_list);

/*
 * dp_rx_add_desc_list_to_free_list() - append an unused desc_list back to
 *                                      the freelist.
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: last desc of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
                                      union dp_rx_desc_list_elem_t **local_desc_list,
                                      union dp_rx_desc_list_elem_t **tail,
                                      uint16_t pool_id,
                                      struct rx_desc_pool *rx_desc_pool)
{
        union dp_rx_desc_list_elem_t *temp_list = NULL;

        qdf_spin_lock_bh(&rx_desc_pool->lock);

        temp_list = rx_desc_pool->freelist;
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
                  temp_list, *local_desc_list, *tail, (*tail)->next);
        rx_desc_pool->freelist = *local_desc_list;
        (*tail)->next = temp_list;
        *tail = NULL;
        *local_desc_list = NULL;

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_add_desc_list_to_free_list);
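
/*
 * Usage sketch for the two freelist helpers above (hypothetical caller;
 * variable names are illustrative, not taken from this file):
 *
 *      union dp_rx_desc_list_elem_t *desc_list = NULL;
 *      union dp_rx_desc_list_elem_t *tail = NULL;
 *      uint16_t got = dp_rx_get_free_desc_list(soc, pool_id, rx_desc_pool,
 *                                              num_req, &desc_list, &tail);
 *
 * 'got' may be less than num_req if the freelist runs out. Descriptors
 * that end up unused are handed back with
 * dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail, pool_id,
 * rx_desc_pool), which splices the local list back onto the pool freelist.
 */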