dp_rx_desc.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"
#include <qdf_module.h>

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
                      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
                      1 << DP_RX_DESC_PAGE_ID_SHIFT);

/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *                                  rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM otherwise
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
        if (!rx_desc_pool->desc_pages.num_pages) {
                dp_err("Multi page alloc fail, size=%d, elem=%d",
                       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
                return QDF_STATUS_E_NOMEM;
        }
        return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *                           descriptors
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *         QDF_STATUS_E_NOMEM or QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
                                 uint32_t num_elem,
                                 struct rx_desc_pool *rx_desc_pool)
{
        uint32_t desc_size;
        union dp_rx_desc_list_elem_t *rx_desc_elem;

        desc_size = sizeof(*rx_desc_elem);
        rx_desc_pool->elem_size = desc_size;
        dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
                                      &rx_desc_pool->desc_pages,
                                      desc_size, num_elem, 0, true);
        if (!rx_desc_pool->desc_pages.num_pages) {
                qdf_err("Multi page alloc fail, size=%d, elem=%d",
                        desc_size, num_elem);
                return QDF_STATUS_E_NOMEM;
        }

        if (qdf_mem_multi_page_link(soc->osdev,
                                    &rx_desc_pool->desc_pages,
                                    desc_size, num_elem, true)) {
                qdf_err("overflow num link, size=%d, elem=%d",
                        desc_size, num_elem);
                goto free_rx_desc_pool;
        }
        return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
        dp_rx_desc_pool_free(soc, rx_desc_pool);

        return QDF_STATUS_E_FAULT;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
                                        struct rx_desc_pool *rx_desc_pool,
                                        uint32_t pool_id)
{
        uint32_t id, page_id, offset, num_desc_per_page;
        uint32_t count = 0;
        union dp_rx_desc_list_elem_t *rx_desc_elem;

        num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;

        rx_desc_elem = rx_desc_pool->freelist;
        while (rx_desc_elem) {
                page_id = count / num_desc_per_page;
                offset = count % num_desc_per_page;
                /*
                 * Below cookie size is from REO destination ring
                 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
                 * cookie size = 21 bits
                 * 8 bits - offset
                 * 8 bits - page ID
                 * 4 bits - pool ID
                 */
                id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
                      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
                      offset);
                rx_desc_elem->rx_desc.cookie = id;
                rx_desc_elem->rx_desc.pool_id = pool_id;
                rx_desc_elem->rx_desc.in_use = 0;
                rx_desc_elem = rx_desc_elem->next;
                count++;
        }
        return QDF_STATUS_SUCCESS;
}
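
/*
 * Illustrative sketch (not part of the driver): how the cookie built
 * above can be decomposed back into its pool/page/offset fields. The
 * shift macros are the ones used in the encode above; the mask widths
 * (4/8/8 bits) and the EX_* helper names are assumptions made for
 * illustration, not definitions taken from dp_rx.h.
 */
#if 0   /* example only */
#define EX_RX_DESC_OFFSET_MASK  0xff    /* low 8 bits: offset in page */
#define EX_RX_DESC_PAGE_MASK    0xff    /* next 8 bits: page ID */
#define EX_RX_DESC_POOL_MASK    0xf     /* top 4 bits: pool ID */

static inline void ex_rx_desc_cookie_decode(uint32_t cookie,
                                            uint8_t *pool_id,
                                            uint8_t *page_id,
                                            uint8_t *offset)
{
        /* Inverse of: id = (pool << POOL_SHIFT) | (page << PAGE_SHIFT) | offset */
        *offset = cookie & EX_RX_DESC_OFFSET_MASK;
        *page_id = (cookie >> DP_RX_DESC_PAGE_ID_SHIFT) & EX_RX_DESC_PAGE_MASK;
        *pool_id = (cookie >> DP_RX_DESC_POOL_ID_SHIFT) & EX_RX_DESC_POOL_MASK;
}
#endif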

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *                          convert the pool of memory into a list of
 *                          rx descriptors and create locks to access this
 *                          list of rx descriptors.
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
                          uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
        QDF_STATUS status;

        /* Initialize the lock */
        qdf_spinlock_create(&rx_desc_pool->lock);

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        rx_desc_pool->pool_size = pool_size;

        rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
                                  *rx_desc_pool->desc_pages.cacheable_pages;

        status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
                                                    pool_id);
        if (!QDF_IS_STATUS_SUCCESS(status))
                dp_err("RX desc pool initialization failed");

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);
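
/*
 * Minimal usage sketch (illustrative, not part of the driver): the
 * expected lifecycle of a pool on this path is alloc -> init ->
 * ...datapath runs... -> nbuf free -> deinit -> pool free. The
 * ex_* function and the 1024-descriptor batch size are placeholders.
 */
#if 0   /* example only */
static QDF_STATUS ex_rx_pool_lifecycle(struct dp_soc *soc,
                                       struct rx_desc_pool *pool,
                                       uint32_t pool_id)
{
        const uint32_t num_desc = 1024; /* placeholder pool size */

        if (!QDF_IS_STATUS_SUCCESS(dp_rx_desc_pool_alloc(soc, num_desc,
                                                         pool)))
                return QDF_STATUS_E_NOMEM;

        dp_rx_desc_pool_init(soc, pool_id, num_desc, pool);

        /* ... datapath runs; descriptors cycle through the freelist ... */

        dp_rx_desc_nbuf_free(soc, pool);        /* release attached nbufs */
        dp_rx_desc_pool_deinit(soc, pool, pool_id);
        dp_rx_desc_pool_free(soc, pool);        /* release the desc pages */

        return QDF_STATUS_SUCCESS;
}
#endif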

union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
                                              struct rx_desc_pool *rx_desc_pool)
{
        return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
                rx_desc_pool->elem_size * offset;
}
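
/*
 * Worked example (illustrative, not part of the driver): a flat
 * descriptor index maps to a (page, offset) pair exactly as in the
 * init/collect loops, e.g. with 128 elements per page, index 300 lands
 * on page 300 / 128 = 2 at offset 300 % 128 = 44. The ex_* helper is
 * hypothetical.
 */
#if 0   /* example only */
static inline struct dp_rx_desc *
ex_rx_desc_from_index(struct rx_desc_pool *pool, uint32_t i)
{
        uint32_t page_id = i / pool->desc_pages.num_element_per_page;
        uint32_t offset = i % pool->desc_pages.num_element_per_page;

        return &dp_rx_desc_find(page_id, offset, pool)->rx_desc;
}
#endif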

static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
                                          struct rx_desc_pool *rx_desc_pool,
                                          qdf_nbuf_t *nbuf_unmap_list,
                                          qdf_nbuf_t *nbuf_free_list)
{
        uint32_t i, num_desc, page_id, offset, num_desc_per_page;
        union dp_rx_desc_list_elem_t *rx_desc_elem;
        struct dp_rx_desc *rx_desc;

        if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
                qdf_err("No pages found on this desc pool");
                return QDF_STATUS_E_INVAL;
        }
        num_desc = rx_desc_pool->pool_size;
        num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
        for (i = 0; i < num_desc; i++) {
                page_id = i / num_desc_per_page;
                offset = i % num_desc_per_page;
                rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
                rx_desc = &rx_desc_elem->rx_desc;
                dp_rx_desc_free_dbg_info(rx_desc);
                if (rx_desc->in_use) {
                        if (!rx_desc->unmapped) {
                                DP_RX_HEAD_APPEND(*nbuf_unmap_list,
                                                  rx_desc->nbuf);
                                rx_desc->unmapped = 1;
                        } else {
                                DP_RX_HEAD_APPEND(*nbuf_free_list,
                                                  rx_desc->nbuf);
                        }
                }
        }
        return QDF_STATUS_SUCCESS;
}

static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
                                    qdf_nbuf_t nbuf_unmap_list,
                                    qdf_nbuf_t nbuf_free_list,
                                    uint16_t buf_size)
{
        qdf_nbuf_t nbuf = nbuf_unmap_list;
        qdf_nbuf_t next;

        while (nbuf) {
                next = nbuf->next;

                if (dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size,
                                                      false))
                        dp_info_rl("Unable to unmap nbuf: %pK", nbuf);

                qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
                                             QDF_DMA_BIDIRECTIONAL, buf_size);
                dp_rx_nbuf_free(nbuf);
                nbuf = next;
        }

        nbuf = nbuf_free_list;
        while (nbuf) {
                next = nbuf->next;
                dp_rx_nbuf_free(nbuf);
                nbuf = next;
        }
}

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
                                   struct rx_desc_pool *rx_desc_pool)
{
        qdf_nbuf_t nbuf_unmap_list = NULL;
        qdf_nbuf_t nbuf_free_list = NULL;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
                                &nbuf_unmap_list, &nbuf_free_list);
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
                                rx_desc_pool->buf_size);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        qdf_nbuf_t nbuf_unmap_list = NULL;
        qdf_nbuf_t nbuf_free_list = NULL;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
                                &nbuf_unmap_list, &nbuf_free_list);
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
                                rx_desc_pool->buf_size);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

void dp_rx_desc_pool_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
                return;
        dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
                                     &rx_desc_pool->desc_pages, 0, true);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
                            struct rx_desc_pool *rx_desc_pool,
                            uint32_t pool_id)
{
        qdf_spin_lock_bh(&rx_desc_pool->lock);

        rx_desc_pool->freelist = NULL;
        rx_desc_pool->pool_size = 0;

        /* Deinitialize rx mon desc frag flag */
        rx_desc_pool->rx_mon_dest_frag_enable = false;

        soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);

#else

/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *                                  rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM otherwise
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
        if (!rx_desc_pool->array) {
                dp_err("nss-wifi<4> skip Rx refill");
                return QDF_STATUS_E_NOMEM;
        }
        return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *                           descriptors
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM otherwise
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
                                 uint32_t pool_size,
                                 struct rx_desc_pool *rx_desc_pool)
{
        rx_desc_pool->array = qdf_mem_malloc(pool_size *
                                     sizeof(union dp_rx_desc_list_elem_t));

        if (!(rx_desc_pool->array)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
                          "RX Desc Pool allocation failed");
                return QDF_STATUS_E_NOMEM;
        }
        return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
                                        struct rx_desc_pool *rx_desc_pool,
                                        uint32_t pool_id)
{
        int i;

        /* Link the descriptors into a freelist and stamp each cookie */
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (i == rx_desc_pool->pool_size - 1)
                        rx_desc_pool->array[i].next = NULL;
                else
                        rx_desc_pool->array[i].next =
                                &rx_desc_pool->array[i + 1];
                rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
                rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
                rx_desc_pool->array[i].rx_desc.in_use = 0;
        }
        return QDF_STATUS_SUCCESS;
}
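
/*
 * Illustrative sketch (not part of the driver): on this path the cookie
 * packs the array index in the low bits with the pool ID above bit 18,
 * mirroring the `i | (pool_id << 18)` encode in the loop above. The
 * 18-bit mask is inferred from that shift, not taken from a header,
 * and the ex_* helpers are hypothetical.
 */
#if 0   /* example only */
#define EX_RX_DESC_INDEX_MASK   0x3ffff /* low 18 bits: array index */

static inline uint32_t ex_rx_desc_cookie_to_index(uint32_t cookie)
{
        return cookie & EX_RX_DESC_INDEX_MASK;
}

static inline uint8_t ex_rx_desc_cookie_to_pool(uint32_t cookie)
{
        return cookie >> 18;    /* matches the encode shift above */
}
#endif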

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *                          convert the pool of memory into a list of
 *                          rx descriptors and create locks to access this
 *                          list of rx descriptors.
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
                          uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
        QDF_STATUS status;

        /* Initialize the lock */
        qdf_spinlock_create(&rx_desc_pool->lock);

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        rx_desc_pool->pool_size = pool_size;

        /* link SW rx descs into a freelist */
        rx_desc_pool->freelist = &rx_desc_pool->array[0];
        qdf_mem_zero(rx_desc_pool->freelist,
                     rx_desc_pool->pool_size *
                     sizeof(union dp_rx_desc_list_elem_t));

        status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
                                                    pool_id);
        if (!QDF_IS_STATUS_SUCCESS(status))
                dp_err("RX desc pool initialization failed");

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
                                   struct rx_desc_pool *rx_desc_pool)
{
        qdf_nbuf_t nbuf;
        int i;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (rx_desc_pool->array[i].rx_desc.in_use) {
                        nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

                        if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
                                dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
                                rx_desc_pool->array[i].rx_desc.unmapped = 1;
                        }
                        dp_rx_nbuf_free(nbuf);
                }
        }
        qdf_mem_free(rx_desc_pool->array);
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        qdf_nbuf_t nbuf;
        int i;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (rx_desc_pool->array[i].rx_desc.in_use) {
                        nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

                        if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
                                dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
                                rx_desc_pool->array[i].rx_desc.unmapped = 1;
                        }
                        dp_rx_nbuf_free(nbuf);
                }
        }
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

/**
 * dp_rx_desc_frag_free() - Free desc frag buffer
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        qdf_dma_addr_t paddr;
        qdf_frag_t vaddr;
        int i;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (rx_desc_pool->array[i].rx_desc.in_use) {
                        paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
                        vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;

                        if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
                                qdf_mem_unmap_page(soc->osdev, paddr,
                                                   rx_desc_pool->buf_size,
                                                   QDF_DMA_FROM_DEVICE);
                                rx_desc_pool->array[i].rx_desc.unmapped = 1;
                        }
                        qdf_frag_free(vaddr);
                }
        }
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_frag_free);
#endif

void dp_rx_desc_pool_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        qdf_mem_free(rx_desc_pool->array);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
                            struct rx_desc_pool *rx_desc_pool,
                            uint32_t pool_id)
{
        qdf_spin_lock_bh(&rx_desc_pool->lock);

        rx_desc_pool->freelist = NULL;
        rx_desc_pool->pool_size = 0;

        /* Deinitialize rx mon desc frag flag */
        rx_desc_pool->rx_mon_dest_frag_enable = false;

        soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);

#endif /* RX_DESC_MULTI_PAGE_ALLOC */

void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
                                    struct rx_desc_pool *rx_desc_pool,
                                    uint32_t pool_id)
{
}

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *                              the free rx desc pool.
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: pointer to the last desc of the returned list (output parameter)
 *
 * Return: number of descs allocated from free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
                                  struct rx_desc_pool *rx_desc_pool,
                                  uint16_t num_descs,
                                  union dp_rx_desc_list_elem_t **desc_list,
                                  union dp_rx_desc_list_elem_t **tail)
{
        uint16_t count;

        qdf_spin_lock_bh(&rx_desc_pool->lock);

        *desc_list = *tail = rx_desc_pool->freelist;

        for (count = 0; count < num_descs; count++) {
                if (qdf_unlikely(!rx_desc_pool->freelist)) {
                        qdf_spin_unlock_bh(&rx_desc_pool->lock);
                        return count;
                }
                *tail = rx_desc_pool->freelist;
                rx_desc_pool->freelist = rx_desc_pool->freelist->next;
        }
        (*tail)->next = NULL;
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        return count;
}

qdf_export_symbol(dp_rx_get_free_desc_list);

/*
 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
 *                                      the freelist.
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last desc of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
                                      union dp_rx_desc_list_elem_t **local_desc_list,
                                      union dp_rx_desc_list_elem_t **tail,
                                      uint16_t pool_id,
                                      struct rx_desc_pool *rx_desc_pool)
{
        union dp_rx_desc_list_elem_t *temp_list = NULL;

        qdf_spin_lock_bh(&rx_desc_pool->lock);

        temp_list = rx_desc_pool->freelist;
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
                  temp_list, *local_desc_list, *tail, (*tail)->next);
        rx_desc_pool->freelist = *local_desc_list;
        (*tail)->next = temp_list;
        *tail = NULL;
        *local_desc_list = NULL;

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_add_desc_list_to_free_list);
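
/*
 * Usage sketch (illustrative, not part of the driver): a replenish-style
 * caller takes a batch of descriptors off the freelist, consumes some,
 * and hands the unused tail back. The ex_* function and the batch size
 * are placeholders.
 */
#if 0   /* example only */
static void ex_rx_replenish(struct dp_soc *soc, uint32_t pool_id,
                            struct rx_desc_pool *pool)
{
        union dp_rx_desc_list_elem_t *desc_list = NULL;
        union dp_rx_desc_list_elem_t *tail = NULL;
        uint16_t wanted = 32;   /* placeholder batch size */
        uint16_t got;

        got = dp_rx_get_free_desc_list(soc, pool_id, pool, wanted,
                                       &desc_list, &tail);
        if (!got)
                return;

        /* ... attach nbufs to the first N descriptors, advancing
         * desc_list past each consumed element ... */

        /* Return whatever is left; tail must point at the last unused elem */
        if (desc_list)
                dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
                                                 pool_id, pool);
}
#endif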