dp_tx_desc.c
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
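
/*
 * Illustrative sketch (not part of the driver): when DESC_PARTITION is
 * not defined, num_desc_per_page is a power of two, so page_divider and
 * offset_filter let a flat descriptor ID be split into a page index and
 * an in-page offset with one shift and one mask. The locals below
 * (desc_id, page, offset) are hypothetical names for illustration.
 *
 *	uint32_t desc_id = 70;	// with e.g. 64 descriptors per page
 *	uint32_t page = desc_id >> soc->tx_desc[pool_id].page_divider;
 *	uint32_t offset = desc_id & soc->tx_desc[pool_id].offset_filter;
 *	// desc_id 70 at 64 descs/page -> page 1, offset 6
 */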
/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
	tx_desc_pool->elem_count = num_elem;
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_clean_up() - Clean up the tx descriptors
 * @ctxt: context passed
 * @elem: element to be cleaned up
 * @elem_list: head of the nbuf list onto which freed buffers are chained
 *
 * Return: None
 */
static void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
{
	struct dp_soc *soc = (struct dp_soc *)ctxt;
	struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
	qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
	qdf_nbuf_t nbuf = NULL;

	if (tx_desc->nbuf) {
		nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		if (nbuf) {
			if (!nbuf_list) {
				dp_err("potential memory leak");
				qdf_assert_always(0);
			}

			nbuf->next = *nbuf_list;
			*nbuf_list = nbuf;
		}
	}
}

void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
	int i;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
	uint32_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];

		if (tx_desc_pool)
			qdf_tx_desc_pool_free_bufs(soc,
						   &tx_desc_pool->desc_pages,
						   tx_desc_pool->elem_size,
						   tx_desc_pool->elem_count,
						   true, &dp_tx_desc_clean_up,
						   nbuf_list);
	}
}
#endif
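
/*
 * Illustrative caller sketch (hypothetical, not part of this file):
 * dp_tx_desc_pool_cleanup() walks every pool and chains any in-flight
 * nbufs onto the caller-provided list head, which the caller then frees
 * in one pass, for example:
 *
 *	qdf_nbuf_t nbuf_list = NULL;
 *
 *	dp_tx_desc_pool_cleanup(soc, &nbuf_list);
 *	while (nbuf_list) {
 *		qdf_nbuf_t next = nbuf_list->next;
 *
 *		qdf_nbuf_free(nbuf_list);
 *		nbuf_list = next;
 *	}
 */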
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem)
{
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
	tx_desc_pool = &soc->tx_desc[pool_id];
	tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
				      &tx_desc_pool->desc_pages,
				      desc_size, num_elem,
				      0, true);

	if (!tx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &soc->tx_desc[pool_id];

	if (tx_desc_pool->desc_pages.num_pages)
		dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
					     &tx_desc_pool->desc_pages, 0,
					     true);
}

QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	tx_desc_pool = &soc->tx_desc[pool_id];
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		dp_err("invalid tx desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;

	/* Set unique IDs for each Tx descriptor */
	if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
						soc, num_elem, pool_id)) {
		dp_err("initialization per target failed");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &soc->tx_desc[pool_id];
	soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool, pool_id);

	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}
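
/*
 * Illustrative lifecycle sketch (hypothetical caller, not part of this
 * file): a pool is first allocated, then initialized; teardown runs in
 * the reverse order. Error handling is abbreviated here.
 *
 *	if (QDF_IS_STATUS_ERROR(dp_tx_desc_pool_alloc(soc, pool_id,
 *						      num_elem)))
 *		return QDF_STATUS_E_NOMEM;
 *	if (QDF_IS_STATUS_ERROR(dp_tx_desc_pool_init(soc, pool_id,
 *						     num_elem))) {
 *		dp_tx_desc_pool_free(soc, pool_id);
 *		return QDF_STATUS_E_FAULT;
 *	}
 *	...
 *	dp_tx_desc_pool_deinit(soc, pool_id);
 *	dp_tx_desc_pool_free(soc, pool_id);
 */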
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;
	uint8_t pool_id, count;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	/* Coherent tx extension descriptor alloc */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &soc->tx_ext_desc[pool_id];
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_alloc(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					elem_size,
					num_elem,
					memctx, false);

		if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto fail_exit;
		}
	}

	/*
	 * Cacheable ext descriptor link alloc:
	 * this structure is already large; a single element is 24 bytes,
	 * so 2K elements take 48 KB. Multi-page cacheable memory must be
	 * allocated for it.
	 */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &soc->tx_ext_desc[pool_id];
		dp_desc_multi_pages_mem_alloc(
					soc,
					DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					link_elem_size,
					num_elem,
					0, true);

		if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto free_ext_desc_page;
		}
	}

	return status;

free_ext_desc_page:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &soc->tx_ext_desc[count];
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);
	}

	pool_id = num_pool;

fail_exit:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &soc->tx_ext_desc[count];
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}

	return status;
}
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint8_t pool_id;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &soc->tx_ext_desc[pool_id];
		soc->tx_ext_desc[pool_id].elem_size =
			HAL_TX_EXT_DESC_WITH_META_DATA;
		soc->tx_ext_desc[pool_id].link_elem_size =
			sizeof(struct dp_tx_ext_desc_elem_s);
		soc->tx_ext_desc[pool_id].elem_count = num_elem;

		dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
			*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

		if (qdf_mem_multi_page_link(soc->osdev,
					    &dp_tx_ext_desc_pool->
					    desc_link_pages,
					    dp_tx_ext_desc_pool->link_elem_size,
					    dp_tx_ext_desc_pool->elem_count,
					    true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page linking fail");
			status = QDF_STATUS_E_FAULT;
			goto fail;
		}

		/* Assign coherent memory pointer into linked free list */
		pages = &dp_tx_ext_desc_pool->desc_pages;
		page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
		c_elem = dp_tx_ext_desc_pool->freelist;
		p_elem = c_elem;
		for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
			if (!(i % pages->num_element_per_page)) {
				/*
				 * First element of a new page
				 * should point to the next page
				 */
				if (!pages->dma_pages->page_v_addr_start) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "link overflow");
					status = QDF_STATUS_E_FAULT;
					goto fail;
				}

				c_elem->vaddr =
					(void *)page_info->page_v_addr_start;
				c_elem->paddr = page_info->page_p_addr;
				page_info++;
			} else {
				c_elem->vaddr = (void *)(p_elem->vaddr +
					dp_tx_ext_desc_pool->elem_size);
				c_elem->paddr = (p_elem->paddr +
					dp_tx_ext_desc_pool->elem_size);
			}

			p_elem = c_elem;
			c_elem = c_elem->next;
			if (!c_elem)
				break;
		}

		dp_tx_ext_desc_pool->num_free = num_elem;
		qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
	}

	return QDF_STATUS_SUCCESS;

fail:
	return status;
}
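
/*
 * Illustrative address-math sketch (not part of the driver): each
 * cacheable link element above is paired with one coherent (DMA)
 * extension descriptor, and consecutive elements within a DMA page are
 * strided by elem_size. Element i's addresses could therefore also be
 * computed directly; all locals below are hypothetical.
 *
 *	uint32_t per_page = pages->num_element_per_page;
 *	struct qdf_mem_dma_page_t *pg = &pages->dma_pages[i / per_page];
 *	void *vaddr = pg->page_v_addr_start + (i % per_page) * elem_size;
 *	qdf_dma_addr_t paddr = pg->page_p_addr + (i % per_page) * elem_size;
 */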
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	qdf_dma_context_t memctx = 0;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &soc->tx_ext_desc[pool_id];
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);

		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}
}

void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &soc->tx_ext_desc[pool_id];
		qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
	}
}
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		tso_desc_pool->num_free = 0;
		dp_desc_multi_pages_mem_alloc(
					soc,
					DP_TX_TSO_DESC_TYPE,
					&tso_desc_pool->desc_pages,
					desc_size,
					num_elem, 0, true);

		if (!tso_desc_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso desc");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_desc_pool = &soc->tx_tso_desc[i];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
					     &tso_desc_pool->desc_pages,
					     0, true);
	}
	return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
					     &tso_desc_pool->desc_pages,
					     0, true);
	}
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];

		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_desc_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso desc allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
			*tso_desc_pool->desc_pages.cacheable_pages;
		tso_desc_pool->num_free = num_elem;

		TSO_DEBUG("Number of free descriptors: %u\n",
			  tso_desc_pool->num_free);
		tso_desc_pool->pool_size = num_elem;
		qdf_spinlock_create(&tso_desc_pool->lock);
	}

	return QDF_STATUS_SUCCESS;
}
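
/*
 * Illustrative freelist sketch (hypothetical, not part of this file):
 * once initialized, a TSO segment is taken by popping the head of the
 * freelist under the pool lock, and returned by pushing it back. The
 * driver's real alloc/free helpers live elsewhere; this only shows the
 * list discipline the pool is set up for.
 *
 *	struct qdf_tso_seg_elem_t *seg;
 *
 *	qdf_spin_lock_bh(&tso_desc_pool->lock);
 *	seg = tso_desc_pool->freelist;
 *	if (seg) {
 *		tso_desc_pool->freelist = seg->next;
 *		tso_desc_pool->num_free--;
 *	}
 *	qdf_spin_unlock_bh(&tso_desc_pool->lock);
 */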
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];

		qdf_spin_lock_bh(&tso_desc_pool->lock);

		tso_desc_pool->freelist = NULL;
		tso_desc_pool->num_free = 0;
		tso_desc_pool->pool_size = 0;
		qdf_spin_unlock_bh(&tso_desc_pool->lock);
		qdf_spinlock_destroy(&tso_desc_pool->lock);
	}
}
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		tso_num_seg_pool->num_free = 0;
		dp_desc_multi_pages_mem_alloc(soc, DP_TX_TSO_NUM_SEG_TYPE,
					      &tso_num_seg_pool->desc_pages,
					      desc_size,
					      num_elem, 0, true);

		if (!tso_num_seg_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso_num_seg_pool");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[i];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
					     &tso_num_seg_pool->desc_pages,
					     0, true);
	}
	return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
					     &tso_num_seg_pool->desc_pages,
					     0, true);
	}
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_num_seg_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso num seg allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
			*tso_num_seg_pool->desc_pages.cacheable_pages;
		tso_num_seg_pool->num_free = num_elem;
		tso_num_seg_pool->num_seg_pool_size = num_elem;

		qdf_spinlock_create(&tso_num_seg_pool->lock);
	}

	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

		qdf_spin_lock_bh(&tso_num_seg_pool->lock);

		tso_num_seg_pool->freelist = NULL;
		tso_num_seg_pool->num_free = 0;
		tso_num_seg_pool->num_seg_pool_size = 0;
		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
	}
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif