/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                              \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1;  \
	/* Calculate page divider to find page number */              \
	sig_bit = 0;                                                  \
	while (num_desc_per_page) {                                   \
		sig_bit++;                                            \
		num_desc_per_page = num_desc_per_page >> 1;           \
	}                                                             \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);           \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
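
/*
 * Illustrative sketch, not part of the driver: with the values computed
 * by DP_TX_DESC_PAGE_DIVIDER above, a flat descriptor index can be split
 * into a page number and an offset within that page with a shift and a
 * mask instead of a divide and a modulo. The helper name is
 * hypothetical; the fields are the ones the macro fills in.
 */
static inline void
dp_tx_desc_index_split_example(struct dp_soc *soc, uint8_t pool_id,
		uint32_t desc_index, uint32_t *page_id, uint32_t *offset)
{
	/* page_divider == log2(num_desc_per_page) */
	*page_id = desc_index >> soc->tx_desc[pool_id].page_divider;
	/* offset_filter == num_desc_per_page - 1 */
	*offset = desc_index & soc->tx_desc[pool_id].offset_filter;
}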

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize pool counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: ID of the pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required can be large: with many
 * clients (e.g. 1024 clients x 3 radios), the outstanding MSDUs
 * stored in TQM queues and LMAC queues add up to a significant count.
 *
 * To avoid allocating one large contiguous block, this function uses
 * the qdf multi_page_alloc function to spread the allocation across
 * multiple pages. It then walks the memory allocated across pages and
 * links each descriptor to the next, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, one pool is allocated
 * per ring; this minimizes lock contention when hard_start_xmit is
 * called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for VDEV
 * level flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	uint16_t num_page, num_desc_per_page;
	struct dp_tx_desc_s *tx_desc_elem;
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
	tx_desc_pool->elem_size = desc_size;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem,
		0, true);
	if (!tx_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Multi page alloc fail, tx desc");
		goto fail_exit;
	}

	num_page = tx_desc_pool->desc_pages.num_pages;
	num_desc_per_page =
		tx_desc_pool->desc_pages.num_element_per_page;
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
			*tx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"invalid tx desc allocation - overflow num link");
		goto free_tx_desc;
	}

	/* Set unique IDs for each Tx descriptor */
	tx_desc_elem = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_tx_desc:
	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
fail_exit:
	return QDF_STATUS_E_FAULT;
}
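
/*
 * Illustrative sketch, not part of the driver: decoding the descriptor
 * ID composed in dp_tx_desc_pool_alloc() above. Only the two shift
 * macros used in this file are assumed; the masks are derived from
 * them, on the assumption that the offset field occupies the bits below
 * DP_TX_DESC_ID_PAGE_OS and the page field the bits between the two
 * shifts.
 */
static inline void
dp_tx_desc_id_decode_example(uint32_t id, uint32_t *pool_id,
		uint32_t *page_id, uint32_t *offset)
{
	*pool_id = id >> DP_TX_DESC_ID_POOL_OS;
	*page_id = (id >> DP_TX_DESC_ID_PAGE_OS) &
		((1 << (DP_TX_DESC_ID_POOL_OS - DP_TX_DESC_ID_PAGE_OS)) - 1);
	*offset = id & ((1 << DP_TX_DESC_ID_PAGE_OS) - 1);
}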

/**
 * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx
 *			    Descriptors
 * @soc: Handle to DP SoC structure
 * @pool_id: ID of the pool to free
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool =
				&((soc)->tx_desc[(pool_id)]);

	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	return QDF_STATUS_SUCCESS;
}
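
/*
 * Usage sketch (hypothetical wrapper, not upstream API): pairing the
 * per-pool alloc and free above across all pools, one pool per Tx ring
 * as described in the dp_tx_desc_pool_alloc() comment, unwinding on
 * partial failure.
 */
static inline QDF_STATUS
dp_tx_desc_pools_alloc_example(struct dp_soc *soc, uint8_t num_pool,
		uint16_t num_elem)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_elem) !=
				QDF_STATUS_SUCCESS)
			goto unwind;
	}
	return QDF_STATUS_SUCCESS;

unwind:
	/* free only the pools that were successfully allocated */
	while (i--)
		dp_tx_desc_pool_free(soc, i);
	return QDF_STATUS_E_FAULT;
}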

/**
 * dp_tx_ext_desc_pool_alloc() - Allocate tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 * @num_elem: number of descriptor elements
 *
 * Return: QDF_STATUS_SUCCESS on success; error code otherwise
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint16_t num_page;
	uint32_t count;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	QDF_STATUS status;

	/* Coherent tx extension descriptor alloc */
	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		soc->tx_ext_desc[pool_id].elem_size,
		soc->tx_ext_desc[pool_id].elem_count,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto fail_exit;
	}

	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;
	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also sizable: a single element is 24 bytes,
	 * so 2K elements take 48 KB. Allocate it as multi-page cacheable
	 * memory as well.
	 */
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, 0,
		true);
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc_page;
	}

	/* link tx descriptors into a freelist */
	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto free_ext_link_desc_page;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &soc->tx_ext_desc[pool_id].desc_pages;
	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
	c_elem = soc->tx_ext_desc[pool_id].freelist;
	p_elem = c_elem;
	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
		if (!(count % pages->num_element_per_page)) {
			/*
			 * First element of a new page:
			 * point it at the start of the next DMA page
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"link overflow");
				status = QDF_STATUS_E_FAULT;
				goto free_ext_link_desc_page;
			}
			c_elem->vaddr = (void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				soc->tx_ext_desc[pool_id].elem_size);
			c_elem->paddr = (p_elem->paddr +
				soc->tx_ext_desc[pool_id].elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	soc->tx_ext_desc[pool_id].num_free = num_elem;
	qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

fail_exit:
	return status;
}
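
/*
 * Illustrative sketch (hypothetical helper, not upstream API): popping
 * one extension descriptor off the freelist built above. Each element
 * carries both the CPU view (vaddr) and the device view (paddr) of its
 * slice of the coherent allocation, so callers never touch the DMA
 * pages directly.
 */
static inline struct dp_tx_ext_desc_elem_s *
dp_tx_ext_desc_pop_example(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_ext_desc_elem_s *elem;

	qdf_spin_lock_bh(&soc->tx_ext_desc[pool_id].lock);
	elem = soc->tx_ext_desc[pool_id].freelist;
	if (elem) {
		soc->tx_ext_desc[pool_id].freelist = elem->next;
		soc->tx_ext_desc[pool_id].num_free--;
	}
	qdf_spin_unlock_bh(&soc->tx_ext_desc[pool_id].lock);
	return elem;
}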

/**
 * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

	qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 * @num_elem: number of descriptor elements
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_NOMEM on failure
 */
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	soc->tx_tso_desc[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("Alloc Failed %pK pool_id %d"),
			soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_desc[pool_id].freelist = c_element;
	soc->tx_tso_desc[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %pK pool_id %d"),
				soc, pool_id);
			goto fail;
		}

		soc->tx_tso_desc[pool_id].num_free++;
		c_element = c_element->next;
		c_element->next = NULL;
	}

	TSO_DEBUG("Number of free descriptors: %u\n",
		soc->tx_tso_desc[pool_id].num_free);
	soc->tx_tso_desc[pool_id].pool_size = num_elem;
	qdf_spinlock_create(&soc->tx_tso_desc[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_desc[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}

	return QDF_STATUS_E_NOMEM;
}
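
/*
 * Illustrative sketch (hypothetical helpers, not upstream API): taking a
 * TSO segment element from the freelist built above and returning it,
 * under the pool lock created in dp_tx_tso_desc_pool_alloc().
 */
static inline struct qdf_tso_seg_elem_t *
dp_tx_tso_desc_pop_example(struct dp_soc *soc, uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *elem;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	elem = soc->tx_tso_desc[pool_id].freelist;
	if (elem) {
		soc->tx_tso_desc[pool_id].freelist = elem->next;
		soc->tx_tso_desc[pool_id].num_free--;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
	return elem;
}

static inline void
dp_tx_tso_desc_push_example(struct dp_soc *soc, uint8_t pool_id,
		struct qdf_tso_seg_elem_t *elem)
{
	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	elem->next = soc->tx_tso_desc[pool_id].freelist;
	soc->tx_tso_desc[pool_id].freelist = elem;
	soc->tx_tso_desc[pool_id].num_free++;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
}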

/**
 * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 *
 * Return: None
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	c_element = soc->tx_tso_desc[pool_id].freelist;

	if (!c_element) {
		qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_desc[pool_id].pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_desc[pool_id].freelist = NULL;
	soc->tx_tso_desc[pool_id].num_free = 0;
	soc->tx_tso_desc[pool_id].pool_size = 0;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
	qdf_spinlock_destroy(&soc->tx_tso_desc[pool_id].lock);
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *				    fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	soc->tx_tso_num_seg[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("Alloc Failed %pK pool_id %d"),
			soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_num_seg[pool_id].freelist = c_element;
	soc->tx_tso_num_seg[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %pK pool_id %d"),
				soc, pool_id);
			goto fail;
		}

		soc->tx_tso_num_seg[pool_id].num_free++;
		c_element = c_element->next;
		c_element->next = NULL;
	}

	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = num_elem;
	qdf_spinlock_create(&soc->tx_tso_num_seg[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_num_seg[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}

	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free the pool of descriptors that
 *				   track the fragments in tso segments
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	c_element = soc->tx_tso_num_seg[pool_id].freelist;

	if (!c_element) {
		qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_num_seg[pool_id].num_seg_pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_num_seg[pool_id].freelist = NULL;
	soc->tx_tso_num_seg[pool_id].num_free = 0;
	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = 0;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	qdf_spinlock_destroy(&soc->tx_tso_num_seg[pool_id].lock);
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}
#endif /* FEATURE_TSO */