dp_tx_desc.c
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \
do { \
	uint8_t sig_bit; \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */ \
	sig_bit = 0; \
	while (num_desc_per_page) { \
		sig_bit++; \
		num_desc_per_page = num_desc_per_page >> 1; \
	} \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1); \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
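
/*
 * Illustrative sketch (not part of the driver): with DP_TX_DESC_SIZE()
 * rounding the element size up to a power of two, num_desc_per_page is
 * also a power of two, so offset_filter and page_divider let a descriptor
 * index be split into a page number and an in-page offset using shift and
 * mask instead of divide and modulo:
 *
 *	page_id = desc_index >> soc->tx_desc[pool_id].page_divider;
 *	offset  = desc_index & soc->tx_desc[pool_id].offset_filter;
 */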
/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required is large: based on the number of
 * clients (1024 clients x 3 radios), the number of outstanding MSDUs stored
 * in TQM queues and LMAC queues will be significant.
 *
 * To avoid allocating a large contiguous block of memory, it uses the
 * multi_page_alloc qdf function to allocate memory in multiple pages.
 * It then iterates through the memory allocated across pages and links
 * each descriptor to the next descriptor, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for VDEV-level
 * flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	uint16_t num_page, num_desc_per_page;
	struct dp_tx_desc_s *tx_desc_elem;
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
	tx_desc_pool->elem_size = desc_size;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem,
		0, true);
	if (!tx_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Multi page alloc fail, tx desc");
		goto fail_exit;
	}

	num_page = tx_desc_pool->desc_pages.num_pages;
	num_desc_per_page =
		tx_desc_pool->desc_pages.num_element_per_page;
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
			*tx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"invalid tx desc allocation - overflow num link");
		goto free_tx_desc;
	}

	/* Set unique IDs for each Tx descriptor */
	tx_desc_elem = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_tx_desc:
	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
fail_exit:
	return QDF_STATUS_E_FAULT;
}
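
/*
 * Illustrative sketch (not part of the driver): reversing the descriptor ID
 * encoding built in the loop above. The shift names come from that loop;
 * the mask variables here are hypothetical stand-ins for whatever field
 * widths the real headers define.
 *
 *	uint32_t pool_id = (id >> DP_TX_DESC_ID_POOL_OS) & pool_mask;
 *	uint32_t page_id = (id >> DP_TX_DESC_ID_PAGE_OS) & page_mask;
 *	uint32_t offset  = id & offset_mask;
 *
 * The host address of the descriptor can then be recovered as
 * cacheable_pages[page_id] + offset * elem_size.
 */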
/**
 * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx Descriptors
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool =
				&((soc)->tx_desc[(pool_id)]);

	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	return QDF_STATUS_SUCCESS;
}
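
/*
 * Illustrative usage (not part of the driver): one pool is allocated per Tx
 * ring during datapath attach and released on teardown. The element count
 * shown here is arbitrary.
 *
 *	if (dp_tx_desc_pool_alloc(soc, pool_id, 1024) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	...
 *	dp_tx_desc_pool_free(soc, pool_id);
 */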
/**
 * dp_tx_ext_desc_pool_alloc() - Allocate tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	uint16_t num_elem)
{
	uint16_t num_page;
	uint32_t count;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	QDF_STATUS status;

	/* Coherent tx extension descriptor alloc */
	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		soc->tx_ext_desc[pool_id].elem_size,
		soc->tx_ext_desc[pool_id].elem_count,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto fail_exit;
	}

	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;
	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also fairly large: a single element is
	 * 24 bytes, so 2K elements take 48 Kbytes. It therefore also has
	 * to be allocated as multi-page cacheable memory.
	 */
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, 0,
		true);
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc_page;
	}

	/* link tx descriptors into a freelist */
	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto free_ext_link_desc_page;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &soc->tx_ext_desc[pool_id].desc_pages;
	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
	c_elem = soc->tx_ext_desc[pool_id].freelist;
	p_elem = c_elem;
	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
		if (!(count % pages->num_element_per_page)) {
			/*
			 * First element of a new page:
			 * must point to the start of the next DMA page
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"link over flow");
				status = QDF_STATUS_E_FAULT;
				goto free_ext_link_desc_page;
			}
			c_elem->vaddr = (void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				soc->tx_ext_desc[pool_id].elem_size);
			c_elem->paddr = (p_elem->paddr +
				soc->tx_ext_desc[pool_id].elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	soc->tx_ext_desc[pool_id].num_free = num_elem;
	qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

fail_exit:
	return status;
}
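
/*
 * Illustrative sketch (not part of the driver): each cacheable link element
 * pairs a host pointer with the matching device address of one coherent
 * extension descriptor, so a producer can fill the descriptor through vaddr
 * and hand paddr to hardware. A hypothetical locked pop from the freelist:
 *
 *	qdf_spin_lock_bh(&soc->tx_ext_desc[pool_id].lock);
 *	elem = soc->tx_ext_desc[pool_id].freelist;
 *	if (elem) {
 *		soc->tx_ext_desc[pool_id].freelist = elem->next;
 *		soc->tx_ext_desc[pool_id].num_free--;
 *	}
 *	qdf_spin_unlock_bh(&soc->tx_ext_desc[pool_id].lock);
 */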
/**
 * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

	qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 * @num_elem: number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	soc->tx_tso_desc[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("Alloc Failed %pK pool_id %d"),
			  soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_desc[pool_id].freelist = c_element;
	soc->tx_tso_desc[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  FL("Alloc Failed %pK pool_id %d"),
				  soc, pool_id);
			goto fail;
		}

		soc->tx_tso_desc[pool_id].num_free++;
		c_element = c_element->next;
		c_element->next = NULL;
	}
	TSO_DEBUG("Number of free descriptors: %u\n",
		  soc->tx_tso_desc[pool_id].num_free);
	soc->tx_tso_desc[pool_id].pool_size = num_elem;
	qdf_spinlock_create(&soc->tx_tso_desc[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_desc[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}

	return QDF_STATUS_E_NOMEM;
}
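
/*
 * Illustrative sketch (not part of the driver): unlike the multi-page pools
 * above, the TSO pool is a plain singly-linked freelist of individually
 * allocated elements, so taking a segment is a locked head pop. The helper
 * name below is hypothetical.
 *
 *	static struct qdf_tso_seg_elem_t *
 *	example_tso_seg_get(struct dp_soc *soc, uint8_t pool_id)
 *	{
 *		struct qdf_tso_seg_elem_t *elem;
 *
 *		qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
 *		elem = soc->tx_tso_desc[pool_id].freelist;
 *		if (elem) {
 *			soc->tx_tso_desc[pool_id].freelist = elem->next;
 *			soc->tx_tso_desc[pool_id].num_free--;
 *		}
 *		qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
 *		return elem;
 *	}
 */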
/**
 * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 *
 * Return: NONE
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	c_element = soc->tx_tso_desc[pool_id].freelist;

	if (!c_element) {
		qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_desc[pool_id].pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_desc[pool_id].freelist = NULL;
	soc->tx_tso_desc[pool_id].num_free = 0;
	soc->tx_tso_desc[pool_id].pool_size = 0;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
	qdf_spinlock_destroy(&soc->tx_tso_desc[pool_id].lock);
}
/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 * fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	soc->tx_tso_num_seg[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("Alloc Failed %pK pool_id %d"),
			  soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_num_seg[pool_id].freelist = c_element;
	soc->tx_tso_num_seg[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  FL("Alloc Failed %pK pool_id %d"),
				  soc, pool_id);
			goto fail;
		}

		soc->tx_tso_num_seg[pool_id].num_free++;
		c_element = c_element->next;
		c_element->next = NULL;
	}

	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = num_elem;
	qdf_spinlock_create(&soc->tx_tso_num_seg[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_num_seg[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}

	return QDF_STATUS_E_NOMEM;
}
/**
 * dp_tx_tso_num_seg_pool_free() - free the pool of descriptors that track
 * the fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 *
 * Return: NONE
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	c_element = soc->tx_tso_num_seg[pool_id].freelist;

	if (!c_element) {
		qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_num_seg[pool_id].num_seg_pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_num_seg[pool_id].freelist = NULL;
	soc->tx_tso_num_seg[pool_id].num_free = 0;
	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = 0;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	qdf_spinlock_destroy(&soc->tx_tso_num_seg[pool_id].lock);
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}
#endif /* FEATURE_TSO */