dp_tx_desc.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \
do { \
	uint8_t sig_bit; \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */ \
	sig_bit = 0; \
	while (num_desc_per_page) { \
		sig_bit++; \
		num_desc_per_page = num_desc_per_page >> 1; \
	} \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1); \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
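
/*
 * Worked example (illustration only, not part of the driver): with
 * num_desc_per_page = 256 the loop above counts 9 significant bits, so
 * page_divider = 8 and offset_filter = 0xff. A hypothetical lookup
 * using these fields splits a descriptor index into a page number and
 * an offset within that page, e.g. index 300 -> page 1, offset 44.
 */
#if 0	/* sketch, assuming cacheable_pages[] holds the per-page bases */
static inline struct dp_tx_desc_s *
dp_tx_desc_find_sketch(struct dp_soc *soc, uint8_t pool_id, uint32_t index)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[pool_id];
	uint32_t page = index >> pool->page_divider;	/* which page */
	uint32_t offset = index & pool->offset_filter;	/* slot in page */

	return (struct dp_tx_desc_s *)((char *)
		pool->desc_pages.cacheable_pages[page] +
		offset * pool->elem_size);
}
#endif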

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
	tx_desc_pool->elem_count = num_elem;
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_clean_up() - Clean up the tx descriptors
 * @ctxt: context passed
 * @elem: element to be cleaned up
 * @elem_list: element list
 *
 */
static void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
{
	struct dp_soc *soc = (struct dp_soc *)ctxt;
	struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
	qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
	qdf_nbuf_t nbuf = NULL;

	if (tx_desc->nbuf) {
		nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);

		if (nbuf) {
			if (!nbuf_list) {
				dp_err("potential memory leak");
				qdf_assert_always(0);
			}

			nbuf->next = *nbuf_list;
			*nbuf_list = nbuf;
		}
	}
}

void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
	int i;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
	uint32_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];

		if (tx_desc_pool)
			qdf_tx_desc_pool_free_bufs(soc,
						   &tx_desc_pool->desc_pages,
						   tx_desc_pool->elem_size,
						   tx_desc_pool->elem_count,
						   true, &dp_tx_desc_clean_up,
						   nbuf_list);
	}
}
#endif
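
/*
 * Usage sketch (illustrative): dp_tx_desc_clean_up() pushes every nbuf
 * it recovers onto the caller-supplied list head, so after
 * dp_tx_desc_pool_cleanup() the caller owns a singly linked chain of
 * buffers that still has to be freed. The calling context below is
 * hypothetical.
 */
#if 0
	qdf_nbuf_t nbuf_list = NULL;

	dp_tx_desc_pool_cleanup(soc, &nbuf_list);
	while (nbuf_list) {
		qdf_nbuf_t next = nbuf_list->next;

		qdf_nbuf_free(nbuf_list);
		nbuf_list = next;
	}
#endif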

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem)
{
	uint32_t desc_size, num_elem_t;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	QDF_STATUS status;

	num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id, num_elem);
	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
	tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
	tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_DESC_TYPE,
				      &tx_desc_pool->desc_pages,
				      desc_size, num_elem_t,
				      0, true);

	if (!tx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}

	/* Arch specific TX descriptor allocation */
	status = soc->arch_ops.dp_tx_desc_pool_alloc(soc, num_elem_t, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate arch specific descriptors");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &((soc)->tx_desc[pool_id]);

	if (tx_desc_pool->desc_pages.num_pages)
		dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_DESC_TYPE,
					     &tx_desc_pool->desc_pages, 0,
					     true);

	/* Free arch specific TX descriptor */
	soc->arch_ops.dp_tx_desc_pool_free(soc, pool_id);
}

QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint32_t desc_size, num_elem_t;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
	num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id, num_elem);

	tx_desc_pool = &soc->tx_desc[pool_id];
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem_t, true)) {
		dp_err("invalid tx desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;

	/* Set unique IDs for each Tx descriptor */
	if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
						soc, num_elem_t, pool_id)) {
		dp_err("initialization per target failed");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem_t);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &soc->tx_desc[pool_id];
	soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool, pool_id);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}
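
/*
 * Lifecycle sketch (illustrative, assuming a single pool of 1024
 * elements): a pool is brought up with alloc + init and torn down in
 * reverse order with deinit + free.
 */
#if 0
	if (QDF_IS_STATUS_SUCCESS(dp_tx_desc_pool_alloc(soc, 0, 1024)) &&
	    QDF_IS_STATUS_SUCCESS(dp_tx_desc_pool_init(soc, 0, 1024))) {
		/* ... datapath runs; descriptors come from the freelist ... */
		dp_tx_desc_pool_deinit(soc, 0);
		dp_tx_desc_pool_free(soc, 0);
	}
#endif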

QDF_STATUS
dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
				uint8_t pool_id)
{
	QDF_STATUS status;
	qdf_dma_context_t memctx = 0;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

	/* Coherent tx extension descriptor alloc */
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_TYPE,
				      &dp_tx_ext_desc_pool->desc_pages,
				      elem_size, num_elem, memctx, false);

	if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext desc page alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also large: a single element is 24 bytes, so
	 * 2K elements take 48 KB. Multi-page cacheable memory has to be
	 * allocated for it.
	 */
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
				      &dp_tx_ext_desc_pool->desc_link_pages,
				      link_elem_size, num_elem, 0, true);

	if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc;
	}

	return QDF_STATUS_SUCCESS;

free_ext_desc:
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
				     &dp_tx_ext_desc_pool->desc_pages,
				     memctx, false);
	return status;
}

QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	QDF_STATUS status;
	uint8_t pool_id, count;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem,
							 pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to allocate tx ext desc pool %d",
			       pool_id);
			goto free_ext_desc_pool;
		}
	}

	return QDF_STATUS_SUCCESS;

free_ext_desc_pool:
	for (count = 0; count < pool_id; count++)
		dp_tx_ext_desc_pool_free_by_id(soc, count);

	return status;
}

QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
					  uint8_t pool_id)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	soc->tx_ext_desc[pool_id].elem_size =
		HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	soc->tx_ext_desc[pool_id].elem_count = num_elem;

	dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
		*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

	if (qdf_mem_multi_page_link(soc->osdev,
				    &dp_tx_ext_desc_pool->desc_link_pages,
				    dp_tx_ext_desc_pool->link_elem_size,
				    dp_tx_ext_desc_pool->elem_count,
				    true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto fail;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &dp_tx_ext_desc_pool->desc_pages;
	page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
	c_elem = dp_tx_ext_desc_pool->freelist;
	p_elem = c_elem;
	for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
		if (!(i % pages->num_element_per_page)) {
			/*
			 * The first element of a new page should point
			 * at the start of the next DMA page.
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  "link overflow");
				status = QDF_STATUS_E_FAULT;
				goto fail;
			}

			c_elem->vaddr =
				(void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				dp_tx_ext_desc_pool->elem_size);
			c_elem->paddr = (p_elem->paddr +
				dp_tx_ext_desc_pool->elem_size);
		}

		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	dp_tx_ext_desc_pool->num_free = num_elem;
	qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);

	return QDF_STATUS_SUCCESS;

fail:
	return status;
}
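
/*
 * Invariant sketch (illustrative): after the assignment loop above, two
 * neighbouring freelist elements that live in the same DMA page step
 * through coherent memory by exactly elem_size. The 'pool' variable
 * below is hypothetical, and the check assumes the first page holds at
 * least two elements.
 */
#if 0
	struct dp_tx_ext_desc_elem_s *e = pool->freelist;

	qdf_assert(e->next->paddr == e->paddr + pool->elem_size);
#endif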

QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	uint8_t pool_id;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem,
							pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to init ext desc pool %d", pool_id);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	return status;
}

void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	qdf_dma_context_t memctx = 0;

	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
				     &dp_tx_ext_desc_pool->desc_link_pages,
				     0, true);

	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
				     &dp_tx_ext_desc_pool->desc_pages,
				     memctx, false);
}

void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
}

void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
}

void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
}

#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc,
					   uint32_t num_elem,
					   uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
	tso_desc_pool = &soc->tx_tso_desc[pool_id];
	tso_desc_pool->num_free = 0;
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_DESC_TYPE,
				      &tso_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!tso_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	uint32_t pool_id, i;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem,
							 pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to allocate TSO desc pool %d", pool_id);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++)
		dp_tx_tso_desc_pool_free_by_id(soc, i);

	return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_DESC_TYPE,
				     &tso_desc_pool->desc_pages,
				     0, true);
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc,
					  uint32_t num_elem,
					  uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	tso_desc_pool = &soc->tx_tso_desc[pool_id];

	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_desc_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
		*tso_desc_pool->desc_pages.cacheable_pages;
	tso_desc_pool->num_free = num_elem;

	TSO_DEBUG("Number of free descriptors: %u\n",
		  tso_desc_pool->num_free);
	tso_desc_pool->pool_size = num_elem;
	qdf_spinlock_create(&tso_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}
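
/*
 * Freelist sketch (illustrative, hypothetical helper): once the pool is
 * initialized, a TSO segment element would be taken by popping the
 * freelist under the pool lock, and returned by pushing it back the
 * same way.
 */
#if 0
static struct qdf_tso_seg_elem_t *
dp_tx_tso_seg_get_sketch(struct dp_tx_tso_seg_pool_s *pool)
{
	struct qdf_tso_seg_elem_t *seg;

	qdf_spin_lock_bh(&pool->lock);
	seg = pool->freelist;
	if (seg) {
		pool->freelist = seg->next;	/* pop head of freelist */
		pool->num_free--;
	}
	qdf_spin_unlock_bh(&pool->lock);

	return seg;
}
#endif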

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	QDF_STATUS status;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem,
							pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to initialise TSO desc pool %d",
			       pool_id);
			return status;
		}
	}

	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];

	if (tso_desc_pool->pool_size) {
		qdf_spin_lock_bh(&tso_desc_pool->lock);
		tso_desc_pool->freelist = NULL;
		tso_desc_pool->num_free = 0;
		tso_desc_pool->pool_size = 0;
		qdf_spin_unlock_bh(&tso_desc_pool->lock);
		qdf_spinlock_destroy(&tso_desc_pool->lock);
	}
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
					      uint32_t num_elem,
					      uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	tso_num_seg_pool->num_free = 0;
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
				      &tso_num_seg_pool->desc_pages,
				      desc_size,
				      num_elem, 0, true);

	if (!tso_num_seg_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tso_num_seg_pool");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	uint32_t pool_id, i;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem,
							    pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to allocate TSO num seg pool %d",
			       pool_id);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	/*
	 * Free the pools that were successfully allocated, using the loop
	 * index; freeing pool_id here would release the same (failed) pool
	 * repeatedly and leak the earlier ones.
	 */
	for (i = 0; i < pool_id; i++)
		dp_tx_tso_num_seg_pool_free_by_id(soc, i);

	return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
				     &tso_num_seg_pool->desc_pages,
				     0, true);
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
				  uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_num_seg_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso num seg allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
		*tso_num_seg_pool->desc_pages.cacheable_pages;
	tso_num_seg_pool->num_free = num_elem;
	tso_num_seg_pool->num_seg_pool_size = num_elem;
	qdf_spinlock_create(&tso_num_seg_pool->lock);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	uint32_t pool_id;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem,
							   pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to initialise TSO num seg pool %d",
			       pool_id);
			return status;
		}
	}

	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

	if (tso_num_seg_pool->num_seg_pool_size) {
		qdf_spin_lock_bh(&tso_num_seg_pool->lock);
		tso_num_seg_pool->freelist = NULL;
		tso_num_seg_pool->num_free = 0;
		tso_num_seg_pool->num_seg_pool_size = 0;
		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
	}
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_num_seg_pool_deinit_by_id(soc, pool_id);
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc,
					   uint32_t num_elem,
					   uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc,
					  uint32_t num_elem,
					  uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
					      uint32_t num_elem,
					      uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
				  uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif