/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \
do { \
	uint8_t sig_bit; \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */ \
	sig_bit = 0; \
	while (num_desc_per_page) { \
		sig_bit++; \
		num_desc_per_page = num_desc_per_page >> 1; \
	} \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1); \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
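
/*
 * Illustrative example (assuming num_desc_per_page is a power of two, as
 * produced by qdf_get_pwr2()): for num_desc_per_page = 1024 the macro
 * above yields offset_filter = 0x3FF and page_divider = 10, so a
 * descriptor index i maps to page (i >> 10) at offset (i & 0x3FF).
 */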
/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
	tx_desc_pool->elem_count = num_elem;
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_clean_up() - Clean up the tx descriptors
 * @ctxt: context passed
 * @elem: element to be cleaned up
 * @elem_list: element list
 *
 */
static void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
{
	struct dp_soc *soc = (struct dp_soc *)ctxt;
	struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
	qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
	qdf_nbuf_t nbuf = NULL;

	if (tx_desc->nbuf) {
		nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);

		if (nbuf) {
			if (!nbuf_list) {
				dp_err("potential memory leak");
				qdf_assert_always(0);
			}

			nbuf->next = *nbuf_list;
			*nbuf_list = nbuf;
		}
	}
}

void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
	int i;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
	uint32_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];

		if (tx_desc_pool)
			qdf_tx_desc_pool_free_bufs(soc,
						   &tx_desc_pool->desc_pages,
						   tx_desc_pool->elem_size,
						   tx_desc_pool->elem_count,
						   true, &dp_tx_desc_clean_up,
						   nbuf_list);
	}
}
#endif
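
/**
 * dp_tx_desc_pool_alloc() - Allocate memory for a Tx descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Allocates multi-page cacheable memory for the descriptors, then the
 * arch-specific descriptor memory.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */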
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem)
{
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	QDF_STATUS status;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
	tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
	tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_DESC_TYPE,
				      &tx_desc_pool->desc_pages,
				      desc_size, num_elem,
				      0, true);

	if (!tx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}

	/* Arch specific TX descriptor allocation */
	status = soc->arch_ops.dp_tx_desc_pool_alloc(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate arch specific descriptors");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}
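
/**
 * dp_tx_desc_pool_free() - Free the memory backing a Tx descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 * Return: None
 */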
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &((soc)->tx_desc[pool_id]);

	if (tx_desc_pool->desc_pages.num_pages)
		dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_DESC_TYPE,
					     &tx_desc_pool->desc_pages, 0,
					     true);

	/* Free arch specific TX descriptor */
	soc->arch_ops.dp_tx_desc_pool_free(soc, pool_id);
}
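
/**
 * dp_tx_desc_pool_init() - Link the allocated pages into a freelist and
 *			    initialize the Tx descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on failure
 */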
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	tx_desc_pool = &soc->tx_desc[pool_id];
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		dp_err("invalid tx desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;
	/* Set unique IDs for each Tx descriptor */
	if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
						soc, num_elem, pool_id)) {
		dp_err("initialization per target failed");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}
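
/**
 * dp_tx_desc_pool_deinit() - De-initialize a Tx descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 * Return: None
 */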
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &soc->tx_desc[pool_id];
	soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool, pool_id);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}
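
/**
 * dp_tx_ext_desc_pool_alloc_by_id() - Allocate one Tx extension descriptor
 *				       pool
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to allocate
 *
 * Allocates coherent memory for the extension descriptors and cacheable
 * memory for the link elements that track them.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */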
QDF_STATUS
dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
				uint8_t pool_id)
{
	QDF_STATUS status;
	qdf_dma_context_t memctx = 0;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

	/* Coherent tx extension descriptor alloc */
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_TYPE,
				      &dp_tx_ext_desc_pool->desc_pages,
				      elem_size, num_elem, memctx, false);

	if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext desc page alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * Cacheable ext descriptor link alloc
	 * This structure is also large: a single element is 24 bytes, so
	 * 2K elements take 48 KB. Multi-page cacheable memory has to be
	 * allocated for it as well.
	 */
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
				      &dp_tx_ext_desc_pool->desc_link_pages,
				      link_elem_size, num_elem, 0, true);

	if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc;
	}

	return QDF_STATUS_SUCCESS;

free_ext_desc:
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
				     &dp_tx_ext_desc_pool->desc_pages,
				     memctx, false);
	return status;
}
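
/**
 * dp_tx_ext_desc_pool_alloc() - Allocate all Tx extension descriptor pools
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * On failure, frees any pools that were already allocated.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */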
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	QDF_STATUS status;
	uint8_t pool_id, count;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem,
							 pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to allocate tx ext desc pool %d",
			       pool_id);
			goto free_ext_desc_pool;
		}
	}

	return QDF_STATUS_SUCCESS;

free_ext_desc_pool:
	for (count = 0; count < pool_id; count++)
		dp_tx_ext_desc_pool_free_by_id(soc, count);

	return status;
}
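
/**
 * dp_tx_ext_desc_pool_init_by_id() - Initialize one Tx extension descriptor
 *				      pool
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 *
 * Links the cacheable link elements into a freelist and points each one at
 * its coherent extension descriptor.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on failure
 */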
QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
					  uint8_t pool_id)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	soc->tx_ext_desc[pool_id].elem_size =
		HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	soc->tx_ext_desc[pool_id].elem_count = num_elem;

	dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
		*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

	if (qdf_mem_multi_page_link(soc->osdev,
				    &dp_tx_ext_desc_pool->desc_link_pages,
				    dp_tx_ext_desc_pool->link_elem_size,
				    dp_tx_ext_desc_pool->elem_count,
				    true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto fail;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &dp_tx_ext_desc_pool->desc_pages;
	page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
	c_elem = dp_tx_ext_desc_pool->freelist;
	p_elem = c_elem;
	for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
		if (!(i % pages->num_element_per_page)) {
			/*
			 * First element of a new page
			 * should point to the next page
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  "link over flow");
				status = QDF_STATUS_E_FAULT;
				goto fail;
			}

			c_elem->vaddr =
				(void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				dp_tx_ext_desc_pool->elem_size);
			c_elem->paddr = (p_elem->paddr +
				dp_tx_ext_desc_pool->elem_size);
		}

		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}
	dp_tx_ext_desc_pool->num_free = num_elem;
	qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);

	return QDF_STATUS_SUCCESS;

fail:
	return status;
}
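
/**
 * dp_tx_ext_desc_pool_init() - Initialize all Tx extension descriptor pools
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */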
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	uint8_t pool_id;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem,
							pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to init ext desc pool %d", pool_id);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	return status;
}
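
/**
 * dp_tx_ext_desc_pool_free_by_id() - Free one Tx extension descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 * Return: None
 */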
void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	qdf_dma_context_t memctx = 0;

	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
				     &dp_tx_ext_desc_pool->desc_link_pages,
				     0, true);

	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
				     &dp_tx_ext_desc_pool->desc_pages,
				     memctx, false);
}

void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
}

void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

	dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
	qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
}

void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
}
#if defined(FEATURE_TSO)
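/**
 * dp_tx_tso_desc_pool_alloc_by_id() - Allocate one TSO segment descriptor
 *				       pool
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to allocate
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */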
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc,
					   uint32_t num_elem,
					   uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	tso_desc_pool = &soc->tx_tso_desc[pool_id];
	tso_desc_pool->num_free = 0;
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_DESC_TYPE,
				      &tso_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!tso_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	uint32_t pool_id, i;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem,
							 pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to allocate TSO desc pool %d", pool_id);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++)
		dp_tx_tso_desc_pool_free_by_id(soc, i);

	return QDF_STATUS_E_NOMEM;
}
void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_DESC_TYPE,
				     &tso_desc_pool->desc_pages,
				     0, true);
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
}
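
/**
 * dp_tx_tso_desc_pool_init_by_id() - Initialize one TSO segment descriptor
 *				      pool
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on failure
 */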
QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
					  uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	tso_desc_pool = &soc->tx_tso_desc[pool_id];

	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_desc_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
		*tso_desc_pool->desc_pages.cacheable_pages;
	tso_desc_pool->num_free = num_elem;

	TSO_DEBUG("Number of free descriptors: %u\n",
		  tso_desc_pool->num_free);
	tso_desc_pool->pool_size = num_elem;
	qdf_spinlock_create(&tso_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	QDF_STATUS status;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem,
							pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to initialise TSO desc pool %d",
			       pool_id);
			return status;
		}
	}

	return QDF_STATUS_SUCCESS;
}
void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;

	tso_desc_pool = &soc->tx_tso_desc[pool_id];

	if (tso_desc_pool->pool_size) {
		qdf_spin_lock_bh(&tso_desc_pool->lock);
		tso_desc_pool->freelist = NULL;
		tso_desc_pool->num_free = 0;
		tso_desc_pool->pool_size = 0;
		qdf_spin_unlock_bh(&tso_desc_pool->lock);
		qdf_spinlock_destroy(&tso_desc_pool->lock);
	}
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
}
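
/**
 * dp_tx_tso_num_seg_pool_alloc_by_id() - Allocate one TSO num-seg descriptor
 *					  pool
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to allocate
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */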
QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
					      uint32_t num_elem,
					      uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	tso_num_seg_pool->num_free = 0;
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
				      &tso_num_seg_pool->desc_pages,
				      desc_size,
				      num_elem, 0, true);

	if (!tso_num_seg_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tso_num_seg_pool");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	uint32_t pool_id, i;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem,
							    pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to allocate TSO num seg pool %d",
			       pool_id);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	/* Free only the pools allocated before the failure */
	for (i = 0; i < pool_id; i++)
		dp_tx_tso_num_seg_pool_free_by_id(soc, i);

	return QDF_STATUS_E_NOMEM;
}
void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
				     &tso_num_seg_pool->desc_pages,
				     0, true);
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
}
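
/**
 * dp_tx_tso_num_seg_pool_init_by_id() - Initialize one TSO num-seg descriptor
 *					 pool
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on failure
 */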
QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
				  uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tso_num_seg_pool->desc_pages,
				    desc_size,
				    num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid tso desc allocation - overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
		*tso_num_seg_pool->desc_pages.cacheable_pages;
	tso_num_seg_pool->num_free = num_elem;
	tso_num_seg_pool->num_seg_pool_size = num_elem;
	qdf_spinlock_create(&tso_num_seg_pool->lock);

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	uint32_t pool_id;
	QDF_STATUS status;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem,
							   pool_id);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_err("failed to initialise TSO num seg pool %d",
			       pool_id);
			return status;
		}
	}

	return QDF_STATUS_SUCCESS;
}
void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

	if (tso_num_seg_pool->num_seg_pool_size) {
		qdf_spin_lock_bh(&tso_num_seg_pool->lock);
		tso_num_seg_pool->freelist = NULL;
		tso_num_seg_pool->num_free = 0;
		tso_num_seg_pool->num_seg_pool_size = 0;
		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
	}
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++)
		dp_tx_tso_num_seg_pool_deinit_by_id(soc, pool_id);
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc,
					   uint32_t num_elem,
					   uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
					  uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
					      uint32_t num_elem,
					      uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
				  uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif