dp_tx_desc.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \
do {                                                             \
        uint8_t sig_bit;                                         \
        soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
        /* Calculate page divider to find page number */         \
        sig_bit = 0;                                             \
        while (num_desc_per_page) {                              \
                sig_bit++;                                       \
                num_desc_per_page = num_desc_per_page >> 1;      \
        }                                                        \
        soc->tx_desc[pool_id].page_divider = (sig_bit - 1);      \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */

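/*
 * Worked example (illustrative values, not taken from the code): if the
 * power-of-2 descriptor size yields 32 descriptors per page, the loop
 * above exits with sig_bit = 6, so page_divider = 5 and offset_filter = 31.
 * A descriptor index can then be split as
 *   page   = desc_id >> page_divider   (desc_id >> 5)
 *   offset = desc_id & offset_filter   (desc_id & 31)
 */
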
/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
                                   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
                                   uint16_t num_elem)
{
        tx_desc_pool->elem_count = num_elem;
        tx_desc_pool->num_free = num_elem;
        tx_desc_pool->num_allocated = 0;
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_clean_up() - Clean up the tx descriptors
 * @ctxt: context passed
 * @elem: element to be cleaned up
 * @elem_list: element list
 *
 */
static void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
{
        struct dp_soc *soc = (struct dp_soc *)ctxt;
        struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
        qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
        qdf_nbuf_t nbuf = NULL;

        if (tx_desc->nbuf) {
                nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
                dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);

                if (nbuf) {
                        if (!nbuf_list) {
                                dp_err("potential memory leak");
                                qdf_assert_always(0);
                        }

                        nbuf->next = *nbuf_list;
                        *nbuf_list = nbuf;
                }
        }
}

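/**
 * dp_tx_desc_pool_cleanup() - Clean up all tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @nbuf_list: list head onto which freed nbufs are chained
 *
 * Walks every tx descriptor pool and runs dp_tx_desc_clean_up() on each
 * element, collecting the nbufs of in-flight descriptors into @nbuf_list.
 *
 * Return: None
 */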
void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
        int i;
        struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
        uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

        for (i = 0; i < num_pool; i++) {
                tx_desc_pool = dp_get_tx_desc_pool(soc, i);
                if (tx_desc_pool)
                        qdf_tx_desc_pool_free_bufs(soc,
                                                   &tx_desc_pool->desc_pages,
                                                   tx_desc_pool->elem_size,
                                                   tx_desc_pool->elem_count,
                                                   true, &dp_tx_desc_clean_up,
                                                   nbuf_list);
        }
}
#endif

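/*
 * dp_tx_desc_pool_alloc_mem() / dp_tx_desc_pool_free_mem() - allocate and
 * free the dp_tx_desc_pool_s bookkeeping structure from the global DP
 * context. When QCA_SUPPORT_DP_GLOBAL_CTX is not set these are no-ops and
 * the pool structures are assumed to live elsewhere (e.g. in struct dp_soc,
 * as with the soc->tx_desc[] references above).
 */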
#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
static void dp_tx_desc_pool_alloc_mem(int8_t pool_id, bool spcl_tx_desc)
{
        struct dp_global_context *dp_global = NULL;

        dp_global = wlan_objmgr_get_global_ctx();

        if (spcl_tx_desc) {
                dp_global->spcl_tx_desc[pool_id] =
                        qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
        } else {
                dp_global->tx_desc[pool_id] =
                        qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
        }
}

static void dp_tx_desc_pool_free_mem(int8_t pool_id, bool spcl_tx_desc)
{
        struct dp_global_context *dp_global = NULL;

        dp_global = wlan_objmgr_get_global_ctx();

        if (spcl_tx_desc)
                qdf_mem_free(dp_global->spcl_tx_desc[pool_id]);
        else
                qdf_mem_free(dp_global->tx_desc[pool_id]);
}
#else
static void dp_tx_desc_pool_alloc_mem(int8_t pool_id, bool spcl_tx_desc)
{
}

static void dp_tx_desc_pool_free_mem(int8_t pool_id, bool spcl_tx_desc)
{
}
#endif

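/**
 * dp_tx_desc_pool_alloc() - Allocate tx descriptor pool memory
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 * @spcl_tx_desc: true for the special descriptor pool
 *
 * Allocates the multi-page descriptor memory and the arch specific
 * descriptors for the pool.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */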
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
                                 uint32_t num_elem, bool spcl_tx_desc)
{
        uint32_t desc_size, num_elem_t;
        struct dp_tx_desc_pool_s *tx_desc_pool;
        QDF_STATUS status;
        enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

        dp_tx_desc_pool_alloc_mem(pool_id, spcl_tx_desc);
        if (spcl_tx_desc) {
                tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
                desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
                num_elem_t = num_elem;
        } else {
                tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
                desc_type = QDF_DP_TX_DESC_TYPE;
                num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id,
                                                    num_elem);
        }

        tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
        dp_desc_multi_pages_mem_alloc(soc, desc_type,
                                      &tx_desc_pool->desc_pages,
                                      desc_size, num_elem_t,
                                      0, true);

        if (!tx_desc_pool->desc_pages.num_pages) {
                dp_err("Multi page alloc fail, tx desc");
                return QDF_STATUS_E_NOMEM;
        }

        /* Arch specific TX descriptor allocation */
        status = soc->arch_ops.dp_tx_desc_pool_alloc(soc, num_elem_t, pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
                dp_err("failed to allocate arch specific descriptors");
                return QDF_STATUS_E_NOMEM;
        }

        return QDF_STATUS_SUCCESS;
}

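/**
 * dp_tx_desc_pool_free() - Free tx descriptor pool memory
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 * @spcl_tx_desc: true for the special descriptor pool
 *
 * Return: None
 */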
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id,
                          bool spcl_tx_desc)
{
        struct dp_tx_desc_pool_s *tx_desc_pool;
        enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;

        if (spcl_tx_desc) {
                tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
                desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
        } else {
                tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
                desc_type = QDF_DP_TX_DESC_TYPE;
        }

        /* Free under the computed desc_type so special pools are accounted
         * against QDF_DP_TX_SPCL_DESC_TYPE; passing QDF_DP_TX_DESC_TYPE
         * unconditionally left desc_type dead and mis-typed the free.
         */
        if (tx_desc_pool->desc_pages.num_pages)
                dp_desc_multi_pages_mem_free(soc, desc_type,
                                             &tx_desc_pool->desc_pages, 0,
                                             true);

        /* Free arch specific TX descriptor */
        soc->arch_ops.dp_tx_desc_pool_free(soc, pool_id);
        dp_tx_desc_pool_free_mem(pool_id, spcl_tx_desc);
}

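/**
 * dp_tx_desc_pool_init() - Initialize a tx descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to initialize
 * @num_elem: Number of descriptor elements per pool
 * @spcl_tx_desc: true for the special descriptor pool
 *
 * Links the allocated pages into a freelist and performs the arch
 * specific per-descriptor initialization.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on failure
 */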
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
                                uint32_t num_elem, bool spcl_tx_desc)
{
        struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
        uint32_t desc_size, num_elem_t;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

        if (spcl_tx_desc) {
                tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
                num_elem_t = num_elem;
        } else {
                tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
                num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id,
                                                    num_elem);
        }

        if (qdf_mem_multi_page_link(soc->osdev,
                                    &tx_desc_pool->desc_pages,
                                    desc_size, num_elem_t, true)) {
                dp_err("invalid tx desc allocation - overflow num link");
                return QDF_STATUS_E_FAULT;
        }

        tx_desc_pool->freelist = (struct dp_tx_desc_s *)
                *tx_desc_pool->desc_pages.cacheable_pages;

        /* Set unique IDs for each Tx descriptor */
        if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
                                                soc, num_elem_t,
                                                pool_id, spcl_tx_desc)) {
                dp_err("initialization per target failed");
                return QDF_STATUS_E_FAULT;
        }

        tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
        dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem_t);
        TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

        return QDF_STATUS_SUCCESS;
}

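/**
 * dp_tx_desc_pool_deinit() - De-initialize a tx descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 * @spcl_tx_desc: true for the special descriptor pool
 *
 * Return: None
 */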
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id,
                            bool spcl_tx_desc)
{
        struct dp_tx_desc_pool_s *tx_desc_pool;

        if (spcl_tx_desc)
                tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
        else
                tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);

        soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool,
                                             pool_id, spcl_tx_desc);
        TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
        TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}

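/*
 * A sketch of the expected call order for the pool APIs above (the actual
 * callers live in the DP attach/detach paths; shown here only for
 * illustration):
 *
 *   dp_tx_desc_pool_alloc(soc, pool_id, num_elem, false);
 *   dp_tx_desc_pool_init(soc, pool_id, num_elem, false);
 *   ... descriptors are served from tx_desc_pool->freelist ...
 *   dp_tx_desc_pool_deinit(soc, pool_id, false);
 *   dp_tx_desc_pool_free(soc, pool_id, false);
 */

/**
 * dp_tx_ext_desc_pool_alloc_by_id() - allocate TX extension descriptor pool
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to allocate
 *
 * Allocates both the coherent (DMA) extension descriptors and the
 * cacheable link elements that track them.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */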
QDF_STATUS
dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
                                uint8_t pool_id)
{
        QDF_STATUS status;
        qdf_dma_context_t memctx = 0;
        uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
        struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
        uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

        /* Coherent tx extension descriptor alloc */
        dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_TYPE,
                                      &dp_tx_ext_desc_pool->desc_pages,
                                      elem_size, num_elem, memctx, false);
        if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "ext desc page alloc fail");
                return QDF_STATUS_E_NOMEM;
        }

        /*
         * Cacheable ext descriptor link alloc.
         * This structure is already large: a single element is 24 bytes,
         * so 2K elements take 48 KB. Multi-page cacheable memory has to
         * be allocated.
         */
        dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
                                      &dp_tx_ext_desc_pool->desc_link_pages,
                                      link_elem_size, num_elem, 0, true);
        if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "ext link desc page alloc fail");
                status = QDF_STATUS_E_NOMEM;
                goto free_ext_desc;
        }

        return QDF_STATUS_SUCCESS;

free_ext_desc:
        dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
                                     &dp_tx_ext_desc_pool->desc_pages,
                                     memctx, false);
        return status;
}

QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem)
{
        QDF_STATUS status;
        uint8_t pool_id, count;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem,
                                                         pool_id);
                if (QDF_IS_STATUS_ERROR(status)) {
                        dp_err("failed to allocate tx ext desc pool %d",
                               pool_id);
                        goto free_ext_desc_pool;
                }
        }

        return QDF_STATUS_SUCCESS;

free_ext_desc_pool:
        for (count = 0; count < pool_id; count++)
                dp_tx_ext_desc_pool_free_by_id(soc, count);

        return status;
}

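/**
 * dp_tx_ext_desc_pool_init_by_id() - initialize TX extension descriptor pool
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 *
 * Links the cacheable link elements into a freelist and points each one at
 * its coherent (DMA) extension descriptor buffer.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on failure
 */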
QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                          uint8_t pool_id)
{
        uint32_t i;
        struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
        struct qdf_mem_dma_page_t *page_info;
        struct qdf_mem_multi_page_t *pages;
        struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
        QDF_STATUS status;

        /* link tx descriptors into a freelist */
        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        soc->tx_ext_desc[pool_id].elem_size =
                HAL_TX_EXT_DESC_WITH_META_DATA;
        soc->tx_ext_desc[pool_id].link_elem_size =
                sizeof(struct dp_tx_ext_desc_elem_s);
        soc->tx_ext_desc[pool_id].elem_count = num_elem;

        dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
                *dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

        if (qdf_mem_multi_page_link(soc->osdev,
                                    &dp_tx_ext_desc_pool->desc_link_pages,
                                    dp_tx_ext_desc_pool->link_elem_size,
                                    dp_tx_ext_desc_pool->elem_count,
                                    true)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "ext link desc page linking fail");
                status = QDF_STATUS_E_FAULT;
                goto fail;
        }

        /* Assign coherent memory pointer into linked free list */
        pages = &dp_tx_ext_desc_pool->desc_pages;
        page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
        c_elem = dp_tx_ext_desc_pool->freelist;
        p_elem = c_elem;
        for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
                if (!(i % pages->num_element_per_page)) {
                        /*
                         * The first element of a new page should point
                         * to the start of the next DMA page.
                         */
                        if (!pages->dma_pages->page_v_addr_start) {
                                QDF_TRACE(QDF_MODULE_ID_DP,
                                          QDF_TRACE_LEVEL_ERROR,
                                          "link overflow");
                                status = QDF_STATUS_E_FAULT;
                                goto fail;
                        }

                        c_elem->vaddr =
                                (void *)page_info->page_v_addr_start;
                        c_elem->paddr = page_info->page_p_addr;
                        page_info++;
                } else {
                        c_elem->vaddr = (void *)(p_elem->vaddr +
                                dp_tx_ext_desc_pool->elem_size);
                        c_elem->paddr = (p_elem->paddr +
                                dp_tx_ext_desc_pool->elem_size);
                }

                p_elem = c_elem;
                c_elem = c_elem->next;
                if (!c_elem)
                        break;
        }

        dp_tx_ext_desc_pool->num_free = num_elem;
        qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);

        return QDF_STATUS_SUCCESS;

fail:
        return status;
}

QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem)
{
        uint8_t pool_id;
        QDF_STATUS status;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem,
                                                        pool_id);
                if (QDF_IS_STATUS_ERROR(status)) {
                        dp_err("failed to init ext desc pool %d", pool_id);
                        goto fail;
                }
        }

        return QDF_STATUS_SUCCESS;

fail:
        return status;
}

void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
        qdf_dma_context_t memctx = 0;

        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

        dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
                                     &dp_tx_ext_desc_pool->desc_link_pages,
                                     0, true);

        dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
                                     &dp_tx_ext_desc_pool->desc_pages,
                                     memctx, false);
}

void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
        uint8_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++)
                dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
}

void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
}

void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
        uint8_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++)
                dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
}

#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc,
                                           uint32_t num_elem,
                                           uint8_t pool_id)
{
        struct dp_tx_tso_seg_pool_s *tso_desc_pool;
        uint32_t desc_size;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
        tso_desc_pool = &soc->tx_tso_desc[pool_id];
        tso_desc_pool->num_free = 0;

        dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_DESC_TYPE,
                                      &tso_desc_pool->desc_pages,
                                      desc_size, num_elem, 0, true);
        if (!tso_desc_pool->desc_pages.num_pages) {
                dp_err("Multi page alloc fail, tso desc");
                return QDF_STATUS_E_NOMEM;
        }

        return QDF_STATUS_SUCCESS;
}

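/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO descriptor pools
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */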
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem)
{
        uint32_t pool_id, i;
        QDF_STATUS status;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem,
                                                         pool_id);
                if (QDF_IS_STATUS_ERROR(status)) {
                        dp_err("failed to allocate TSO desc pool %d", pool_id);
                        goto fail;
                }
        }

        return QDF_STATUS_SUCCESS;

fail:
        for (i = 0; i < pool_id; i++)
                dp_tx_tso_desc_pool_free_by_id(soc, i);

        return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_tso_seg_pool_s *tso_desc_pool;

        tso_desc_pool = &soc->tx_tso_desc[pool_id];
        dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_DESC_TYPE,
                                     &tso_desc_pool->desc_pages,
                                     0, true);
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++)
                dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                          uint8_t pool_id)
{
        struct dp_tx_tso_seg_pool_s *tso_desc_pool;
        uint32_t desc_size;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
        tso_desc_pool = &soc->tx_tso_desc[pool_id];

        if (qdf_mem_multi_page_link(soc->osdev,
                                    &tso_desc_pool->desc_pages,
                                    desc_size,
                                    num_elem, true)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "invalid tso desc allocation - overflow num link");
                return QDF_STATUS_E_FAULT;
        }

        tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
                *tso_desc_pool->desc_pages.cacheable_pages;
        tso_desc_pool->num_free = num_elem;

        TSO_DEBUG("Number of free descriptors: %u\n",
                  tso_desc_pool->num_free);
        tso_desc_pool->pool_size = num_elem;
        qdf_spinlock_create(&tso_desc_pool->lock);

        return QDF_STATUS_SUCCESS;
}

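/**
 * dp_tx_tso_desc_pool_init() - initialize TSO descriptor pools
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, the error status of the failing
 * per-pool init otherwise
 */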
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem)
{
        QDF_STATUS status;
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem,
                                                        pool_id);
                if (QDF_IS_STATUS_ERROR(status)) {
                        dp_err("failed to initialise TSO desc pool %d",
                               pool_id);
                        return status;
                }
        }

        return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_tso_seg_pool_s *tso_desc_pool;

        tso_desc_pool = &soc->tx_tso_desc[pool_id];

        if (tso_desc_pool->pool_size) {
                qdf_spin_lock_bh(&tso_desc_pool->lock);
                tso_desc_pool->freelist = NULL;
                tso_desc_pool->num_free = 0;
                tso_desc_pool->pool_size = 0;
                qdf_spin_unlock_bh(&tso_desc_pool->lock);
                qdf_spinlock_destroy(&tso_desc_pool->lock);
        }
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++)
                dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
                                              uint32_t num_elem,
                                              uint8_t pool_id)
{
        struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
        uint32_t desc_size;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
        tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
        tso_num_seg_pool->num_free = 0;

        dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
                                      &tso_num_seg_pool->desc_pages,
                                      desc_size,
                                      num_elem, 0, true);

        if (!tso_num_seg_pool->desc_pages.num_pages) {
                dp_err("Multi page alloc fail, tso_num_seg_pool");
                return QDF_STATUS_E_NOMEM;
        }

        return QDF_STATUS_SUCCESS;
}

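/**
 * dp_tx_tso_num_seg_pool_alloc() - allocate TSO num seg pools
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */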
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                        uint32_t num_elem)
{
        uint32_t pool_id, i;
        QDF_STATUS status;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem,
                                                            pool_id);
                if (QDF_IS_STATUS_ERROR(status)) {
                        dp_err("failed to allocate TSO num seg pool %d",
                               pool_id);
                        goto fail;
                }
        }

        return QDF_STATUS_SUCCESS;

fail:
        /* free each successfully allocated pool, not pool_id repeatedly */
        for (i = 0; i < pool_id; i++)
                dp_tx_tso_num_seg_pool_free_by_id(soc, i);

        return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

        tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
        dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
                                     &tso_num_seg_pool->desc_pages,
                                     0, true);
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++)
                dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                  uint8_t pool_id)
{
        struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
        uint32_t desc_size;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
        tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

        if (qdf_mem_multi_page_link(soc->osdev,
                                    &tso_num_seg_pool->desc_pages,
                                    desc_size,
                                    num_elem, true)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "invalid tso num seg allocation - overflow num link");
                return QDF_STATUS_E_FAULT;
        }

        tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
                *tso_num_seg_pool->desc_pages.cacheable_pages;
        tso_num_seg_pool->num_free = num_elem;
        tso_num_seg_pool->num_seg_pool_size = num_elem;
        qdf_spinlock_create(&tso_num_seg_pool->lock);

        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                       uint32_t num_elem)
{
        uint32_t pool_id;
        QDF_STATUS status;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem,
                                                           pool_id);
                if (QDF_IS_STATUS_ERROR(status)) {
                        dp_err("failed to initialise TSO num seg pool %d",
                               pool_id);
                        return status;
                }
        }

        return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

        tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

        if (tso_num_seg_pool->num_seg_pool_size) {
                qdf_spin_lock_bh(&tso_num_seg_pool->lock);
                tso_num_seg_pool->freelist = NULL;
                tso_num_seg_pool->num_free = 0;
                tso_num_seg_pool->num_seg_pool_size = 0;
                qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
                qdf_spinlock_destroy(&tso_num_seg_pool->lock);
        }
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++)
                dp_tx_tso_num_seg_pool_deinit_by_id(soc, pool_id);
}

#else
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc,
                                           uint32_t num_elem,
                                           uint8_t pool_id)
{
        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem)
{
        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                          uint8_t pool_id)
{
        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem)
{
        return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
                                              uint32_t num_elem,
                                              uint8_t pool_id)
{
        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                        uint32_t num_elem)
{
        return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                  uint8_t pool_id)
{
        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                       uint32_t num_elem)
{
        return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif /* FEATURE_TSO */