dp_tx_desc.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)           \
do {                                                                       \
    uint8_t sig_bit;                                                       \
    soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1;           \
    /* Calculate page divider to find page number */                       \
    sig_bit = 0;                                                           \
    while (num_desc_per_page) {                                            \
        sig_bit++;                                                         \
        num_desc_per_page = num_desc_per_page >> 1;                        \
    }                                                                      \
    soc->tx_desc[pool_id].page_divider = (sig_bit - 1);                    \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
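
/*
 * Illustrative sketch (standalone, not driver code): how the page_divider
 * and offset_filter values computed by DP_TX_DESC_PAGE_DIVIDER map a flat
 * descriptor index onto a (page, offset) pair. For a power-of-two
 * num_desc_per_page, page_divider ends up as log2(num_desc_per_page) and
 * offset_filter as the matching bit mask. Guarded out so this file still
 * compiles; build the body standalone to experiment.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t num_desc_per_page = 64;          /* must be a power of two */
    uint32_t offset_filter = num_desc_per_page - 1;
    uint32_t n = num_desc_per_page;
    uint8_t sig_bit = 0;

    while (n) {              /* same loop as DP_TX_DESC_PAGE_DIVIDER */
        sig_bit++;
        n >>= 1;
    }

    uint8_t page_divider = sig_bit - 1;       /* log2(64) == 6 */
    uint32_t desc_id = 200;

    printf("page=%u offset=%u\n",
           desc_id >> page_divider,           /* 200 / 64 == 3 */
           desc_id & offset_filter);          /* 200 % 64 == 8 */
    return 0;
}
#endif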

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
                                   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
                                   uint16_t num_elem)
{
    tx_desc_pool->elem_count = num_elem;
    tx_desc_pool->num_free = num_elem;
    tx_desc_pool->num_allocated = 0;
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_clean_up() - Clean up the tx descriptors
 * @ctxt: context passed
 * @elem: element to be cleaned up
 * @elem_list: element list
 *
 */
static void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
{
    struct dp_soc *soc = (struct dp_soc *)ctxt;
    struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
    qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
    qdf_nbuf_t nbuf = NULL;

    if (tx_desc->nbuf) {
        nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
        dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);

        if (nbuf) {
            if (!nbuf_list) {
                dp_err("potential memory leak");
                qdf_assert_always(0);
            }

            nbuf->next = *nbuf_list;
            *nbuf_list = nbuf;
        }
    }
}

void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
    int i;
    struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
    uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

    for (i = 0; i < num_pool; i++) {
        tx_desc_pool = dp_get_tx_desc_pool(soc, i);
        if (tx_desc_pool)
            qdf_tx_desc_pool_free_bufs(soc,
                                       &tx_desc_pool->desc_pages,
                                       tx_desc_pool->elem_size,
                                       tx_desc_pool->elem_count,
                                       true, &dp_tx_desc_clean_up,
                                       nbuf_list);

        tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, i);
        if (tx_desc_pool)
            qdf_tx_desc_pool_free_bufs(soc,
                                       &tx_desc_pool->desc_pages,
                                       tx_desc_pool->elem_size,
                                       tx_desc_pool->elem_count,
                                       true, &dp_tx_desc_clean_up,
                                       nbuf_list);
    }
}
#endif
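
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * cleanup path above chains every reclaimed nbuf onto the caller-supplied
 * list head via nbuf->next, so the caller is expected to drain that list
 * afterwards. Assumes qdf_nbuf_free() as the release primitive; guarded out.
 */
#if 0
static void dp_tx_drain_cleanup_list_example(struct dp_soc *soc)
{
    qdf_nbuf_t nbuf_list = NULL;      /* head of the chained nbufs */
    qdf_nbuf_t next;

    dp_tx_desc_pool_cleanup(soc, &nbuf_list);

    while (nbuf_list) {               /* walk the ->next chain built above */
        next = nbuf_list->next;
        qdf_nbuf_free(nbuf_list);
        nbuf_list = next;
    }
}
#endif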

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
static void dp_tx_desc_pool_alloc_mem(struct dp_soc *soc, int8_t pool_id,
                                      bool spcl_tx_desc)
{
    struct dp_global_context *dp_global = NULL;

    dp_global = wlan_objmgr_get_global_ctx();
    if (spcl_tx_desc) {
        dp_global->spcl_tx_desc[soc->arch_id][pool_id] =
            qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
    } else {
        dp_global->tx_desc[soc->arch_id][pool_id] =
            qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
    }
}

static void dp_tx_desc_pool_free_mem(struct dp_soc *soc, int8_t pool_id,
                                     bool spcl_tx_desc)
{
    struct dp_global_context *dp_global = NULL;

    dp_global = wlan_objmgr_get_global_ctx();
    if (spcl_tx_desc) {
        if (!dp_global->spcl_tx_desc[soc->arch_id][pool_id])
            return;

        qdf_mem_free(dp_global->spcl_tx_desc[soc->arch_id][pool_id]);
        dp_global->spcl_tx_desc[soc->arch_id][pool_id] = NULL;
    } else {
        if (!dp_global->tx_desc[soc->arch_id][pool_id])
            return;

        qdf_mem_free(dp_global->tx_desc[soc->arch_id][pool_id]);
        dp_global->tx_desc[soc->arch_id][pool_id] = NULL;
    }
}
#else
static void dp_tx_desc_pool_alloc_mem(struct dp_soc *soc, int8_t pool_id,
                                      bool spcl_tx_desc)
{
}

static void dp_tx_desc_pool_free_mem(struct dp_soc *soc, int8_t pool_id,
                                     bool spcl_tx_desc)
{
}
#endif

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
                                 uint32_t num_elem, bool spcl_tx_desc)
{
    uint32_t desc_size, num_elem_t;
    struct dp_tx_desc_pool_s *tx_desc_pool;
    QDF_STATUS status;
    enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;

    desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
    dp_tx_desc_pool_alloc_mem(soc, pool_id, spcl_tx_desc);

    if (spcl_tx_desc) {
        tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
        desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
        num_elem_t = num_elem;
    } else {
        tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
        desc_type = QDF_DP_TX_DESC_TYPE;
        num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id, num_elem);
    }

    tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
    dp_desc_multi_pages_mem_alloc(soc, desc_type,
                                  &tx_desc_pool->desc_pages,
                                  desc_size, num_elem_t,
                                  0, true);
    if (!tx_desc_pool->desc_pages.num_pages) {
        dp_err("Multi page alloc fail, tx desc");
        return QDF_STATUS_E_NOMEM;
    }

    /* Arch specific TX descriptor allocation */
    status = soc->arch_ops.dp_tx_desc_pool_alloc(soc, num_elem_t, pool_id);
    if (QDF_IS_STATUS_ERROR(status)) {
        dp_err("failed to allocate arch specific descriptors");
        return QDF_STATUS_E_NOMEM;
    }

    return QDF_STATUS_SUCCESS;
}
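
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * expected pool lifecycle pairs alloc with init on the way up, and deinit
 * with free on the way down. The pool id and element count below are
 * placeholders. Guarded out.
 */
#if 0
static QDF_STATUS dp_tx_pool_lifecycle_example(struct dp_soc *soc)
{
    uint8_t pool_id = 0;
    uint32_t num_elem = 1024;        /* placeholder element count */
    QDF_STATUS status;

    status = dp_tx_desc_pool_alloc(soc, pool_id, num_elem, false);
    if (QDF_IS_STATUS_ERROR(status))
        return status;

    status = dp_tx_desc_pool_init(soc, pool_id, num_elem, false);
    if (QDF_IS_STATUS_ERROR(status)) {
        dp_tx_desc_pool_free(soc, pool_id, false);   /* undo the alloc */
        return status;
    }

    /* ... pool in use: descriptors served from tx_desc_pool->freelist ... */

    dp_tx_desc_pool_deinit(soc, pool_id, false);
    dp_tx_desc_pool_free(soc, pool_id, false);
    return QDF_STATUS_SUCCESS;
}
#endif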

void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id,
                          bool spcl_tx_desc)
{
    struct dp_tx_desc_pool_s *tx_desc_pool;
    enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;

    if (spcl_tx_desc) {
        tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
        desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
    } else {
        tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
        desc_type = QDF_DP_TX_DESC_TYPE;
    }

    /* Free with the type computed above; hard-coding QDF_DP_TX_DESC_TYPE
     * here would mis-account special pool memory.
     */
    if (tx_desc_pool->desc_pages.num_pages)
        dp_desc_multi_pages_mem_free(soc, desc_type,
                                     &tx_desc_pool->desc_pages, 0,
                                     true);

    /* Free arch specific TX descriptor */
    soc->arch_ops.dp_tx_desc_pool_free(soc, pool_id);
    dp_tx_desc_pool_free_mem(soc, pool_id, spcl_tx_desc);
}

QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
                                uint32_t num_elem, bool spcl_tx_desc)
{
    struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
    uint32_t desc_size, num_elem_t;

    desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

    if (spcl_tx_desc) {
        tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
        num_elem_t = num_elem;
    } else {
        tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
        num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id, num_elem);
    }

    if (qdf_mem_multi_page_link(soc->osdev,
                                &tx_desc_pool->desc_pages,
                                desc_size, num_elem_t, true)) {
        dp_err("invalid tx desc allocation - overflow num link");
        return QDF_STATUS_E_FAULT;
    }

    tx_desc_pool->freelist = (struct dp_tx_desc_s *)
        *tx_desc_pool->desc_pages.cacheable_pages;

    /* Set unique IDs for each Tx descriptor */
    if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
                                  soc, num_elem_t,
                                  pool_id, spcl_tx_desc)) {
        dp_err("initialization per target failed");
        return QDF_STATUS_E_FAULT;
    }

    tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
    dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem_t);
    TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

    return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id,
                            bool spcl_tx_desc)
{
    struct dp_tx_desc_pool_s *tx_desc_pool;

    if (spcl_tx_desc)
        tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
    else
        tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);

    soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool,
                                         pool_id, spcl_tx_desc);
    TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
    TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}

QDF_STATUS
dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
                                uint8_t pool_id)
{
    QDF_STATUS status;
    qdf_dma_context_t memctx = 0;
    uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
    struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
    uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

    dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
    memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

    /* Coherent tx extension descriptor alloc */
    dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_TYPE,
                                  &dp_tx_ext_desc_pool->desc_pages,
                                  elem_size, num_elem, memctx, false);
    if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "ext desc page alloc fail");
        return QDF_STATUS_E_NOMEM;
    }

    /*
     * Cacheable ext descriptor link alloc.
     * This structure is also large: a single element is 24 bytes, so
     * 2K elements take 48 KB. Multi-page cacheable memory has to be
     * allocated for it.
     */
    dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
                                  &dp_tx_ext_desc_pool->desc_link_pages,
                                  link_elem_size, num_elem, 0, true);
    if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "ext link desc page alloc fail");
        status = QDF_STATUS_E_NOMEM;
        goto free_ext_desc;
    }

    return QDF_STATUS_SUCCESS;

free_ext_desc:
    dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
                                 &dp_tx_ext_desc_pool->desc_pages,
                                 memctx, false);
    return status;
}

QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem)
{
    QDF_STATUS status;
    uint8_t pool_id, count;

    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem, pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
            dp_err("failed to allocate tx ext desc pool %d", pool_id);
            goto free_ext_desc_pool;
        }
    }

    return QDF_STATUS_SUCCESS;

free_ext_desc_pool:
    for (count = 0; count < pool_id; count++)
        dp_tx_ext_desc_pool_free_by_id(soc, count);

    return status;
}

QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                          uint8_t pool_id)
{
    uint32_t i;
    struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
    struct qdf_mem_dma_page_t *page_info;
    struct qdf_mem_multi_page_t *pages;
    struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
    QDF_STATUS status;

    /* link tx descriptors into a freelist */
    dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
    soc->tx_ext_desc[pool_id].elem_size =
        HAL_TX_EXT_DESC_WITH_META_DATA;
    soc->tx_ext_desc[pool_id].link_elem_size =
        sizeof(struct dp_tx_ext_desc_elem_s);
    soc->tx_ext_desc[pool_id].elem_count = num_elem;

    dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
        *dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

    if (qdf_mem_multi_page_link(soc->osdev,
                                &dp_tx_ext_desc_pool->desc_link_pages,
                                dp_tx_ext_desc_pool->link_elem_size,
                                dp_tx_ext_desc_pool->elem_count,
                                true)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "ext link desc page linking fail");
        status = QDF_STATUS_E_FAULT;
        goto fail;
    }

    /* Assign coherent memory pointer into linked free list */
    pages = &dp_tx_ext_desc_pool->desc_pages;
    page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
    c_elem = dp_tx_ext_desc_pool->freelist;
    p_elem = c_elem;
    for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
        if (!(i % pages->num_element_per_page)) {
            /*
             * First element of a new page:
             * point it at the start of the next DMA page.
             */
            if (!pages->dma_pages->page_v_addr_start) {
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_ERROR,
                          "link overflow");
                status = QDF_STATUS_E_FAULT;
                goto fail;
            }

            c_elem->vaddr =
                (void *)page_info->page_v_addr_start;
            c_elem->paddr = page_info->page_p_addr;
            page_info++;
        } else {
            c_elem->vaddr = (void *)(p_elem->vaddr +
                dp_tx_ext_desc_pool->elem_size);
            c_elem->paddr = (p_elem->paddr +
                dp_tx_ext_desc_pool->elem_size);
        }
        p_elem = c_elem;
        c_elem = c_elem->next;
        if (!c_elem)
            break;
    }

    dp_tx_ext_desc_pool->num_free = num_elem;
    qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);

    return QDF_STATUS_SUCCESS;

fail:
    return status;
}
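
/*
 * Illustrative sketch (standalone, not driver code): the loop above hands
 * each cacheable link element a slice of the coherent DMA pages, advancing
 * vaddr/paddr by elem_size within a page and jumping to the next DMA page
 * on a page boundary. The toy types below are hypothetical stand-ins for
 * the qdf structures. Guarded out; build the body standalone.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct toy_elem { void *vaddr; uintptr_t paddr; };

int main(void)
{
    enum { ELEM_SIZE = 128, ELEMS_PER_PAGE = 4, NUM_ELEM = 8 };
    static uint8_t page0[ELEM_SIZE * ELEMS_PER_PAGE];
    static uint8_t page1[ELEM_SIZE * ELEMS_PER_PAGE];
    uint8_t *pages[] = { page0, page1 };  /* stands in for dma_pages */
    struct toy_elem elems[NUM_ELEM];
    unsigned int i, page = 0;

    for (i = 0; i < NUM_ELEM; i++) {
        if (!(i % ELEMS_PER_PAGE)) {
            /* first element of a new page: start of the next DMA page */
            elems[i].vaddr = pages[page];
            elems[i].paddr = (uintptr_t)pages[page];
            page++;
        } else {
            /* otherwise: previous element plus one elem_size stride */
            elems[i].vaddr = (uint8_t *)elems[i - 1].vaddr + ELEM_SIZE;
            elems[i].paddr = elems[i - 1].paddr + ELEM_SIZE;
        }
        printf("elem %u -> offset %u within page %u\n", i,
               (unsigned int)((uintptr_t)elems[i].vaddr -
                              (uintptr_t)pages[page - 1]),
               page - 1);
    }
    return 0;
}
#endif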

QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem)
{
    uint8_t pool_id;
    QDF_STATUS status;

    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem, pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
            dp_err("failed to init ext desc pool %d", pool_id);
            goto fail;
        }
    }

    return QDF_STATUS_SUCCESS;

fail:
    return status;
}

void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
    struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
    qdf_dma_context_t memctx = 0;

    dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
    memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

    dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
                                 &dp_tx_ext_desc_pool->desc_link_pages,
                                 0, true);

    dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
                                 &dp_tx_ext_desc_pool->desc_pages,
                                 memctx, false);
}

void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
    uint8_t pool_id;

    for (pool_id = 0; pool_id < num_pool; pool_id++)
        dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
}

void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
    struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

    dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
    qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
}

void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
    uint8_t pool_id;

    for (pool_id = 0; pool_id < num_pool; pool_id++)
        dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
}

#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
                                           uint8_t pool_id)
{
    struct dp_tx_tso_seg_pool_s *tso_desc_pool;
    uint32_t desc_size;

    desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

    tso_desc_pool = &soc->tx_tso_desc[pool_id];
    tso_desc_pool->num_free = 0;
    dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_DESC_TYPE,
                                  &tso_desc_pool->desc_pages,
                                  desc_size, num_elem, 0, true);
    if (!tso_desc_pool->desc_pages.num_pages) {
        dp_err("Multi page alloc fail, tso desc");
        return QDF_STATUS_E_NOMEM;
    }

    return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem)
{
    uint32_t pool_id, i;
    QDF_STATUS status;

    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem,
                                                 pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
            dp_err("failed to allocate TSO desc pool %d", pool_id);
            goto fail;
        }
    }

    return QDF_STATUS_SUCCESS;

fail:
    for (i = 0; i < pool_id; i++)
        dp_tx_tso_desc_pool_free_by_id(soc, i);

    return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
    struct dp_tx_tso_seg_pool_s *tso_desc_pool;

    tso_desc_pool = &soc->tx_tso_desc[pool_id];
    dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_DESC_TYPE,
                                 &tso_desc_pool->desc_pages,
                                 0, true);
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
    uint32_t pool_id;

    for (pool_id = 0; pool_id < num_pool; pool_id++)
        dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                          uint8_t pool_id)
{
    struct dp_tx_tso_seg_pool_s *tso_desc_pool;
    uint32_t desc_size;

    desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

    tso_desc_pool = &soc->tx_tso_desc[pool_id];

    if (qdf_mem_multi_page_link(soc->osdev,
                                &tso_desc_pool->desc_pages,
                                desc_size,
                                num_elem, true)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "invalid tso desc allocation - overflow num link");
        return QDF_STATUS_E_FAULT;
    }

    tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
        *tso_desc_pool->desc_pages.cacheable_pages;
    tso_desc_pool->num_free = num_elem;
    TSO_DEBUG("Number of free descriptors: %u\n",
              tso_desc_pool->num_free);
    tso_desc_pool->pool_size = num_elem;
    qdf_spinlock_create(&tso_desc_pool->lock);

    return QDF_STATUS_SUCCESS;
}
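
/*
 * Illustrative sketch (hypothetical helper, not part of this file): how a
 * consumer might pop one segment element off the freelist initialised
 * above, under the pool spinlock. Assumes qdf_tso_seg_elem_t chains via a
 * ->next pointer, matching the freelist linking used here. Guarded out.
 */
#if 0
static struct qdf_tso_seg_elem_t *
dp_tx_tso_seg_pop_example(struct dp_soc *soc, uint8_t pool_id)
{
    struct dp_tx_tso_seg_pool_s *pool = &soc->tx_tso_desc[pool_id];
    struct qdf_tso_seg_elem_t *elem = NULL;

    qdf_spin_lock_bh(&pool->lock);
    if (pool->freelist) {
        elem = pool->freelist;           /* take the head element */
        pool->freelist = elem->next;     /* advance the freelist */
        pool->num_free--;
    }
    qdf_spin_unlock_bh(&pool->lock);

    return elem;
}
#endif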

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem)
{
    QDF_STATUS status;
    uint32_t pool_id;

    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem,
                                                pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
            dp_err("failed to initialise TSO desc pool %d", pool_id);
            return status;
        }
    }

    return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
    struct dp_tx_tso_seg_pool_s *tso_desc_pool;

    tso_desc_pool = &soc->tx_tso_desc[pool_id];

    if (tso_desc_pool->pool_size) {
        qdf_spin_lock_bh(&tso_desc_pool->lock);
        tso_desc_pool->freelist = NULL;
        tso_desc_pool->num_free = 0;
        tso_desc_pool->pool_size = 0;
        qdf_spin_unlock_bh(&tso_desc_pool->lock);
        qdf_spinlock_destroy(&tso_desc_pool->lock);
    }
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
    uint32_t pool_id;

    for (pool_id = 0; pool_id < num_pool; pool_id++)
        dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
                                              uint32_t num_elem,
                                              uint8_t pool_id)
{
    struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
    uint32_t desc_size;

    desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

    tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
    tso_num_seg_pool->num_free = 0;
    dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
                                  &tso_num_seg_pool->desc_pages,
                                  desc_size,
                                  num_elem, 0, true);
    if (!tso_num_seg_pool->desc_pages.num_pages) {
        dp_err("Multi page alloc fail, tso_num_seg_pool");
        return QDF_STATUS_E_NOMEM;
    }

    return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                        uint32_t num_elem)
{
    uint32_t pool_id, i;
    QDF_STATUS status;

    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem,
                                                    pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
            dp_err("failed to allocate TSO num seg pool %d", pool_id);
            goto fail;
        }
    }

    return QDF_STATUS_SUCCESS;

fail:
    /* free the pools allocated so far, indexed by i (not pool_id) */
    for (i = 0; i < pool_id; i++)
        dp_tx_tso_num_seg_pool_free_by_id(soc, i);

    return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
    struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

    tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
    dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
                                 &tso_num_seg_pool->desc_pages,
                                 0, true);
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
    uint32_t pool_id;

    for (pool_id = 0; pool_id < num_pool; pool_id++)
        dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                  uint8_t pool_id)
{
    struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
    uint32_t desc_size;

    desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

    tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

    if (qdf_mem_multi_page_link(soc->osdev,
                                &tso_num_seg_pool->desc_pages,
                                desc_size,
                                num_elem, true)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "invalid tso num seg allocation - overflow num link");
        return QDF_STATUS_E_FAULT;
    }

    tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
        *tso_num_seg_pool->desc_pages.cacheable_pages;
    tso_num_seg_pool->num_free = num_elem;
    tso_num_seg_pool->num_seg_pool_size = num_elem;
    qdf_spinlock_create(&tso_num_seg_pool->lock);

    return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                       uint32_t num_elem)
{
    uint32_t pool_id;
    QDF_STATUS status;

    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem,
                                                   pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
            dp_err("failed to initialise TSO num seg pool %d", pool_id);
            return status;
        }
    }

    return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
    struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

    tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

    if (tso_num_seg_pool->num_seg_pool_size) {
        qdf_spin_lock_bh(&tso_num_seg_pool->lock);
        tso_num_seg_pool->freelist = NULL;
        tso_num_seg_pool->num_free = 0;
        tso_num_seg_pool->num_seg_pool_size = 0;
        qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
        qdf_spinlock_destroy(&tso_num_seg_pool->lock);
    }
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
    uint32_t pool_id;

    for (pool_id = 0; pool_id < num_pool; pool_id++)
        dp_tx_tso_num_seg_pool_deinit_by_id(soc, pool_id);
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
                                           uint8_t pool_id)
{
    return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem)
{
    return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                          uint8_t pool_id)
{
    return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem)
{
    return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
                                              uint32_t num_elem,
                                              uint8_t pool_id)
{
    return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                        uint32_t num_elem)
{
    return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                  uint8_t pool_id)
{
    return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                       uint32_t num_elem)
{
    return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif