dp_tx_desc.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \
do { \
        uint8_t sig_bit; \
        soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
        /* Calculate page divider to find page number */ \
        sig_bit = 0; \
        while (num_desc_per_page) { \
                sig_bit++; \
                num_desc_per_page = num_desc_per_page >> 1; \
        } \
        soc->tx_desc[pool_id].page_divider = (sig_bit - 1); \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
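
/*
 * Worked example of the page divider math above: if num_desc_per_page is
 * 1024, the loop counts 11 significant bits, so page_divider becomes 10
 * and offset_filter becomes 0x3FF. A descriptor index can then be split
 * into a page number and an in-page offset; a sketch of how the lookup
 * side is expected to use these fields:
 *
 *      page   = desc_id >> soc->tx_desc[pool_id].page_divider;
 *      offset = desc_id & soc->tx_desc[pool_id].offset_filter;
 */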

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
                                   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
                                   uint16_t num_elem)
{
        tx_desc_pool->elem_count = num_elem;
        tx_desc_pool->num_free = num_elem;
        tx_desc_pool->num_allocated = 0;
}
#endif
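
/*
 * Note on the counters (a reading of the intended invariant, not stated
 * explicitly in the source): after initialization the pool should always
 * satisfy num_free + num_allocated == elem_count, with allocations moving
 * descriptors from num_free to num_allocated and releases moving them
 * back. Under QCA_LL_TX_FLOW_CONTROL_V2 the flow-control code keeps its
 * own accounting, which is presumably why the body is empty there.
 */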

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_clean_up() - Clean up the tx descriptors
 * @ctxt: context passed
 * @elem: element to be cleaned up
 * @elem_list: element list
 *
 * Return: None
 */
static void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
{
        struct dp_soc *soc = (struct dp_soc *)ctxt;
        struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
        qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
        qdf_nbuf_t nbuf = NULL;

        if (tx_desc->nbuf) {
                nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
                dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);

                if (nbuf) {
                        if (!nbuf_list) {
                                dp_err("potential memory leak");
                                qdf_assert_always(0);
                        }

                        nbuf->next = *nbuf_list;
                        *nbuf_list = nbuf;
                }
        }
}

void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list,
                             bool cleanup)
{
        int i;
        struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
        uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

        if (!cleanup)
                return;

        for (i = 0; i < num_pool; i++) {
                /* Skip pools that were never allocated; check the pool
                 * pointer before taking its lock.
                 */
                tx_desc_pool = dp_get_tx_desc_pool(soc, i);
                if (tx_desc_pool) {
                        TX_DESC_LOCK_LOCK(&tx_desc_pool->lock);
                        qdf_tx_desc_pool_free_bufs(soc,
                                                   &tx_desc_pool->desc_pages,
                                                   tx_desc_pool->elem_size,
                                                   tx_desc_pool->elem_count,
                                                   true, &dp_tx_desc_clean_up,
                                                   nbuf_list);
                        TX_DESC_LOCK_UNLOCK(&tx_desc_pool->lock);
                }

                tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, i);
                if (tx_desc_pool) {
                        TX_DESC_LOCK_LOCK(&tx_desc_pool->lock);
                        qdf_tx_desc_pool_free_bufs(soc,
                                                   &tx_desc_pool->desc_pages,
                                                   tx_desc_pool->elem_size,
                                                   tx_desc_pool->elem_count,
                                                   true, &dp_tx_desc_clean_up,
                                                   nbuf_list);
                        TX_DESC_LOCK_UNLOCK(&tx_desc_pool->lock);
                }
        }
}
#endif
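
/*
 * Usage sketch (hypothetical caller, not part of this file): during a
 * UMAC HW reset, the reset handler is expected to collect the nbufs still
 * attached to in-flight descriptors and free them once the pools have
 * been walked, e.g.:
 *
 *      qdf_nbuf_t nbuf_list = NULL;
 *      qdf_nbuf_t next;
 *
 *      dp_tx_desc_pool_cleanup(soc, &nbuf_list, true);
 *      while (nbuf_list) {
 *              next = nbuf_list->next;
 *              qdf_nbuf_free(nbuf_list);
 *              nbuf_list = next;
 *      }
 */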

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
static void dp_tx_desc_pool_alloc_mem(struct dp_soc *soc, int8_t pool_id,
                                      bool spcl_tx_desc)
{
        struct dp_global_context *dp_global = NULL;

        dp_global = wlan_objmgr_get_global_ctx();

        if (spcl_tx_desc) {
                dp_global->spcl_tx_desc[soc->arch_id][pool_id] =
                        qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
        } else {
                dp_global->tx_desc[soc->arch_id][pool_id] =
                        qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
        }
}

static void dp_tx_desc_pool_free_mem(struct dp_soc *soc, int8_t pool_id,
                                     bool spcl_tx_desc)
{
        struct dp_global_context *dp_global = NULL;

        dp_global = wlan_objmgr_get_global_ctx();

        if (spcl_tx_desc) {
                if (!dp_global->spcl_tx_desc[soc->arch_id][pool_id])
                        return;

                qdf_mem_free(dp_global->spcl_tx_desc[soc->arch_id][pool_id]);
                dp_global->spcl_tx_desc[soc->arch_id][pool_id] = NULL;
        } else {
                if (!dp_global->tx_desc[soc->arch_id][pool_id])
                        return;

                qdf_mem_free(dp_global->tx_desc[soc->arch_id][pool_id]);
                dp_global->tx_desc[soc->arch_id][pool_id] = NULL;
        }
}
#else
static void dp_tx_desc_pool_alloc_mem(struct dp_soc *soc, int8_t pool_id,
                                      bool spcl_tx_desc)
{
}

static void dp_tx_desc_pool_free_mem(struct dp_soc *soc, int8_t pool_id,
                                     bool spcl_tx_desc)
{
}
#endif
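
/*
 * With QCA_SUPPORT_DP_GLOBAL_CTX the pool structs live in the global
 * context, indexed by [soc->arch_id][pool_id], so pools for multiple SoCs
 * are tracked in one place; dp_get_tx_desc_pool()/dp_get_spcl_tx_desc_pool()
 * are presumably the accessors that hide this difference from the rest of
 * this file. Without the flag, the pools are embedded elsewhere (e.g. in
 * dp_soc) and the two helpers above compile to empty stubs.
 */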

QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
                                 uint32_t num_elem, bool spcl_tx_desc)
{
        uint32_t desc_size, num_elem_t;
        struct dp_tx_desc_pool_s *tx_desc_pool;
        QDF_STATUS status;
        enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
        dp_tx_desc_pool_alloc_mem(soc, pool_id, spcl_tx_desc);

        if (spcl_tx_desc) {
                tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
                desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
                num_elem_t = num_elem;
        } else {
                tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
                desc_type = QDF_DP_TX_DESC_TYPE;
                num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id,
                                                    num_elem);
        }

        tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
        dp_desc_multi_pages_mem_alloc(soc, desc_type,
                                      &tx_desc_pool->desc_pages,
                                      desc_size, num_elem_t,
                                      0, true);

        if (!tx_desc_pool->desc_pages.num_pages) {
                dp_err("Multi page alloc fail, tx desc");
                return QDF_STATUS_E_NOMEM;
        }

        /* Arch specific TX descriptor allocation */
        status = soc->arch_ops.dp_tx_desc_pool_alloc(soc, num_elem_t, pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
                dp_err("failed to allocate arch specific descriptors");
                return QDF_STATUS_E_NOMEM;
        }

        return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id,
                          bool spcl_tx_desc)
{
        struct dp_tx_desc_pool_s *tx_desc_pool;
        enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;

        if (spcl_tx_desc) {
                tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
                desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
        } else {
                tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
                desc_type = QDF_DP_TX_DESC_TYPE;
        }

        /* Free the pages under the same desc_type they were allocated with */
        if (tx_desc_pool->desc_pages.num_pages)
                dp_desc_multi_pages_mem_free(soc, desc_type,
                                             &tx_desc_pool->desc_pages, 0,
                                             true);

        /* Free arch specific TX descriptor */
        soc->arch_ops.dp_tx_desc_pool_free(soc, pool_id);
        dp_tx_desc_pool_free_mem(soc, pool_id, spcl_tx_desc);
}

QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
                                uint32_t num_elem, bool spcl_tx_desc)
{
        struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
        uint32_t desc_size, num_elem_t;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

        if (spcl_tx_desc) {
                tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
                num_elem_t = num_elem;
        } else {
                tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
                num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id,
                                                    num_elem);
        }

        if (qdf_mem_multi_page_link(soc->osdev,
                                    &tx_desc_pool->desc_pages,
                                    desc_size, num_elem_t, true)) {
                dp_err("invalid tx desc allocation - overflow num link");
                return QDF_STATUS_E_FAULT;
        }

        tx_desc_pool->freelist = (struct dp_tx_desc_s *)
                *tx_desc_pool->desc_pages.cacheable_pages;

        /* Set unique IDs for each Tx descriptor */
        if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
                                                soc, num_elem_t,
                                                pool_id, spcl_tx_desc)) {
                dp_err("initialization per target failed");
                return QDF_STATUS_E_FAULT;
        }

        tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
        dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem_t);
        TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

        return QDF_STATUS_SUCCESS;
}
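
/*
 * Lifecycle sketch (a reading of how these entry points pair up, not a
 * contract stated in this file): each pool is expected to go through
 *
 *      dp_tx_desc_pool_alloc()  -> dp_tx_desc_pool_init()  -> ...use... ->
 *      dp_tx_desc_pool_deinit() -> dp_tx_desc_pool_free()
 *
 * with alloc/free handling the multi-page backing memory and init/deinit
 * handling the freelist linkage, per-target descriptor IDs, counters, and
 * the pool lock.
 */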

void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id,
                            bool spcl_tx_desc)
{
        struct dp_tx_desc_pool_s *tx_desc_pool;

        if (spcl_tx_desc)
                tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
        else
                tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);

        soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool,
                                             pool_id, spcl_tx_desc);
        TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
        TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}

QDF_STATUS
dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
                                uint8_t pool_id)
{
        QDF_STATUS status;
        qdf_dma_context_t memctx = 0;
        uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
        struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
        uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

        /* Coherent tx extension descriptor alloc */
        dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_TYPE,
                                      &dp_tx_ext_desc_pool->desc_pages,
                                      elem_size, num_elem, memctx, false);

        if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "ext desc page alloc fail");
                return QDF_STATUS_E_NOMEM;
        }

        /*
         * Cacheable ext descriptor link alloc.
         * This structure is already large: a single element is 24 bytes,
         * so 2K elements take 48 KB. It therefore has to be allocated as
         * multi-page cacheable memory.
         */
        dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
                                      &dp_tx_ext_desc_pool->desc_link_pages,
                                      link_elem_size, num_elem, 0, true);

        if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "ext link desc page alloc fail");
                status = QDF_STATUS_E_NOMEM;
                goto free_ext_desc;
        }

        return QDF_STATUS_SUCCESS;

free_ext_desc:
        dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
                                     &dp_tx_ext_desc_pool->desc_pages,
                                     memctx, false);
        return status;
}

QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem)
{
        QDF_STATUS status;
        uint8_t pool_id, count;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem,
                                                         pool_id);
                if (QDF_IS_STATUS_ERROR(status)) {
                        dp_err("failed to allocate tx ext desc pool %d",
                               pool_id);
                        goto free_ext_desc_pool;
                }
        }

        return QDF_STATUS_SUCCESS;

free_ext_desc_pool:
        for (count = 0; count < pool_id; count++)
                dp_tx_ext_desc_pool_free_by_id(soc, count);

        return status;
}

QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                          uint8_t pool_id)
{
        uint32_t i;
        struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
        struct qdf_mem_dma_page_t *page_info;
        struct qdf_mem_multi_page_t *pages;
        struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
        QDF_STATUS status;

        /* link tx descriptors into a freelist */
        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        soc->tx_ext_desc[pool_id].elem_size =
                HAL_TX_EXT_DESC_WITH_META_DATA;
        soc->tx_ext_desc[pool_id].link_elem_size =
                sizeof(struct dp_tx_ext_desc_elem_s);
        soc->tx_ext_desc[pool_id].elem_count = num_elem;

        dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
                *dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

        if (qdf_mem_multi_page_link(soc->osdev,
                                    &dp_tx_ext_desc_pool->desc_link_pages,
                                    dp_tx_ext_desc_pool->link_elem_size,
                                    dp_tx_ext_desc_pool->elem_count,
                                    true)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "ext link desc page linking fail");
                status = QDF_STATUS_E_FAULT;
                goto fail;
        }

        /* Assign coherent memory pointer into linked free list */
        pages = &dp_tx_ext_desc_pool->desc_pages;
        page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
        c_elem = dp_tx_ext_desc_pool->freelist;
        p_elem = c_elem;
        for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
                if (!(i % pages->num_element_per_page)) {
                        /*
                         * First element of a new page should point at the
                         * start of the next coherent page.
                         */
                        if (!pages->dma_pages->page_v_addr_start) {
                                QDF_TRACE(QDF_MODULE_ID_DP,
                                          QDF_TRACE_LEVEL_ERROR,
                                          "link over flow");
                                status = QDF_STATUS_E_FAULT;
                                goto fail;
                        }

                        c_elem->vaddr =
                                (void *)page_info->page_v_addr_start;
                        c_elem->paddr = page_info->page_p_addr;
                        page_info++;
                } else {
                        c_elem->vaddr = (void *)(p_elem->vaddr +
                                dp_tx_ext_desc_pool->elem_size);
                        c_elem->paddr = (p_elem->paddr +
                                dp_tx_ext_desc_pool->elem_size);
                }

                p_elem = c_elem;
                c_elem = c_elem->next;
                if (!c_elem)
                        break;
        }

        dp_tx_ext_desc_pool->num_free = num_elem;
        qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);

        return QDF_STATUS_SUCCESS;

fail:
        return status;
}
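
/*
 * Worked example of the vaddr/paddr assignment loop (illustrative values
 * only; assume for the sketch that elem_size is 128 bytes and a coherent
 * page starts at virtual address V and physical address P):
 *
 *      elem 0: vaddr = V,       paddr = P
 *      elem 1: vaddr = V + 128, paddr = P + 128
 *      elem 2: vaddr = V + 256, paddr = P + 256
 *      ...until the page boundary, where the next dma page is taken.
 *
 * Each cacheable link element thus carries both the CPU view (vaddr) and
 * the device view (paddr) of its coherent extension descriptor.
 */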

QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem)
{
        uint8_t pool_id;
        QDF_STATUS status;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem,
                                                        pool_id);
                if (QDF_IS_STATUS_ERROR(status)) {
                        dp_err("failed to init ext desc pool %d", pool_id);
                        goto fail;
                }
        }

        return QDF_STATUS_SUCCESS;

fail:
        return status;
}

void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
        qdf_dma_context_t memctx = 0;

        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

        dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
                                     &dp_tx_ext_desc_pool->desc_link_pages,
                                     0, true);

        dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
                                     &dp_tx_ext_desc_pool->desc_pages,
                                     memctx, false);
}

void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
        uint8_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++)
                dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
}

void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
}

void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
        uint8_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++)
                dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
}

#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc,
                                           uint32_t num_elem,
                                           uint8_t pool_id)
{
        struct dp_tx_tso_seg_pool_s *tso_desc_pool;
        uint32_t desc_size;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

        tso_desc_pool = &soc->tx_tso_desc[pool_id];
        tso_desc_pool->num_free = 0;
        dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_DESC_TYPE,
                                      &tso_desc_pool->desc_pages,
                                      desc_size, num_elem, 0, true);

        if (!tso_desc_pool->desc_pages.num_pages) {
                dp_err("Multi page alloc fail, tso desc");
                return QDF_STATUS_E_NOMEM;
        }

        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem)
{
        uint32_t pool_id, i;
        QDF_STATUS status;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem,
                                                         pool_id);
                if (QDF_IS_STATUS_ERROR(status)) {
                        dp_err("failed to allocate TSO desc pool %d", pool_id);
                        goto fail;
                }
        }

        return QDF_STATUS_SUCCESS;

fail:
        for (i = 0; i < pool_id; i++)
                dp_tx_tso_desc_pool_free_by_id(soc, i);

        return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_tso_seg_pool_s *tso_desc_pool;

        tso_desc_pool = &soc->tx_tso_desc[pool_id];
        dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_DESC_TYPE,
                                     &tso_desc_pool->desc_pages,
                                     0, true);
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++)
                dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc,
                                          uint32_t num_elem,
                                          uint8_t pool_id)
{
        struct dp_tx_tso_seg_pool_s *tso_desc_pool;
        uint32_t desc_size;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

        tso_desc_pool = &soc->tx_tso_desc[pool_id];

        if (qdf_mem_multi_page_link(soc->osdev,
                                    &tso_desc_pool->desc_pages,
                                    desc_size,
                                    num_elem, true)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "invalid tso desc allocation - overflow num link");
                return QDF_STATUS_E_FAULT;
        }

        tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
                *tso_desc_pool->desc_pages.cacheable_pages;
        tso_desc_pool->num_free = num_elem;

        TSO_DEBUG("Number of free descriptors: %u\n",
                  tso_desc_pool->num_free);
        tso_desc_pool->pool_size = num_elem;
        qdf_spinlock_create(&tso_desc_pool->lock);

        return QDF_STATUS_SUCCESS;
}
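
/*
 * Note (a hedged reading of the qdf helper): qdf_mem_multi_page_link() is
 * expected to chain every desc_size-sized element across the cacheable
 * pages through an embedded next pointer, so the freelist head is simply
 * the first element of the first page, i.e. *desc_pages.cacheable_pages,
 * as assigned above. The same pattern is used for the regular tx desc
 * pool earlier in this file and for the TSO num-seg pool below.
 */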

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem)
{
        QDF_STATUS status;
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem,
                                                        pool_id);
                if (QDF_IS_STATUS_ERROR(status)) {
                        dp_err("failed to initialise TSO desc pool %d",
                               pool_id);
                        return status;
                }
        }

        return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_tso_seg_pool_s *tso_desc_pool;

        tso_desc_pool = &soc->tx_tso_desc[pool_id];

        if (tso_desc_pool->pool_size) {
                qdf_spin_lock_bh(&tso_desc_pool->lock);
                tso_desc_pool->freelist = NULL;
                tso_desc_pool->num_free = 0;
                tso_desc_pool->pool_size = 0;
                qdf_spin_unlock_bh(&tso_desc_pool->lock);
                qdf_spinlock_destroy(&tso_desc_pool->lock);
        }
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++)
                dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
                                              uint32_t num_elem,
                                              uint8_t pool_id)
{
        struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
        uint32_t desc_size;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

        tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
        tso_num_seg_pool->num_free = 0;
        dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
                                      &tso_num_seg_pool->desc_pages,
                                      desc_size,
                                      num_elem, 0, true);

        if (!tso_num_seg_pool->desc_pages.num_pages) {
                dp_err("Multi page alloc fail, tso_num_seg_pool");
                return QDF_STATUS_E_NOMEM;
        }

        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                        uint32_t num_elem)
{
        uint32_t pool_id, i;
        QDF_STATUS status;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem,
                                                            pool_id);
                if (QDF_IS_STATUS_ERROR(status)) {
                        dp_err("failed to allocate TSO num seg pool %d",
                               pool_id);
                        goto fail;
                }
        }

        return QDF_STATUS_SUCCESS;

fail:
        /* free only the pools that were successfully allocated: [0, pool_id) */
        for (i = 0; i < pool_id; i++)
                dp_tx_tso_num_seg_pool_free_by_id(soc, i);

        return QDF_STATUS_E_NOMEM;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

        tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
        dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
                                     &tso_num_seg_pool->desc_pages,
                                     0, true);
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++)
                dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                  uint8_t pool_id)
{
        struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
        uint32_t desc_size;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

        tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

        if (qdf_mem_multi_page_link(soc->osdev,
                                    &tso_num_seg_pool->desc_pages,
                                    desc_size,
                                    num_elem, true)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "invalid tso num seg allocation - overflow num link");
                return QDF_STATUS_E_FAULT;
        }

        tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
                *tso_num_seg_pool->desc_pages.cacheable_pages;
        tso_num_seg_pool->num_free = num_elem;
        tso_num_seg_pool->num_seg_pool_size = num_elem;
        qdf_spinlock_create(&tso_num_seg_pool->lock);

        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                       uint32_t num_elem)
{
        uint32_t pool_id;
        QDF_STATUS status;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem,
                                                           pool_id);
                if (QDF_IS_STATUS_ERROR(status)) {
                        dp_err("failed to initialise TSO num seg pool %d",
                               pool_id);
                        return status;
                }
        }

        return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;

        tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];

        if (tso_num_seg_pool->num_seg_pool_size) {
                qdf_spin_lock_bh(&tso_num_seg_pool->lock);
                tso_num_seg_pool->freelist = NULL;
                tso_num_seg_pool->num_free = 0;
                tso_num_seg_pool->num_seg_pool_size = 0;
                qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
                qdf_spinlock_destroy(&tso_num_seg_pool->lock);
        }
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++)
                dp_tx_tso_num_seg_pool_deinit_by_id(soc, pool_id);
}

#else
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc,
                                           uint32_t num_elem,
                                           uint8_t pool_id)
{
        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem)
{
        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc,
                                          uint32_t num_elem,
                                          uint8_t pool_id)
{
        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem)
{
        return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
                                              uint32_t num_elem,
                                              uint8_t pool_id)
{
        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                        uint32_t num_elem)
{
        return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
                                  uint8_t pool_id)
{
        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                       uint32_t num_elem)
{
        return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif